/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
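
/* An editorial note, inferred from how GFP_TRY is used below (an assumption,
 * not from the original authors): __GFP_HIGHMEM is fine because these pool
 * pages are only ever touched via kmap(), and __GFP_NOWARN because a failed
 * allocation here is expected and handled by the retry loop in
 * drbd_pp_alloc().  There is no __GFP_WAIT bit, so these opportunistic
 * allocations never block and never trigger write-out themselves. */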

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}
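
/*
 * Illustrative sketch (editorial addition, not part of the driver flow) of
 * how these helpers compose.  Taking n pages off the global pool and later
 * returning them:
 *
 *	spin_lock(&drbd_pp_lock);
 *	page = page_chain_del(&drbd_pp_pool, n);
 *	spin_unlock(&drbd_pp_lock);
 *	if (page) {
 *		... use the chain, walking it via page_chain_next() ...
 *		tail = page_chain_tail(page, &n);
 *		spin_lock(&drbd_pp_lock);
 *		page_chain_add(&drbd_pp_pool, page, tail);
 *		spin_unlock(&drbd_pp_lock);
 *	}
 *
 * page_chain_del() returns NULL (and leaves the pool untouched) if fewer
 * than n pages are linked; page_chain_tail() may run outside the lock.
 * This is exactly the pattern used by drbd_pp_first_pages_or_try_alloc()
 * and drbd_pp_free() below.
 */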

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that has not finished,
	   we can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}
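
/* Typical call, as in drbd_alloc_ee() further below (an editorial sketch,
 * not an additional code path): whether we may sleep and retry is derived
 * from the caller's gfp mask,
 *
 *	page = drbd_pp_alloc(mdev, nr_pages, gfp_mask & __GFP_WAIT);
 *
 * so GFP_NOIO callers wait until pages become available, while callers
 * without __GFP_WAIT get NULL when both the pool and the allocator come
 * up empty. */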

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * It is also used from inside another spin_lock_irq(&mdev->req_lock);
 * it either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* See also kernel_accept(), which is only present since 2.6.18;
 * we also want to log exactly which part of it failed. */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN: case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}
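
/* A note on the two header variants handled above, as implied by the field
 * accesses (editorial, not from the original authors): h80 (magic
 * BE_DRBD_MAGIC) carries a 16 bit command and length, while h95 (magic
 * BE_DRBD_MAGIC_BIG) widens the length to 32 bits for larger packets;
 * both variants are received into the same union p_header. */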

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, and possibly finishes it
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee() - Submit an epoch entry's pages as one or more bios
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return -ENOMEM;
}
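
/* Typical use, as in recv_resync_read() below (an editorial sketch of the
 * calling convention, not an additional code path):
 *
 *	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
 *		return true;
 *	... otherwise unhook e from its list and drbd_free_ee() it ...
 *
 * A nonzero return currently means -ENOMEM: no bio could be allocated,
 * all partially built bios were put back, and nothing was submitted. */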

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size & 0x1ff) return NULL;
	ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
			     rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			dev_warn(DEV, "short read receiving data reply: "
			     "read %d expected %d\n",
			     rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->colision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return false;
	}

	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->colision));
		hlist_del_init(&e->colision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->colision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->colision));
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<24 / 1<<21 ==
 * 1<<3 == 8 seconds for the 24bit wrap (historical atomic_t guarantee on
 * some archs), and 1<<32 / 1<<21 == 1<<11 == 2048 seconds aka ages for the
 * 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1583static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1584{
1585 DEFINE_WAIT(wait);
1586 unsigned int p_seq;
1587 long timeout;
1588 int ret = 0;
1589 spin_lock(&mdev->peer_seq_lock);
1590 for (;;) {
1591 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1592 if (seq_le(packet_seq, mdev->peer_seq+1))
1593 break;
1594 if (signal_pending(current)) {
1595 ret = -ERESTARTSYS;
1596 break;
1597 }
1598 p_seq = mdev->peer_seq;
1599 spin_unlock(&mdev->peer_seq_lock);
1600 timeout = schedule_timeout(30*HZ);
1601 spin_lock(&mdev->peer_seq_lock);
1602 if (timeout == 0 && p_seq == mdev->peer_seq) {
1603 ret = -ETIMEDOUT;
1604 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1605 break;
1606 }
1607 }
1608 finish_wait(&mdev->seq_wait, &wait);
1609 if (mdev->peer_seq+1 == packet_seq)
1610 mdev->peer_seq++;
1611 spin_unlock(&mdev->peer_seq_lock);
1612 return ret;
1613}
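/* Illustrative sketch, not part of the original file: the loop above
 * relies on a wrap-around aware sequence comparison.  Assuming seq_le()
 * follows the usual kernel idiom (compute the difference in unsigned
 * arithmetic, then reinterpret it as signed), a minimal version is: */
static inline int example_seq_le(u32 a, u32 b)
{
	/* true if a is, wrap-around aware, less than or equal to b;
	 * e.g. a = 0xffffffff, b = 0x00000001: a - b = 0xfffffffe,
	 * negative as s32, so the "older" a correctly compares as <= b. */
	return (s32)(a - b) <= 0;
}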
1614
Lars Ellenberg688593c2010-11-17 22:25:03 +01001615/* see also bio_flags_to_wire():
 1616 * we semantically map bio (REQ_*) flags to data packet (DP_*) flags and back,
 1617 * because we may replicate to peers running other kernel versions. */
1618static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001619{
Lars Ellenberg688593c2010-11-17 22:25:03 +01001620 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1621 (dpf & DP_FUA ? REQ_FUA : 0) |
1622 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1623 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001624}
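/* Illustrative counterpart, not part of the original file: the sending
 * side's bio_flags_to_wire() (referenced in the comment above) presumably
 * performs the inverse mapping; a minimal sketch under that assumption: */
static u32 example_bio_flags_to_wire(unsigned long bi_rw)
{
	/* map kernel bio (REQ_*) flags to on-the-wire (DP_*) flags */
	return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
	       (bi_rw & REQ_FUA ? DP_FUA : 0) |
	       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
	       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
}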
1625
Philipp Reisnerb411b362009-09-25 16:07:19 -07001626/* mirrored write */
Philipp Reisner02918be2010-08-20 14:35:10 +02001627static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001628{
1629 sector_t sector;
1630 struct drbd_epoch_entry *e;
Philipp Reisner02918be2010-08-20 14:35:10 +02001631 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001632 int rw = WRITE;
1633 u32 dp_flags;
1634
Philipp Reisnerb411b362009-09-25 16:07:19 -07001635 if (!get_ldev(mdev)) {
1636 if (__ratelimit(&drbd_ratelimit_state))
1637 dev_err(DEV, "Can not write mirrored data block "
1638 "to local disk.\n");
1639 spin_lock(&mdev->peer_seq_lock);
1640 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1641 mdev->peer_seq++;
1642 spin_unlock(&mdev->peer_seq_lock);
1643
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001644 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001645 atomic_inc(&mdev->current_epoch->epoch_size);
1646 return drbd_drain_block(mdev, data_size);
1647 }
1648
1649 /* get_ldev(mdev) successful.
1650 * Corresponding put_ldev done either below (on various errors),
1651 * or in drbd_endio_write_sec, if we successfully submit the data at
1652 * the end of this function. */
1653
1654 sector = be64_to_cpu(p->sector);
1655 e = read_in_block(mdev, p->block_id, sector, data_size);
1656 if (!e) {
1657 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001658 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001659 }
1660
Philipp Reisnerb411b362009-09-25 16:07:19 -07001661 e->w.cb = e_end_block;
1662
Lars Ellenberg688593c2010-11-17 22:25:03 +01001663 dp_flags = be32_to_cpu(p->dp_flags);
1664 rw |= wire_flags_to_bio(mdev, dp_flags);
1665
1666 if (dp_flags & DP_MAY_SET_IN_SYNC)
1667 e->flags |= EE_MAY_SET_IN_SYNC;
1668
Philipp Reisnerb411b362009-09-25 16:07:19 -07001669 spin_lock(&mdev->epoch_lock);
1670 e->epoch = mdev->current_epoch;
1671 atomic_inc(&e->epoch->epoch_size);
1672 atomic_inc(&e->epoch->active);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001673 spin_unlock(&mdev->epoch_lock);
1674
Philipp Reisnerb411b362009-09-25 16:07:19 -07001675 /* I'm the receiver, I do hold a net_cnt reference. */
1676 if (!mdev->net_conf->two_primaries) {
1677 spin_lock_irq(&mdev->req_lock);
1678 } else {
1679 /* don't get the req_lock yet,
1680 * we may sleep in drbd_wait_peer_seq */
1681 const int size = e->size;
1682 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1683 DEFINE_WAIT(wait);
1684 struct drbd_request *i;
1685 struct hlist_node *n;
1686 struct hlist_head *slot;
1687 int first;
1688
1689 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1690 BUG_ON(mdev->ee_hash == NULL);
1691 BUG_ON(mdev->tl_hash == NULL);
1692
1693 /* conflict detection and handling:
1694 * 1. wait on the sequence number,
1695 * in case this data packet overtook ACK packets.
1696 * 2. check our hash tables for conflicting requests.
1697 * we only need to walk the tl_hash, since an ee can not
1698 * have a conflict with an other ee: on the submitting
1699 * node, the corresponding req had already been conflicting,
1700 * and a conflicting req is never sent.
1701 *
1702 * Note: for two_primaries, we are protocol C,
1703 * so there cannot be any request that is DONE
1704 * but still on the transfer log.
1705 *
1706 * unconditionally add to the ee_hash.
1707 *
1708 * if no conflicting request is found:
1709 * submit.
1710 *
1711 * if any conflicting request is found
1712 * that has not yet been acked,
1713 * AND I have the "discard concurrent writes" flag:
1714 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1715 *
1716 * if any conflicting request is found:
1717 * block the receiver, waiting on misc_wait
1718 * until no more conflicting requests are there,
1719 * or we get interrupted (disconnect).
1720 *
1721 * we do not just write after local io completion of those
1722 * requests, but only after req is done completely, i.e.
1723 * we wait for the P_DISCARD_ACK to arrive!
1724 *
1725 * then proceed normally, i.e. submit.
1726 */
1727 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1728 goto out_interrupted;
1729
1730 spin_lock_irq(&mdev->req_lock);
1731
1732 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1733
1734#define OVERLAPS overlaps(i->sector, i->size, sector, size)
1735 slot = tl_hash_slot(mdev, sector);
1736 first = 1;
1737 for (;;) {
1738 int have_unacked = 0;
1739 int have_conflict = 0;
1740 prepare_to_wait(&mdev->misc_wait, &wait,
1741 TASK_INTERRUPTIBLE);
1742 hlist_for_each_entry(i, n, slot, colision) {
1743 if (OVERLAPS) {
1744 /* only ALERT on first iteration,
1745 * we may be woken up early... */
1746 if (first)
1747 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1748 " new: %llus +%u; pending: %llus +%u\n",
1749 current->comm, current->pid,
1750 (unsigned long long)sector, size,
1751 (unsigned long long)i->sector, i->size);
1752 if (i->rq_state & RQ_NET_PENDING)
1753 ++have_unacked;
1754 ++have_conflict;
1755 }
1756 }
1757#undef OVERLAPS
1758 if (!have_conflict)
1759 break;
1760
1761 /* Discard Ack only for the _first_ iteration */
1762 if (first && discard && have_unacked) {
1763 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1764 (unsigned long long)sector);
1765 inc_unacked(mdev);
1766 e->w.cb = e_send_discard_ack;
1767 list_add_tail(&e->w.list, &mdev->done_ee);
1768
1769 spin_unlock_irq(&mdev->req_lock);
1770
1771 /* we could probably send that P_DISCARD_ACK ourselves,
1772 * but I don't like the receiver using the msock */
1773
1774 put_ldev(mdev);
1775 wake_asender(mdev);
1776 finish_wait(&mdev->misc_wait, &wait);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001777 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001778 }
1779
1780 if (signal_pending(current)) {
1781 hlist_del_init(&e->colision);
1782
1783 spin_unlock_irq(&mdev->req_lock);
1784
1785 finish_wait(&mdev->misc_wait, &wait);
1786 goto out_interrupted;
1787 }
1788
1789 spin_unlock_irq(&mdev->req_lock);
1790 if (first) {
1791 first = 0;
1792 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1793 "sec=%llus\n", (unsigned long long)sector);
1794 } else if (discard) {
1795 /* we had none on the first iteration.
1796 * there must be none now. */
1797 D_ASSERT(have_unacked == 0);
1798 }
1799 schedule();
1800 spin_lock_irq(&mdev->req_lock);
1801 }
1802 finish_wait(&mdev->misc_wait, &wait);
1803 }
1804
1805 list_add(&e->w.list, &mdev->active_ee);
1806 spin_unlock_irq(&mdev->req_lock);
1807
1808 switch (mdev->net_conf->wire_protocol) {
1809 case DRBD_PROT_C:
1810 inc_unacked(mdev);
1811 /* corresponding dec_unacked() in e_end_block()
1812 * respective _drbd_clear_done_ee */
1813 break;
1814 case DRBD_PROT_B:
1815 /* I really don't like it that the receiver thread
1816 * sends on the msock, but anyways */
1817 drbd_send_ack(mdev, P_RECV_ACK, e);
1818 break;
1819 case DRBD_PROT_A:
1820 /* nothing to do */
1821 break;
1822 }
1823
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001824 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001825 /* In case we have the only disk of the cluster: mark out of sync, and cover this write by the activity log. */
1826 drbd_set_out_of_sync(mdev, e->sector, e->size);
1827 e->flags |= EE_CALL_AL_COMPLETE_IO;
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001828 e->flags &= ~EE_MAY_SET_IN_SYNC;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001829 drbd_al_begin_io(mdev, e->sector);
1830 }
1831
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001832 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001833 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001834
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001835 /* drbd_submit_ee currently fails for one reason only:
1836 * not being able to allocate enough bios.
1837 * Is dropping the connection going to help? */
1838 spin_lock_irq(&mdev->req_lock);
1839 list_del(&e->w.list);
1840 hlist_del_init(&e->colision);
1841 spin_unlock_irq(&mdev->req_lock);
1842 if (e->flags & EE_CALL_AL_COMPLETE_IO)
1843 drbd_al_complete_io(mdev, e->sector);
1844
Philipp Reisnerb411b362009-09-25 16:07:19 -07001845out_interrupted:
1846 /* yes, the epoch_size now is imbalanced.
1847 * but we drop the connection anyways, so we don't have a chance to
1848 * receive a barrier... atomic_inc(&mdev->epoch_size); */
1849 put_ldev(mdev);
1850 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001851 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001852}
1853
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001854/* We may throttle resync, if the lower device seems to be busy,
1855 * and current sync rate is above c_min_rate.
1856 *
1857 * To decide whether or not the lower device is busy, we use a scheme similar
 1858 * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant"
1859 * (more than 64 sectors) of activity we cannot account for with our own resync
1860 * activity, it obviously is "busy".
1861 *
1862 * The current sync rate used here uses only the most recent two step marks,
1863 * to have a short time average so we can react faster.
1864 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01001865int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001866{
1867 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
1868 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01001869 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001870 int curr_events;
1871 int throttle = 0;
1872
1873 /* feature disabled? */
1874 if (mdev->sync_conf.c_min_rate == 0)
1875 return 0;
1876
Philipp Reisnere3555d82010-11-07 15:56:29 +01001877 spin_lock_irq(&mdev->al_lock);
1878 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
1879 if (tmp) {
1880 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
1881 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
1882 spin_unlock_irq(&mdev->al_lock);
1883 return 0;
1884 }
1885 /* Do not slow down if app IO is already waiting for this extent */
1886 }
1887 spin_unlock_irq(&mdev->al_lock);
1888
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001889 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
1890 (int)part_stat_read(&disk->part0, sectors[1]) -
1891 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01001892
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001893 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
1894 unsigned long rs_left;
1895 int i;
1896
1897 mdev->rs_last_events = curr_events;
1898
1899 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
1900 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01001901 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
1902
1903 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
1904 rs_left = mdev->ov_left;
1905 else
1906 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001907
1908 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
1909 if (!dt)
1910 dt++;
1911 db = mdev->rs_mark_left[i] - rs_left;
1912 dbdt = Bit2KB(db/dt);
1913
1914 if (dbdt > mdev->sync_conf.c_min_rate)
1915 throttle = 1;
1916 }
1917 return throttle;
1918}
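/* Worked example, illustrative only, assuming the usual 4 KiB of backing
 * storage per bitmap bit: if the most recent sync mark is 3 seconds old
 * and 1536 bits were cleared since, then
 *	dt   = 3 seconds
 *	db   = 1536 bits
 *	dbdt = Bit2KB(1536 / 3) = 512 * 4 = 2048 KiB/s,
 * so with a configured c_min_rate of, say, 250 KiB/s the resync is fast
 * enough that it gets throttled while the backing device looks busy. */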
1919
1920
Philipp Reisner02918be2010-08-20 14:35:10 +02001921static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001922{
1923 sector_t sector;
1924 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1925 struct drbd_epoch_entry *e;
1926 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001927 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001928 unsigned int fault_type;
Philipp Reisner02918be2010-08-20 14:35:10 +02001929 struct p_block_req *p = &mdev->data.rbuf.block_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001930
1931 sector = be64_to_cpu(p->sector);
1932 size = be32_to_cpu(p->blksize);
1933
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01001934 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001935 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1936 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001937 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001938 }
1939 if (sector + (size>>9) > capacity) {
1940 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1941 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001942 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001943 }
1944
1945 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001946 verb = 1;
1947 switch (cmd) {
1948 case P_DATA_REQUEST:
1949 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
1950 break;
1951 case P_RS_DATA_REQUEST:
1952 case P_CSUM_RS_REQUEST:
1953 case P_OV_REQUEST:
1954 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
1955 break;
1956 case P_OV_REPLY:
1957 verb = 0;
1958 dec_rs_pending(mdev);
1959 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
1960 break;
1961 default:
1962 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
1963 cmdname(cmd));
1964 }
1965 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001966 dev_err(DEV, "Can not satisfy peer's read request, "
1967 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001968
Lars Ellenberga821cc42010-09-06 12:31:37 +02001969 /* drain possible payload */
1970 return drbd_drain_block(mdev, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001971 }
1972
1973 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1974 * "criss-cross" setup, that might cause write-out on some other DRBD,
1975 * which in turn might block on the other node at this very place. */
1976 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
1977 if (!e) {
1978 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001979 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001980 }
1981
Philipp Reisner02918be2010-08-20 14:35:10 +02001982 switch (cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001983 case P_DATA_REQUEST:
1984 e->w.cb = w_e_end_data_req;
1985 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02001986 /* application IO, don't drbd_rs_begin_io */
1987 goto submit;
1988
Philipp Reisnerb411b362009-09-25 16:07:19 -07001989 case P_RS_DATA_REQUEST:
1990 e->w.cb = w_e_end_rsdata_req;
1991 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01001992 /* used in the sector offset progress display */
1993 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001994 break;
1995
1996 case P_OV_REPLY:
1997 case P_CSUM_RS_REQUEST:
1998 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001999 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2000 if (!di)
2001 goto out_free_e;
2002
2003 di->digest_size = digest_size;
2004 di->digest = (((char *)di)+sizeof(struct digest_info));
2005
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002006 e->digest = di;
2007 e->flags |= EE_HAS_DIGEST;
2008
Philipp Reisnerb411b362009-09-25 16:07:19 -07002009 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2010 goto out_free_e;
2011
Philipp Reisner02918be2010-08-20 14:35:10 +02002012 if (cmd == P_CSUM_RS_REQUEST) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002013 D_ASSERT(mdev->agreed_pro_version >= 89);
2014 e->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002015 /* used in the sector offset progress display */
2016 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisner02918be2010-08-20 14:35:10 +02002017 } else if (cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002018 /* track progress, we may need to throttle */
2019 atomic_add(size >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002020 e->w.cb = w_e_end_ov_reply;
2021 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002022 /* drbd_rs_begin_io done when we sent this request,
2023 * but accounting still needs to be done. */
2024 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002025 }
2026 break;
2027
2028 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002029 if (mdev->ov_start_sector == ~(sector_t)0 &&
2030 mdev->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002031 unsigned long now = jiffies;
2032 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002033 mdev->ov_start_sector = sector;
2034 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002035 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2036 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002037 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2038 mdev->rs_mark_left[i] = mdev->ov_left;
2039 mdev->rs_mark_time[i] = now;
2040 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002041 dev_info(DEV, "Online Verify start sector: %llu\n",
2042 (unsigned long long)sector);
2043 }
2044 e->w.cb = w_e_end_ov_req;
2045 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002046 break;
2047
Philipp Reisnerb411b362009-09-25 16:07:19 -07002048 default:
2049 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002050 cmdname(cmd));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002051 fault_type = DRBD_FAULT_MAX;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002052 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002053 }
2054
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002055 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2056 * wrt the receiver, but it is not as straightforward as it may seem.
2057 * Various places in the resync start and stop logic assume resync
2058 * requests are processed in order, requeuing this on the worker thread
2059 * introduces a bunch of new code for synchronization between threads.
2060 *
2061 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2062 * "forever", throttling after drbd_rs_begin_io will lock that extent
2063 * for application writes for the same time. For now, just throttle
2064 * here, where the rest of the code expects the receiver to sleep for
2065 * a while, anyways.
2066 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002067
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002068 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2069 * this defers syncer requests for some time, before letting at least
2070 * on request through. The resync controller on the receiving side
2071 * will adapt to the incoming rate accordingly.
2072 *
2073 * We cannot throttle here if remote is Primary/SyncTarget:
2074 * we would also throttle its application reads.
2075 * In that case, throttling is done on the SyncTarget only.
2076 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002077 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2078 schedule_timeout_uninterruptible(HZ/10);
2079 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002080 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002081
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002082submit_for_resync:
2083 atomic_add(size >> 9, &mdev->rs_sect_ev);
2084
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002085submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002086 inc_unacked(mdev);
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002087 spin_lock_irq(&mdev->req_lock);
2088 list_add_tail(&e->w.list, &mdev->read_ee);
2089 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002090
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002091 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002092 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002093
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002094 /* drbd_submit_ee currently fails for one reason only:
2095 * not being able to allocate enough bios.
2096 * Is dropping the connection going to help? */
2097 spin_lock_irq(&mdev->req_lock);
2098 list_del(&e->w.list);
2099 spin_unlock_irq(&mdev->req_lock);
2100 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2101
Philipp Reisnerb411b362009-09-25 16:07:19 -07002102out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002103 put_ldev(mdev);
2104 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002105 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002106}
2107
2108static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2109{
2110 int self, peer, rv = -100;
2111 unsigned long ch_self, ch_peer;
2112
2113 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2114 peer = mdev->p_uuid[UI_BITMAP] & 1;
2115
2116 ch_peer = mdev->p_uuid[UI_SIZE];
2117 ch_self = mdev->comm_bm_set;
2118
2119 switch (mdev->net_conf->after_sb_0p) {
2120 case ASB_CONSENSUS:
2121 case ASB_DISCARD_SECONDARY:
2122 case ASB_CALL_HELPER:
2123 dev_err(DEV, "Configuration error.\n");
2124 break;
2125 case ASB_DISCONNECT:
2126 break;
2127 case ASB_DISCARD_YOUNGER_PRI:
2128 if (self == 0 && peer == 1) {
2129 rv = -1;
2130 break;
2131 }
2132 if (self == 1 && peer == 0) {
2133 rv = 1;
2134 break;
2135 }
2136 /* Else fall through to one of the other strategies... */
2137 case ASB_DISCARD_OLDER_PRI:
2138 if (self == 0 && peer == 1) {
2139 rv = 1;
2140 break;
2141 }
2142 if (self == 1 && peer == 0) {
2143 rv = -1;
2144 break;
2145 }
2146 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002147 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002148 "Using discard-least-changes instead\n");
2149 case ASB_DISCARD_ZERO_CHG:
2150 if (ch_peer == 0 && ch_self == 0) {
2151 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2152 ? -1 : 1;
2153 break;
2154 } else {
2155 if (ch_peer == 0) { rv = 1; break; }
2156 if (ch_self == 0) { rv = -1; break; }
2157 }
2158 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2159 break;
2160 case ASB_DISCARD_LEAST_CHG:
2161 if (ch_self < ch_peer)
2162 rv = -1;
2163 else if (ch_self > ch_peer)
2164 rv = 1;
2165 else /* ( ch_self == ch_peer ) */
2166 /* Well, then use something else. */
2167 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2168 ? -1 : 1;
2169 break;
2170 case ASB_DISCARD_LOCAL:
2171 rv = -1;
2172 break;
2173 case ASB_DISCARD_REMOTE:
2174 rv = 1;
2175 }
2176
2177 return rv;
2178}
2179
2180static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2181{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002182 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002183
2184 switch (mdev->net_conf->after_sb_1p) {
2185 case ASB_DISCARD_YOUNGER_PRI:
2186 case ASB_DISCARD_OLDER_PRI:
2187 case ASB_DISCARD_LEAST_CHG:
2188 case ASB_DISCARD_LOCAL:
2189 case ASB_DISCARD_REMOTE:
2190 dev_err(DEV, "Configuration error.\n");
2191 break;
2192 case ASB_DISCONNECT:
2193 break;
2194 case ASB_CONSENSUS:
2195 hg = drbd_asb_recover_0p(mdev);
2196 if (hg == -1 && mdev->state.role == R_SECONDARY)
2197 rv = hg;
2198 if (hg == 1 && mdev->state.role == R_PRIMARY)
2199 rv = hg;
2200 break;
2201 case ASB_VIOLENTLY:
2202 rv = drbd_asb_recover_0p(mdev);
2203 break;
2204 case ASB_DISCARD_SECONDARY:
2205 return mdev->state.role == R_PRIMARY ? 1 : -1;
2206 case ASB_CALL_HELPER:
2207 hg = drbd_asb_recover_0p(mdev);
2208 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002209 enum drbd_state_rv rv2;
2210
2211 drbd_set_role(mdev, R_SECONDARY, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002212 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2213 * we might be here in C_WF_REPORT_PARAMS which is transient.
2214 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002215 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2216 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002217 drbd_khelper(mdev, "pri-lost-after-sb");
2218 } else {
2219 dev_warn(DEV, "Successfully gave up primary role.\n");
2220 rv = hg;
2221 }
2222 } else
2223 rv = hg;
2224 }
2225
2226 return rv;
2227}
2228
2229static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2230{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002231 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002232
2233 switch (mdev->net_conf->after_sb_2p) {
2234 case ASB_DISCARD_YOUNGER_PRI:
2235 case ASB_DISCARD_OLDER_PRI:
2236 case ASB_DISCARD_LEAST_CHG:
2237 case ASB_DISCARD_LOCAL:
2238 case ASB_DISCARD_REMOTE:
2239 case ASB_CONSENSUS:
2240 case ASB_DISCARD_SECONDARY:
2241 dev_err(DEV, "Configuration error.\n");
2242 break;
2243 case ASB_VIOLENTLY:
2244 rv = drbd_asb_recover_0p(mdev);
2245 break;
2246 case ASB_DISCONNECT:
2247 break;
2248 case ASB_CALL_HELPER:
2249 hg = drbd_asb_recover_0p(mdev);
2250 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002251 enum drbd_state_rv rv2;
2252
Philipp Reisnerb411b362009-09-25 16:07:19 -07002253 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2254 * we might be here in C_WF_REPORT_PARAMS which is transient.
2255 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002256 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2257 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002258 drbd_khelper(mdev, "pri-lost-after-sb");
2259 } else {
2260 dev_warn(DEV, "Successfully gave up primary role.\n");
2261 rv = hg;
2262 }
2263 } else
2264 rv = hg;
2265 }
2266
2267 return rv;
2268}
2269
2270static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2271 u64 bits, u64 flags)
2272{
2273 if (!uuid) {
2274 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2275 return;
2276 }
2277 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2278 text,
2279 (unsigned long long)uuid[UI_CURRENT],
2280 (unsigned long long)uuid[UI_BITMAP],
2281 (unsigned long long)uuid[UI_HISTORY_START],
2282 (unsigned long long)uuid[UI_HISTORY_END],
2283 (unsigned long long)bits,
2284 (unsigned long long)flags);
2285}
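/* For illustration (made-up values): with the format string above, the
 * log line reads e.g.
 *   self 0123456789ABCDEF:FEDCBA9876543210:0000000000000004:0000000000000002 bits:42 flags:2
 * i.e. the current:bitmap:history-start:history-end UUIDs, then the number
 * of out-of-sync bits and the peer flags. */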
2286
2287/*
2288 100 after split brain try auto recover
2289 2 C_SYNC_SOURCE set BitMap
2290 1 C_SYNC_SOURCE use BitMap
2291 0 no Sync
2292 -1 C_SYNC_TARGET use BitMap
2293 -2 C_SYNC_TARGET set BitMap
2294 -100 after split brain, disconnect
2295-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002296-1091 requires proto 91
2297-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002298 */
2299static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2300{
2301 u64 self, peer;
2302 int i, j;
2303
2304 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2305 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2306
2307 *rule_nr = 10;
2308 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2309 return 0;
2310
2311 *rule_nr = 20;
2312 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2313 peer != UUID_JUST_CREATED)
2314 return -2;
2315
2316 *rule_nr = 30;
2317 if (self != UUID_JUST_CREATED &&
2318 (peer == UUID_JUST_CREATED || peer == (u64)0))
2319 return 2;
2320
2321 if (self == peer) {
2322 int rct, dc; /* roles at crash time */
2323
2324 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2325
2326 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002327 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002328
2329 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2330 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2331 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2332 drbd_uuid_set_bm(mdev, 0UL);
2333
2334 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2335 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2336 *rule_nr = 34;
2337 } else {
2338 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2339 *rule_nr = 36;
2340 }
2341
2342 return 1;
2343 }
2344
2345 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2346
2347 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002348 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002349
2350 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2351 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2352 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2353
2354 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2355 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2356 mdev->p_uuid[UI_BITMAP] = 0UL;
2357
2358 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2359 *rule_nr = 35;
2360 } else {
2361 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2362 *rule_nr = 37;
2363 }
2364
2365 return -1;
2366 }
2367
2368 /* Common power [off|failure] */
2369 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2370 (mdev->p_uuid[UI_FLAGS] & 2);
2371 /* lowest bit is set when we were primary,
2372 * next bit (weight 2) is set when peer was primary */
2373 *rule_nr = 40;
2374
2375 switch (rct) {
2376 case 0: /* !self_pri && !peer_pri */ return 0;
2377 case 1: /* self_pri && !peer_pri */ return 1;
2378 case 2: /* !self_pri && peer_pri */ return -1;
2379 case 3: /* self_pri && peer_pri */
2380 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2381 return dc ? -1 : 1;
2382 }
2383 }
2384
2385 *rule_nr = 50;
2386 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2387 if (self == peer)
2388 return -1;
2389
2390 *rule_nr = 51;
2391 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2392 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002393 if (mdev->agreed_pro_version < 96 ?
2394 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2395 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2396 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002397 /* The last P_SYNC_UUID did not get through. Undo the modifications
 2398 of the peer's UUIDs made at its last start of resync as sync source. */
2399
2400 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002401 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002402
2403 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2404 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002405
 2406 dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
2407 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2408
Philipp Reisnerb411b362009-09-25 16:07:19 -07002409 return -1;
2410 }
2411 }
2412
2413 *rule_nr = 60;
2414 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2415 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2416 peer = mdev->p_uuid[i] & ~((u64)1);
2417 if (self == peer)
2418 return -2;
2419 }
2420
2421 *rule_nr = 70;
2422 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2423 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2424 if (self == peer)
2425 return 1;
2426
2427 *rule_nr = 71;
2428 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2429 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002430 if (mdev->agreed_pro_version < 96 ?
2431 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2432 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2433 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002434 /* The last P_SYNC_UUID did not get through. Undo the modifications
 2435 of our UUIDs made at our last start of resync as sync source. */
2436
2437 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002438 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002439
2440 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2441 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2442
Philipp Reisner4a23f262011-01-11 17:42:17 +01002443 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002444 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2445 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2446
2447 return 1;
2448 }
2449 }
2450
2451
2452 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002453 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002454 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2455 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2456 if (self == peer)
2457 return 2;
2458 }
2459
2460 *rule_nr = 90;
2461 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2462 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2463 if (self == peer && self != ((u64)0))
2464 return 100;
2465
2466 *rule_nr = 100;
2467 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2468 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2469 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2470 peer = mdev->p_uuid[j] & ~((u64)1);
2471 if (self == peer)
2472 return -100;
2473 }
2474 }
2475
2476 return -1000;
2477}
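/* Note on the encoding above (illustrative): a return value of -1091 is
 * decoded by drbd_sync_handshake() below in its hg < -1000 branch as
 * "requires at least protocol -hg - 1000 = 91 on both sides". */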
2478
2479/* drbd_sync_handshake() returns the new conn state on success, or
2480 CONN_MASK (-1) on failure.
2481 */
2482static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2483 enum drbd_disk_state peer_disk) __must_hold(local)
2484{
2485 int hg, rule_nr;
2486 enum drbd_conns rv = C_MASK;
2487 enum drbd_disk_state mydisk;
2488
2489 mydisk = mdev->state.disk;
2490 if (mydisk == D_NEGOTIATING)
2491 mydisk = mdev->new_state_tmp.disk;
2492
2493 dev_info(DEV, "drbd_sync_handshake:\n");
2494 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2495 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2496 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2497
2498 hg = drbd_uuid_compare(mdev, &rule_nr);
2499
2500 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2501
2502 if (hg == -1000) {
2503 dev_alert(DEV, "Unrelated data, aborting!\n");
2504 return C_MASK;
2505 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002506 if (hg < -1000) {
2507 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002508 return C_MASK;
2509 }
2510
2511 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2512 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2513 int f = (hg == -100) || abs(hg) == 2;
2514 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2515 if (f)
2516 hg = hg*2;
2517 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2518 hg > 0 ? "source" : "target");
2519 }
2520
Adam Gandelman3a11a482010-04-08 16:48:23 -07002521 if (abs(hg) == 100)
2522 drbd_khelper(mdev, "initial-split-brain");
2523
Philipp Reisnerb411b362009-09-25 16:07:19 -07002524 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2525 int pcount = (mdev->state.role == R_PRIMARY)
2526 + (peer_role == R_PRIMARY);
2527 int forced = (hg == -100);
2528
2529 switch (pcount) {
2530 case 0:
2531 hg = drbd_asb_recover_0p(mdev);
2532 break;
2533 case 1:
2534 hg = drbd_asb_recover_1p(mdev);
2535 break;
2536 case 2:
2537 hg = drbd_asb_recover_2p(mdev);
2538 break;
2539 }
2540 if (abs(hg) < 100) {
2541 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2542 "automatically solved. Sync from %s node\n",
2543 pcount, (hg < 0) ? "peer" : "this");
2544 if (forced) {
2545 dev_warn(DEV, "Doing a full sync, since"
2546 " UUIDs where ambiguous.\n");
2547 hg = hg*2;
2548 }
2549 }
2550 }
2551
2552 if (hg == -100) {
2553 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2554 hg = -1;
2555 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2556 hg = 1;
2557
2558 if (abs(hg) < 100)
2559 dev_warn(DEV, "Split-Brain detected, manually solved. "
2560 "Sync from %s node\n",
2561 (hg < 0) ? "peer" : "this");
2562 }
2563
2564 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01002565 /* FIXME this log message is not correct if we end up here
2566 * after an attempted attach on a diskless node.
2567 * We just refuse to attach -- well, we drop the "connection"
2568 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07002569 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002570 drbd_khelper(mdev, "split-brain");
2571 return C_MASK;
2572 }
2573
2574 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2575 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2576 return C_MASK;
2577 }
2578
2579 if (hg < 0 && /* by intention we do not use mydisk here. */
2580 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2581 switch (mdev->net_conf->rr_conflict) {
2582 case ASB_CALL_HELPER:
2583 drbd_khelper(mdev, "pri-lost");
2584 /* fall through */
2585 case ASB_DISCONNECT:
2586 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2587 return C_MASK;
2588 case ASB_VIOLENTLY:
 2589 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
2590 "assumption\n");
2591 }
2592 }
2593
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002594 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2595 if (hg == 0)
2596 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2597 else
2598 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2599 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2600 abs(hg) >= 2 ? "full" : "bit-map based");
2601 return C_MASK;
2602 }
2603
Philipp Reisnerb411b362009-09-25 16:07:19 -07002604 if (abs(hg) >= 2) {
2605 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2606 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2607 return C_MASK;
2608 }
2609
2610 if (hg > 0) { /* become sync source. */
2611 rv = C_WF_BITMAP_S;
2612 } else if (hg < 0) { /* become sync target */
2613 rv = C_WF_BITMAP_T;
2614 } else {
2615 rv = C_CONNECTED;
2616 if (drbd_bm_total_weight(mdev)) {
2617 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2618 drbd_bm_total_weight(mdev));
2619 }
2620 }
2621
2622 return rv;
2623}
2624
2625/* returns 1 if invalid */
2626static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2627{
2628 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2629 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2630 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2631 return 0;
2632
2633 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2634 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2635 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2636 return 1;
2637
2638 /* everything else is valid if they are equal on both sides. */
2639 if (peer == self)
2640 return 0;
2641
 2642 /* everything else is invalid. */
2643 return 1;
2644}
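/* Examples, derived from the rules above (illustrative only):
 *   peer=discard-remote, self=discard-local  -> valid   (mirrored pair)
 *   peer=discard-remote, self=discard-remote -> invalid (both would keep
 *                                               their own data)
 *   peer=disconnect,     self=disconnect     -> valid   (equal)
 *   peer=disconnect,     self=call-helper    -> invalid (unequal)
 */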
2645
Philipp Reisner02918be2010-08-20 14:35:10 +02002646static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002647{
Philipp Reisner02918be2010-08-20 14:35:10 +02002648 struct p_protocol *p = &mdev->data.rbuf.protocol;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002649 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002650 int p_want_lose, p_two_primaries, cf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002651 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2652
Philipp Reisnerb411b362009-09-25 16:07:19 -07002653 p_proto = be32_to_cpu(p->protocol);
2654 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2655 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2656 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002657 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002658 cf = be32_to_cpu(p->conn_flags);
2659 p_want_lose = cf & CF_WANT_LOSE;
2660
2661 clear_bit(CONN_DRY_RUN, &mdev->flags);
2662
2663 if (cf & CF_DRY_RUN)
2664 set_bit(CONN_DRY_RUN, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002665
2666 if (p_proto != mdev->net_conf->wire_protocol) {
2667 dev_err(DEV, "incompatible communication protocols\n");
2668 goto disconnect;
2669 }
2670
2671 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2672 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2673 goto disconnect;
2674 }
2675
2676 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2677 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2678 goto disconnect;
2679 }
2680
2681 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2682 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2683 goto disconnect;
2684 }
2685
2686 if (p_want_lose && mdev->net_conf->want_lose) {
2687 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2688 goto disconnect;
2689 }
2690
2691 if (p_two_primaries != mdev->net_conf->two_primaries) {
2692 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2693 goto disconnect;
2694 }
2695
2696 if (mdev->agreed_pro_version >= 87) {
2697 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2698
2699 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002700 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002701
2702 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2703 if (strcmp(p_integrity_alg, my_alg)) {
2704 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2705 goto disconnect;
2706 }
2707 dev_info(DEV, "data-integrity-alg: %s\n",
2708 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2709 }
2710
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002711 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002712
2713disconnect:
2714 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002715 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002716}
2717
2718/* helper function
2719 * input: alg name, feature name
2720 * return: NULL (alg name was "")
2721 * ERR_PTR(error) if something goes wrong
2722 * or the crypto hash ptr, if it worked out ok. */
2723struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2724 const char *alg, const char *name)
2725{
2726 struct crypto_hash *tfm;
2727
2728 if (!alg[0])
2729 return NULL;
2730
2731 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2732 if (IS_ERR(tfm)) {
2733 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2734 alg, name, PTR_ERR(tfm));
2735 return tfm;
2736 }
2737 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2738 crypto_free_hash(tfm);
2739 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2740 return ERR_PTR(-EINVAL);
2741 }
2742 return tfm;
2743}
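/* Typical caller pattern, sketched here for illustration (mirroring the
 * use in receive_SyncParam() below); use_digest() is hypothetical:
 *
 *	tfm = drbd_crypto_alloc_digest_safe(mdev, alg, "verify-alg");
 *	if (IS_ERR(tfm))
 *		goto disconnect;  (error was already logged)
 *	if (tfm)
 *		use_digest(tfm);  (NULL just means: feature not enabled)
 */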
2744
Philipp Reisner02918be2010-08-20 14:35:10 +02002745static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002746{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002747 int ok = true;
Philipp Reisner02918be2010-08-20 14:35:10 +02002748 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002749 unsigned int header_size, data_size, exp_max_sz;
2750 struct crypto_hash *verify_tfm = NULL;
2751 struct crypto_hash *csums_tfm = NULL;
2752 const int apv = mdev->agreed_pro_version;
Philipp Reisner778f2712010-07-06 11:14:00 +02002753 int *rs_plan_s = NULL;
2754 int fifo_size = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002755
2756 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2757 : apv == 88 ? sizeof(struct p_rs_param)
2758 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002759 : apv <= 94 ? sizeof(struct p_rs_param_89)
2760 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002761
Philipp Reisner02918be2010-08-20 14:35:10 +02002762 if (packet_size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002763 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002764 packet_size, exp_max_sz);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002765 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002766 }
2767
2768 if (apv <= 88) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002769 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2770 data_size = packet_size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002771 } else if (apv <= 94) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002772 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2773 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002774 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002775 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02002776 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2777 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002778 D_ASSERT(data_size == 0);
2779 }
2780
2781 /* initialize verify_alg and csums_alg */
2782 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2783
Philipp Reisner02918be2010-08-20 14:35:10 +02002784 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002785 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002786
2787 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2788
2789 if (apv >= 88) {
2790 if (apv == 88) {
2791 if (data_size > SHARED_SECRET_MAX) {
2792 dev_err(DEV, "verify-alg too long, "
2793 "peer wants %u, accepting only %u byte\n",
2794 data_size, SHARED_SECRET_MAX);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002795 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002796 }
2797
2798 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002799 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002800
2801 /* we expect NUL terminated string */
2802 /* but just in case someone tries to be evil */
2803 D_ASSERT(p->verify_alg[data_size-1] == 0);
2804 p->verify_alg[data_size-1] = 0;
2805
2806 } else /* apv >= 89 */ {
2807 /* we still expect NUL terminated strings */
2808 /* but just in case someone tries to be evil */
2809 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2810 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2811 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2812 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2813 }
2814
2815 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2816 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2817 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2818 mdev->sync_conf.verify_alg, p->verify_alg);
2819 goto disconnect;
2820 }
2821 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2822 p->verify_alg, "verify-alg");
2823 if (IS_ERR(verify_tfm)) {
2824 verify_tfm = NULL;
2825 goto disconnect;
2826 }
2827 }
2828
2829 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2830 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2831 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2832 mdev->sync_conf.csums_alg, p->csums_alg);
2833 goto disconnect;
2834 }
2835 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2836 p->csums_alg, "csums-alg");
2837 if (IS_ERR(csums_tfm)) {
2838 csums_tfm = NULL;
2839 goto disconnect;
2840 }
2841 }
2842
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002843 if (apv > 94) {
2844 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2845 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2846 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2847 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2848 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02002849
2850 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2851 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2852 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2853 if (!rs_plan_s) {
 2854 dev_err(DEV, "kzalloc of fifo_buffer failed");
2855 goto disconnect;
2856 }
2857 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002858 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002859
2860 spin_lock(&mdev->peer_seq_lock);
2861 /* lock against drbd_nl_syncer_conf() */
2862 if (verify_tfm) {
2863 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2864 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2865 crypto_free_hash(mdev->verify_tfm);
2866 mdev->verify_tfm = verify_tfm;
2867 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2868 }
2869 if (csums_tfm) {
2870 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2871 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2872 crypto_free_hash(mdev->csums_tfm);
2873 mdev->csums_tfm = csums_tfm;
2874 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2875 }
Philipp Reisner778f2712010-07-06 11:14:00 +02002876 if (fifo_size != mdev->rs_plan_s.size) {
2877 kfree(mdev->rs_plan_s.values);
2878 mdev->rs_plan_s.values = rs_plan_s;
2879 mdev->rs_plan_s.size = fifo_size;
2880 mdev->rs_planed = 0;
2881 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002882 spin_unlock(&mdev->peer_seq_lock);
2883 }
2884
2885 return ok;
2886disconnect:
2887 /* just for completeness: actually not needed,
2888 * as this is not reached if csums_tfm was ok. */
2889 crypto_free_hash(csums_tfm);
2890 /* but free the verify_tfm again, if csums_tfm did not work out */
2891 crypto_free_hash(verify_tfm);
2892 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002893 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002894}
2895
2896static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2897{
2898 /* sorry, we currently have no working implementation
2899 * of distributed TCQ */
2900}
2901
2902/* warn if the arguments differ by more than 12.5% */
2903static void warn_if_differ_considerably(struct drbd_conf *mdev,
2904 const char *s, sector_t a, sector_t b)
2905{
2906 sector_t d;
2907 if (a == 0 || b == 0)
2908 return;
2909 d = (a > b) ? (a - b) : (b - a);
2910 if (d > (a>>3) || d > (b>>3))
2911 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2912 (unsigned long long)a, (unsigned long long)b);
2913}
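/* Worked example (illustrative): a = 1000, b = 850 sectors gives
 * d = 150; a>>3 = 125, so d > (a>>3) and the warning fires.  The
 * shift by three is what implements the 12.5% threshold. */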
2914
Philipp Reisner02918be2010-08-20 14:35:10 +02002915static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002916{
Philipp Reisner02918be2010-08-20 14:35:10 +02002917 struct p_sizes *p = &mdev->data.rbuf.sizes;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002918 enum determine_dev_size dd = unchanged;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002919 unsigned int max_bio_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002920 sector_t p_size, p_usize, my_usize;
2921 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01002922 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002923
Philipp Reisnerb411b362009-09-25 16:07:19 -07002924 p_size = be64_to_cpu(p->d_size);
2925 p_usize = be64_to_cpu(p->u_size);
2926
2927 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2928 dev_err(DEV, "some backing storage is needed\n");
2929 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002930 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002931 }
2932
2933 /* just store the peer's disk size for now.
2934 * we still need to figure out whether we accept that. */
2935 mdev->p_size = p_size;
2936
Philipp Reisnerb411b362009-09-25 16:07:19 -07002937 if (get_ldev(mdev)) {
2938 warn_if_differ_considerably(mdev, "lower level device sizes",
2939 p_size, drbd_get_max_capacity(mdev->ldev));
2940 warn_if_differ_considerably(mdev, "user requested size",
2941 p_usize, mdev->ldev->dc.disk_size);
2942
2943 /* if this is the first connect, or an otherwise expected
2944 * param exchange, choose the minimum */
2945 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2946 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2947 p_usize);
2948
2949 my_usize = mdev->ldev->dc.disk_size;
2950
2951 if (mdev->ldev->dc.disk_size != p_usize) {
2952 mdev->ldev->dc.disk_size = p_usize;
2953 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2954 (unsigned long)mdev->ldev->dc.disk_size);
2955 }
2956
2957 /* Never shrink a device with usable data during connect.
2958 But allow online shrinking if we are connected. */
Philipp Reisnera393db62009-12-22 13:35:52 +01002959 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
Philipp Reisnerb411b362009-09-25 16:07:19 -07002960 drbd_get_capacity(mdev->this_bdev) &&
2961 mdev->state.disk >= D_OUTDATED &&
2962 mdev->state.conn < C_CONNECTED) {
2963 dev_err(DEV, "The peer's disk size is too small!\n");
2964 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2965 mdev->ldev->dc.disk_size = my_usize;
2966 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002967 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002968 }
2969 put_ldev(mdev);
2970 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002971
Philipp Reisnere89b5912010-03-24 17:11:33 +01002972 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002973 if (get_ldev(mdev)) {
Philipp Reisnere89b5912010-03-24 17:11:33 +01002974 dd = drbd_determin_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002975 put_ldev(mdev);
2976 if (dd == dev_size_error)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002977 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002978 drbd_md_sync(mdev);
2979 } else {
2980 /* I am diskless, need to accept the peer's size. */
2981 drbd_set_my_capacity(mdev, p_size);
2982 }
2983
Philipp Reisnerb411b362009-09-25 16:07:19 -07002984 if (get_ldev(mdev)) {
2985 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
2986 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2987 ldsc = 1;
2988 }
2989
Lars Ellenberga1c88d02010-05-14 19:16:41 +02002990 if (mdev->agreed_pro_version < 94)
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002991 max_bio_size = be32_to_cpu(p->max_bio_size);
Lars Ellenberg8979d9c2010-09-14 15:56:29 +02002992 else if (mdev->agreed_pro_version == 94)
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002993 max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
Lars Ellenberga1c88d02010-05-14 19:16:41 +02002994 else /* drbd 8.3.8 onwards */
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002995 max_bio_size = DRBD_MAX_BIO_SIZE;
Lars Ellenberga1c88d02010-05-14 19:16:41 +02002996
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002997 if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
2998 drbd_setup_queue_param(mdev, max_bio_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002999
Philipp Reisnere89b5912010-03-24 17:11:33 +01003000 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003001 put_ldev(mdev);
3002 }
3003
3004 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3005 if (be64_to_cpu(p->c_size) !=
3006 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3007 /* we have different sizes, probably peer
3008 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003009 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003010 }
3011 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3012 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3013 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003014 mdev->state.disk >= D_INCONSISTENT) {
3015 if (ddsf & DDSF_NO_RESYNC)
3016 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3017 else
3018 resync_after_online_grow(mdev);
3019 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003020 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3021 }
3022 }
3023
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003024 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003025}
3026
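/*
 * Illustrative sketch, not part of the original file: receive_sizes()
 * above picks the request size limit from the agreed protocol version.
 * Modeled in isolation (the two constants stand in for
 * DRBD_MAX_SIZE_H80_PACKET and DRBD_MAX_BIO_SIZE; their values here
 * are assumptions):
 */
#define SKETCH_MAX_SIZE_H80_PACKET (1 << 15)	/* assumed value */
#define SKETCH_MAX_BIO_SIZE        (1 << 20)	/* assumed value */

static unsigned int sketch_max_bio_size(int agreed_pro_version,
					unsigned int peer_advertised)
{
	if (agreed_pro_version < 94)
		return peer_advertised;		   /* trust the peer's field */
	if (agreed_pro_version == 94)
		return SKETCH_MAX_SIZE_H80_PACKET; /* fixed limit for proto 94 */
	return SKETCH_MAX_BIO_SIZE;		   /* drbd 8.3.8 onwards */
}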
Philipp Reisner02918be2010-08-20 14:35:10 +02003027static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003028{
Philipp Reisner02918be2010-08-20 14:35:10 +02003029 struct p_uuids *p = &mdev->data.rbuf.uuids;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003030 u64 *p_uuid;
3031 int i;
3032
Philipp Reisnerb411b362009-09-25 16:07:19 -07003033 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3034
3035 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3036 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3037
3038 kfree(mdev->p_uuid);
3039 mdev->p_uuid = p_uuid;
3040
3041 if (mdev->state.conn < C_CONNECTED &&
3042 mdev->state.disk < D_INCONSISTENT &&
3043 mdev->state.role == R_PRIMARY &&
3044 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3045 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3046 (unsigned long long)mdev->ed_uuid);
3047 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003048 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003049 }
3050
3051 if (get_ldev(mdev)) {
3052 int skip_initial_sync =
3053 mdev->state.conn == C_CONNECTED &&
3054 mdev->agreed_pro_version >= 90 &&
3055 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3056 (p_uuid[UI_FLAGS] & 8);
3057 if (skip_initial_sync) {
3058 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3059 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3060 "clear_n_write from receive_uuids");
3061 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3062 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3063 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3064 CS_VERBOSE, NULL);
3065 drbd_md_sync(mdev);
3066 }
3067 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003068 } else if (mdev->state.disk < D_INCONSISTENT &&
3069 mdev->state.role == R_PRIMARY) {
3070 /* I am a diskless primary, the peer just created a new current UUID
3071 for me. */
3072 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003073 }
3074
3075	/* Before we test the disk state, we should wait until any potentially
3076	   ongoing cluster-wide state change has finished. That is important if
3077	   we are primary and are detaching from our disk. We need to see the
3078	   new disk state... */
3079 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3080 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3081 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3082
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003083 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003084}
3085
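/*
 * Illustrative sketch, not part of the original file: the connect check
 * in receive_uuids() compares current UUIDs with the lowest bit masked
 * off, so two UUIDs that differ only in that flag bit still count as
 * the same data generation.  A minimal model of the comparison:
 */
static int sketch_same_data_generation(u64 my_ed_uuid, u64 peer_uuid)
{
	return (my_ed_uuid & ~((u64)1)) == (peer_uuid & ~((u64)1));
}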
3086/**
3087 * convert_state() - Converts the peer's view of the cluster state to our point of view
3088 * @ps: The state as seen by the peer.
3089 */
3090static union drbd_state convert_state(union drbd_state ps)
3091{
3092 union drbd_state ms;
3093
3094 static enum drbd_conns c_tab[] = {
3095 [C_CONNECTED] = C_CONNECTED,
3096
3097 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3098 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3099 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3100 [C_VERIFY_S] = C_VERIFY_T,
3101 [C_MASK] = C_MASK,
3102 };
3103
3104 ms.i = ps.i;
3105
3106 ms.conn = c_tab[ps.conn];
3107 ms.peer = ps.role;
3108 ms.role = ps.peer;
3109 ms.pdsk = ps.disk;
3110 ms.disk = ps.pdsk;
3111 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3112
3113 return ms;
3114}
3115
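/*
 * Illustrative sketch, not part of the original file: what convert_state()
 * does to a concrete value.  The peer's "my role/disk" fields land in
 * our "peer role/disk" fields, and vice versa:
 */
static void sketch_convert_state_example(void)
{
	union drbd_state ps, ms;

	ps.i = 0;
	ps.role = R_PRIMARY;	/* peer reports itself as primary */
	ps.disk = D_UP_TO_DATE;	/* ... with an UpToDate local disk */

	ms = convert_state(ps);
	/* now ms.peer == R_PRIMARY and ms.pdsk == D_UP_TO_DATE */
}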
Philipp Reisner02918be2010-08-20 14:35:10 +02003116static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003117{
Philipp Reisner02918be2010-08-20 14:35:10 +02003118 struct p_req_state *p = &mdev->data.rbuf.req_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003119 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003120 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003121
Philipp Reisnerb411b362009-09-25 16:07:19 -07003122 mask.i = be32_to_cpu(p->mask);
3123 val.i = be32_to_cpu(p->val);
3124
3125 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3126 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3127 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003128 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003129 }
3130
3131 mask = convert_state(mask);
3132 val = convert_state(val);
3133
3134 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3135
3136 drbd_send_sr_reply(mdev, rv);
3137 drbd_md_sync(mdev);
3138
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003139 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003140}
3141
Philipp Reisner02918be2010-08-20 14:35:10 +02003142static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003143{
Philipp Reisner02918be2010-08-20 14:35:10 +02003144 struct p_state *p = &mdev->data.rbuf.state;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003145 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003146 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003147 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003148 int rv;
3149
Philipp Reisnerb411b362009-09-25 16:07:19 -07003150 peer_state.i = be32_to_cpu(p->state);
3151
3152 real_peer_disk = peer_state.disk;
3153 if (peer_state.disk == D_NEGOTIATING) {
3154 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3155 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3156 }
3157
3158 spin_lock_irq(&mdev->req_lock);
3159 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003160 os = ns = mdev->state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003161 spin_unlock_irq(&mdev->req_lock);
3162
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003163 /* peer says his disk is uptodate, while we think it is inconsistent,
3164 * and this happens while we think we have a sync going on. */
3165 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3166 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3167 /* If we are (becoming) SyncSource, but peer is still in sync
3168 * preparation, ignore its uptodate-ness to avoid flapping, it
3169 * will change to inconsistent once the peer reaches active
3170 * syncing states.
3171 * It may have changed syncer-paused flags, however, so we
3172 * cannot ignore this completely. */
3173 if (peer_state.conn > C_CONNECTED &&
3174 peer_state.conn < C_SYNC_SOURCE)
3175 real_peer_disk = D_INCONSISTENT;
3176
3177 /* if peer_state changes to connected at the same time,
3178 * it explicitly notifies us that it finished resync.
3179 * Maybe we should finish it up, too? */
3180 else if (os.conn >= C_SYNC_SOURCE &&
3181 peer_state.conn == C_CONNECTED) {
3182 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3183 drbd_resync_finished(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003184 return true;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003185 }
3186 }
3187
3188 /* peer says his disk is inconsistent, while we think it is uptodate,
3189 * and this happens while the peer still thinks we have a sync going on,
3190 * but we think we are already done with the sync.
3191 * We ignore this to avoid flapping pdsk.
3192	 * This should not happen if the peer is a recent version of drbd. */
3193 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3194 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3195 real_peer_disk = D_UP_TO_DATE;
3196
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003197 if (ns.conn == C_WF_REPORT_PARAMS)
3198 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003199
Philipp Reisner67531712010-10-27 12:21:30 +02003200 if (peer_state.conn == C_AHEAD)
3201 ns.conn = C_BEHIND;
3202
Philipp Reisnerb411b362009-09-25 16:07:19 -07003203 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3204 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3205 int cr; /* consider resync */
3206
3207 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003208 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003209 /* if we had an established connection
3210 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003211 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003212 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003213 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003214 /* if we have both been inconsistent, and the peer has been
3215 * forced to be UpToDate with --overwrite-data */
3216 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3217 /* if we had been plain connected, and the admin requested to
3218 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003219 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003220 (peer_state.conn >= C_STARTING_SYNC_S &&
3221 peer_state.conn <= C_WF_BITMAP_T));
3222
3223 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003224 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003225
3226 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003227 if (ns.conn == C_MASK) {
3228 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003229 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003230 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003231 } else if (peer_state.disk == D_NEGOTIATING) {
3232 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3233 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003234 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003235 } else {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003236 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003237 return false;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003238 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003239 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003240 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003241 }
3242 }
3243 }
3244
3245 spin_lock_irq(&mdev->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003246 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003247 goto retry;
3248 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003249 ns.peer = peer_state.role;
3250 ns.pdsk = real_peer_disk;
3251 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003252 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003253 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003254 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3255 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003256 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3257		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3258		   for temporary network outages! */
3259 spin_unlock_irq(&mdev->req_lock);
3260 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3261 tl_clear(mdev);
3262 drbd_uuid_new_current(mdev);
3263 clear_bit(NEW_CUR_UUID, &mdev->flags);
3264 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003265 return false;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003266 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003267 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003268 ns = mdev->state;
3269 spin_unlock_irq(&mdev->req_lock);
3270
3271 if (rv < SS_SUCCESS) {
3272 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003273 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003274 }
3275
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003276 if (os.conn > C_WF_REPORT_PARAMS) {
3277 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003278 peer_state.disk != D_NEGOTIATING ) {
3279 /* we want resync, peer has not yet decided to sync... */
3280			/* Nowadays only used when forcing a node into primary role and
3281			   setting its disk to UpToDate at the same time */
3282 drbd_send_uuids(mdev);
3283 drbd_send_state(mdev);
3284 }
3285 }
3286
3287 mdev->net_conf->want_lose = 0;
3288
3289 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3290
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003291 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003292}
3293
Philipp Reisner02918be2010-08-20 14:35:10 +02003294static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003295{
Philipp Reisner02918be2010-08-20 14:35:10 +02003296 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003297
3298 wait_event(mdev->misc_wait,
3299 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003300 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003301 mdev->state.conn < C_CONNECTED ||
3302 mdev->state.disk < D_NEGOTIATING);
3303
3304 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3305
Philipp Reisnerb411b362009-09-25 16:07:19 -07003306 /* Here the _drbd_uuid_ functions are right, current should
3307 _not_ be rotated into the history */
3308 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3309 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3310 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3311
3312 drbd_start_resync(mdev, C_SYNC_TARGET);
3313
3314 put_ldev(mdev);
3315 } else
3316 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3317
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003318 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003319}
3320
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003321/**
3322 * receive_bitmap_plain
3323 *
3324 * Return 0 when done, 1 when another iteration is needed, and a negative error
3325 * code upon failure.
3326 */
3327static int
Philipp Reisner02918be2010-08-20 14:35:10 +02003328receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3329 unsigned long *buffer, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003330{
3331 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3332 unsigned want = num_words * sizeof(long);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003333 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003334
Philipp Reisner02918be2010-08-20 14:35:10 +02003335 if (want != data_size) {
3336 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003337 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003338 }
3339 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003340 return 0;
3341 err = drbd_recv(mdev, buffer, want);
3342 if (err != want) {
3343 if (err >= 0)
3344 err = -EIO;
3345 return err;
3346 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003347
3348 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3349
3350 c->word_offset += num_words;
3351 c->bit_offset = c->word_offset * BITS_PER_LONG;
3352 if (c->bit_offset > c->bm_bits)
3353 c->bit_offset = c->bm_bits;
3354
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003355 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003356}
3357
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003358/**
3359 * recv_bm_rle_bits
3360 *
3361 * Return 0 when done, 1 when another iteration is needed, and a negative error
3362 * code upon failure.
3363 */
3364static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003365recv_bm_rle_bits(struct drbd_conf *mdev,
3366 struct p_compressed_bm *p,
3367 struct bm_xfer_ctx *c)
3368{
3369 struct bitstream bs;
3370 u64 look_ahead;
3371 u64 rl;
3372 u64 tmp;
3373 unsigned long s = c->bit_offset;
3374 unsigned long e;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003375 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003376 int toggle = DCBP_get_start(p);
3377 int have;
3378 int bits;
3379
3380 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3381
3382 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3383 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003384 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003385
3386 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3387 bits = vli_decode_bits(&rl, look_ahead);
3388 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003389 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003390
3391 if (toggle) {
3392 e = s + rl -1;
3393 if (e >= c->bm_bits) {
3394 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003395 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003396 }
3397 _drbd_bm_set_bits(mdev, s, e);
3398 }
3399
3400 if (have < bits) {
3401 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3402 have, bits, look_ahead,
3403 (unsigned int)(bs.cur.b - p->code),
3404 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003405 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003406 }
3407 look_ahead >>= bits;
3408 have -= bits;
3409
3410 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3411 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003412 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003413 look_ahead |= tmp << have;
3414 have += bits;
3415 }
3416
3417 c->bit_offset = s;
3418 bm_xfer_ctx_bit_to_word_offset(c);
3419
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003420 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003421}
3422
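/*
 * Illustrative sketch, not part of the original file: the wire format
 * decoded above is a sequence of alternating run lengths; whether the
 * first run means "bits set" comes from the packet (DCBP_get_start).
 * The same idea with a plain array instead of the VLI bitstream:
 */
static void sketch_decode_runs(const unsigned long *run_len, int n_runs,
			       int first_run_set, unsigned char *bitmap)
{
	unsigned long bit = 0;
	int set = first_run_set;
	int i;

	for (i = 0; i < n_runs; i++, set = !set) {
		unsigned long end = bit + run_len[i];

		if (set) {
			for (; bit < end; bit++)
				bitmap[bit / 8] |= 1U << (bit % 8);
		} else {
			bit = end;	/* runs of clear bits are just skipped */
		}
	}
}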
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003423/**
3424 * decode_bitmap_c
3425 *
3426 * Return 0 when done, 1 when another iteration is needed, and a negative error
3427 * code upon failure.
3428 */
3429static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003430decode_bitmap_c(struct drbd_conf *mdev,
3431 struct p_compressed_bm *p,
3432 struct bm_xfer_ctx *c)
3433{
3434 if (DCBP_get_code(p) == RLE_VLI_Bits)
3435 return recv_bm_rle_bits(mdev, p, c);
3436
3437 /* other variants had been implemented for evaluation,
3438 * but have been dropped as this one turned out to be "best"
3439 * during all our tests. */
3440
3441 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3442 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003443 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003444}
3445
3446void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3447 const char *direction, struct bm_xfer_ctx *c)
3448{
3449 /* what would it take to transfer it "plaintext" */
Philipp Reisner0b70a132010-08-20 13:36:10 +02003450 unsigned plain = sizeof(struct p_header80) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003451 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3452 + c->bm_words * sizeof(long);
3453 unsigned total = c->bytes[0] + c->bytes[1];
3454 unsigned r;
3455
3456	/* total cannot be zero, but just in case: */
3457 if (total == 0)
3458 return;
3459
3460 /* don't report if not compressed */
3461 if (total >= plain)
3462 return;
3463
3464 /* total < plain. check for overflow, still */
3465 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3466 : (1000 * total / plain);
3467
3468 if (r > 1000)
3469 r = 1000;
3470
3471 r = 1000 - r;
3472 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3473 "total %u; compression: %u.%u%%\n",
3474 direction,
3475 c->bytes[1], c->packets[1],
3476 c->bytes[0], c->packets[0],
3477 total, r/10, r % 10);
3478}
3479
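/*
 * Illustrative sketch, not part of the original file: the compression
 * ratio above is reported in per mille, with a guard against 32-bit
 * overflow of "1000 * total".  In isolation (the caller guarantees
 * 0 < total < plain, as checked above):
 */
static unsigned int sketch_savings_permille(unsigned int total, unsigned int plain)
{
	unsigned int r = (total > UINT_MAX/1000) ? (total / (plain/1000))
						 : (1000 * total / plain);
	if (r > 1000)
		r = 1000;
	return 1000 - r;	/* 0 = nothing saved, 1000 = everything saved */
}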
3480/* Since we are processing the bitfield from lower addresses to higher,
3481   it does not matter whether we process it in 32 bit chunks or 64 bit
3482   chunks, as long as it is little endian. (Understand it as a byte
3483   stream, beginning with the lowest byte...) If we used big endian,
3484   we would need to process it from the highest address to the lowest,
3485   in order to be agnostic to the 32 vs 64 bit issue.
3486
3487 returns 0 on failure, 1 if we successfully received it. */
Philipp Reisner02918be2010-08-20 14:35:10 +02003488static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003489{
3490 struct bm_xfer_ctx c;
3491 void *buffer;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003492 int err;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003493 int ok = false;
Philipp Reisner02918be2010-08-20 14:35:10 +02003494 struct p_header80 *h = &mdev->data.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003495
Philipp Reisner37190942010-11-10 12:08:37 +01003496 /* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003497
3498 /* maybe we should use some per thread scratch page,
3499 * and allocate that during initial device creation? */
3500 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3501 if (!buffer) {
3502 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3503 goto out;
3504 }
3505
3506 c = (struct bm_xfer_ctx) {
3507 .bm_bits = drbd_bm_bits(mdev),
3508 .bm_words = drbd_bm_words(mdev),
3509 };
3510
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003511 for(;;) {
Philipp Reisner02918be2010-08-20 14:35:10 +02003512 if (cmd == P_BITMAP) {
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003513 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
Philipp Reisner02918be2010-08-20 14:35:10 +02003514 } else if (cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003515 /* MAYBE: sanity check that we speak proto >= 90,
3516 * and the feature is enabled! */
3517 struct p_compressed_bm *p;
3518
Philipp Reisner02918be2010-08-20 14:35:10 +02003519 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003520 dev_err(DEV, "ReportCBitmap packet too large\n");
3521 goto out;
3522 }
3523			/* use the page buffer */
3524 p = buffer;
3525 memcpy(p, h, sizeof(*h));
Philipp Reisner02918be2010-08-20 14:35:10 +02003526 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003527 goto out;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003528 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3529 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003530 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003531 }
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003532 err = decode_bitmap_c(mdev, p, &c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003533 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02003534 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003535 goto out;
3536 }
3537
Philipp Reisner02918be2010-08-20 14:35:10 +02003538 c.packets[cmd == P_BITMAP]++;
3539 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003540
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003541 if (err <= 0) {
3542 if (err < 0)
3543 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003544 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003545 }
Philipp Reisner02918be2010-08-20 14:35:10 +02003546 if (!drbd_recv_header(mdev, &cmd, &data_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003547 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003548 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003549
3550 INFO_bm_xfer_stats(mdev, "receive", &c);
3551
3552 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003553 enum drbd_state_rv rv;
3554
Philipp Reisnerb411b362009-09-25 16:07:19 -07003555 ok = !drbd_send_bitmap(mdev);
3556 if (!ok)
3557 goto out;
3558 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003559 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3560 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003561 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3562 /* admin may have requested C_DISCONNECTING,
3563 * other threads may have noticed network errors */
3564 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3565 drbd_conn_str(mdev->state.conn));
3566 }
3567
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003568 ok = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003569 out:
Philipp Reisner37190942010-11-10 12:08:37 +01003570 /* drbd_bm_unlock(mdev); by intention no lock */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003571 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3572 drbd_start_resync(mdev, C_SYNC_SOURCE);
3573 free_page((unsigned long) buffer);
3574 return ok;
3575}
3576
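/*
 * Illustrative sketch, not part of the original file: the statistics
 * update in the receive loop above indexes by "cmd == P_BITMAP", so
 * slot 1 counts plain bitmap packets and slot 0 the compressed (RLE)
 * ones.  Spelled out:
 */
static void sketch_account_packet(struct bm_xfer_ctx *c, int is_plain,
				  unsigned int bytes_on_wire)
{
	int slot = is_plain ? 1 : 0;

	c->packets[slot]++;
	c->bytes[slot] += bytes_on_wire;
}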
Philipp Reisner02918be2010-08-20 14:35:10 +02003577static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003578{
3579 /* TODO zero copy sink :) */
3580 static char sink[128];
3581 int size, want, r;
3582
Philipp Reisner02918be2010-08-20 14:35:10 +02003583 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3584 cmd, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003585
Philipp Reisner02918be2010-08-20 14:35:10 +02003586 size = data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003587 while (size > 0) {
3588 want = min_t(int, size, sizeof(sink));
3589 r = drbd_recv(mdev, sink, want);
3590 ERR_IF(r <= 0) break;
3591 size -= r;
3592 }
3593 return size == 0;
3594}
3595
Philipp Reisner02918be2010-08-20 14:35:10 +02003596static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003597{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003598 /* Make sure we've acked all the TCP data associated
3599 * with the data requests being unplugged */
3600 drbd_tcp_quickack(mdev->data.socket);
3601
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003602 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003603}
3604
Philipp Reisner73a01a12010-10-27 14:33:00 +02003605static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3606{
3607 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3608
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003609 switch (mdev->state.conn) {
3610 case C_WF_SYNC_UUID:
3611 case C_WF_BITMAP_T:
3612 case C_BEHIND:
3613 break;
3614 default:
3615 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3616 drbd_conn_str(mdev->state.conn));
3617 }
3618
Philipp Reisner73a01a12010-10-27 14:33:00 +02003619 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3620
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003621 return true;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003622}
3623
Philipp Reisner02918be2010-08-20 14:35:10 +02003624typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003625
Philipp Reisner02918be2010-08-20 14:35:10 +02003626struct data_cmd {
3627 int expect_payload;
3628 size_t pkt_size;
3629 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003630};
3631
Philipp Reisner02918be2010-08-20 14:35:10 +02003632static struct data_cmd drbd_cmd_handler[] = {
3633 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3634 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3635 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3636 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3637 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3638 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3639 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3640 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3641 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3642 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3643 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3644 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3645 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3646 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3647 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3648 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3649 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3650 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3651 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3652 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3653 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02003654 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Philipp Reisner02918be2010-08-20 14:35:10 +02003655 /* anything missing from this table is in
3656 * the asender_tbl, see get_asender_cmd */
3657 [P_MAX_CMD] = { 0, 0, NULL },
3658};
3659
3660/* All handler functions that expect a sub-header get that sub-header in
3661   mdev->data.rbuf.header.head.payload.
3662
3663   Usually the callback can find the usual p_header in
3664   mdev->data.rbuf.header.head, but it may not rely on that, since
3665   there is also p_header95! */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003666
3667static void drbdd(struct drbd_conf *mdev)
3668{
Philipp Reisner02918be2010-08-20 14:35:10 +02003669 union p_header *header = &mdev->data.rbuf.header;
3670 unsigned int packet_size;
3671 enum drbd_packets cmd;
3672 size_t shs; /* sub header size */
3673 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003674
3675 while (get_t_state(&mdev->receiver) == Running) {
3676 drbd_thread_current_set_cpu(mdev);
Philipp Reisner02918be2010-08-20 14:35:10 +02003677 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3678 goto err_out;
3679
3680 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3681 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3682 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01003683 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003684
Philipp Reisner02918be2010-08-20 14:35:10 +02003685 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02003686 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3687 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3688 goto err_out;
3689 }
3690
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003691 if (shs) {
3692 rv = drbd_recv(mdev, &header->h80.payload, shs);
3693 if (unlikely(rv != shs)) {
3694 dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3695 goto err_out;
3696 }
3697 }
3698
Philipp Reisner02918be2010-08-20 14:35:10 +02003699 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3700
3701 if (unlikely(!rv)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003702 dev_err(DEV, "error receiving %s, l: %d!\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003703 cmdname(cmd), packet_size);
3704 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003705 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003706 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003707
Philipp Reisner02918be2010-08-20 14:35:10 +02003708 if (0) {
3709 err_out:
3710 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003711 }
Lars Ellenberg856c50c2010-10-14 13:37:40 +02003712 /* If we leave here, we probably want to update at least the
3713 * "Connected" indicator on stable storage. Do so explicitly here. */
3714 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003715}
3716
3717void drbd_flush_workqueue(struct drbd_conf *mdev)
3718{
3719 struct drbd_wq_barrier barr;
3720
3721 barr.w.cb = w_prev_work_done;
3722 init_completion(&barr.done);
3723 drbd_queue_work(&mdev->data.work, &barr.w);
3724 wait_for_completion(&barr.done);
3725}
3726
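/*
 * Illustrative sketch, not part of the original file: the barrier above
 * relies on w_prev_work_done (assumed to live in drbd_worker.c with
 * roughly this shape) completing the embedded completion once the
 * worker thread reaches it, i.e. once all previously queued work ran:
 */
static int sketch_prev_work_done(struct drbd_conf *mdev,
				 struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);

	complete(&b->done);
	return 1;
}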
Philipp Reisnerf70b35112010-06-24 14:34:40 +02003727void drbd_free_tl_hash(struct drbd_conf *mdev)
3728{
3729 struct hlist_head *h;
3730
3731 spin_lock_irq(&mdev->req_lock);
3732
3733 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3734 spin_unlock_irq(&mdev->req_lock);
3735 return;
3736 }
3737 /* paranoia code */
3738 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3739 if (h->first)
3740 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3741 (int)(h - mdev->ee_hash), h->first);
3742 kfree(mdev->ee_hash);
3743 mdev->ee_hash = NULL;
3744 mdev->ee_hash_s = 0;
3745
3746 /* paranoia code */
3747 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3748 if (h->first)
3749 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3750 (int)(h - mdev->tl_hash), h->first);
3751 kfree(mdev->tl_hash);
3752 mdev->tl_hash = NULL;
3753 mdev->tl_hash_s = 0;
3754 spin_unlock_irq(&mdev->req_lock);
3755}
3756
Philipp Reisnerb411b362009-09-25 16:07:19 -07003757static void drbd_disconnect(struct drbd_conf *mdev)
3758{
3759 enum drbd_fencing_p fp;
3760 union drbd_state os, ns;
3761 int rv = SS_UNKNOWN_ERROR;
3762 unsigned int i;
3763
3764 if (mdev->state.conn == C_STANDALONE)
3765 return;
3766 if (mdev->state.conn >= C_WF_CONNECTION)
3767 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3768 drbd_conn_str(mdev->state.conn));
3769
3770 /* asender does not clean up anything. it must not interfere, either */
3771 drbd_thread_stop(&mdev->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003772 drbd_free_sock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003773
Philipp Reisner85719572010-07-21 10:20:17 +02003774 /* wait for current activity to cease. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003775 spin_lock_irq(&mdev->req_lock);
3776 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3777 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3778 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3779 spin_unlock_irq(&mdev->req_lock);
3780
3781 /* We do not have data structures that would allow us to
3782 * get the rs_pending_cnt down to 0 again.
3783 * * On C_SYNC_TARGET we do not have any data structures describing
3784 * the pending RSDataRequest's we have sent.
3785 * * On C_SYNC_SOURCE there is no data structure that tracks
3786 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3787 * And no, it is not the sum of the reference counts in the
3788 * resync_LRU. The resync_LRU tracks the whole operation including
3789 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3790 * on the fly. */
3791 drbd_rs_cancel_all(mdev);
3792 mdev->rs_total = 0;
3793 mdev->rs_failed = 0;
3794 atomic_set(&mdev->rs_pending_cnt, 0);
3795 wake_up(&mdev->misc_wait);
3796
3797 /* make sure syncer is stopped and w_resume_next_sg queued */
3798 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003799 resync_timer_fn((unsigned long)mdev);
3800
Philipp Reisnerb411b362009-09-25 16:07:19 -07003801 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3802 * w_make_resync_request etc. which may still be on the worker queue
3803 * to be "canceled" */
3804 drbd_flush_workqueue(mdev);
3805
3806 /* This also does reclaim_net_ee(). If we do this too early, we might
3807 * miss some resync ee and pages.*/
3808 drbd_process_done_ee(mdev);
3809
3810 kfree(mdev->p_uuid);
3811 mdev->p_uuid = NULL;
3812
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003813 if (!is_susp(mdev->state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003814 tl_clear(mdev);
3815
Philipp Reisnerb411b362009-09-25 16:07:19 -07003816 dev_info(DEV, "Connection closed\n");
3817
3818 drbd_md_sync(mdev);
3819
3820 fp = FP_DONT_CARE;
3821 if (get_ldev(mdev)) {
3822 fp = mdev->ldev->dc.fencing;
3823 put_ldev(mdev);
3824 }
3825
Philipp Reisner87f7be42010-06-11 13:56:33 +02003826 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3827 drbd_try_outdate_peer_async(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003828
3829 spin_lock_irq(&mdev->req_lock);
3830 os = mdev->state;
3831 if (os.conn >= C_UNCONNECTED) {
3832 /* Do not restart in case we are C_DISCONNECTING */
3833 ns = os;
3834 ns.conn = C_UNCONNECTED;
3835 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3836 }
3837 spin_unlock_irq(&mdev->req_lock);
3838
3839 if (os.conn == C_DISCONNECTING) {
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003840 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003841
Philipp Reisnerb411b362009-09-25 16:07:19 -07003842 crypto_free_hash(mdev->cram_hmac_tfm);
3843 mdev->cram_hmac_tfm = NULL;
3844
3845 kfree(mdev->net_conf);
3846 mdev->net_conf = NULL;
3847 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3848 }
3849
3850 /* tcp_close and release of sendpage pages can be deferred. I don't
3851 * want to use SO_LINGER, because apparently it can be deferred for
3852 * more than 20 seconds (longest time I checked).
3853 *
3854	 * Actually we don't care exactly when the network stack does its
3855	 * put_page(); we just release our reference on these pages right here.
3856 */
3857 i = drbd_release_ee(mdev, &mdev->net_ee);
3858 if (i)
3859 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003860 i = atomic_read(&mdev->pp_in_use_by_net);
3861 if (i)
3862 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003863 i = atomic_read(&mdev->pp_in_use);
3864 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02003865 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003866
3867 D_ASSERT(list_empty(&mdev->read_ee));
3868 D_ASSERT(list_empty(&mdev->active_ee));
3869 D_ASSERT(list_empty(&mdev->sync_ee));
3870 D_ASSERT(list_empty(&mdev->done_ee));
3871
3872 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3873 atomic_set(&mdev->current_epoch->epoch_size, 0);
3874 D_ASSERT(list_empty(&mdev->current_epoch->list));
3875}
3876
3877/*
3878 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3879 * we can agree on is stored in agreed_pro_version.
3880 *
3881 * feature flags and the reserved array should be enough room for future
3882 * enhancements of the handshake protocol, and possible plugins...
3883 *
3884 * for now, they are expected to be zero, but ignored.
3885 */
3886static int drbd_send_handshake(struct drbd_conf *mdev)
3887{
3888 /* ASSERT current == mdev->receiver ... */
3889 struct p_handshake *p = &mdev->data.sbuf.handshake;
3890 int ok;
3891
3892 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3893 dev_err(DEV, "interrupted during initial handshake\n");
3894 return 0; /* interrupted. not ok. */
3895 }
3896
3897 if (mdev->data.socket == NULL) {
3898 mutex_unlock(&mdev->data.mutex);
3899 return 0;
3900 }
3901
3902 memset(p, 0, sizeof(*p));
3903 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3904 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3905 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02003906 (struct p_header80 *)p, sizeof(*p), 0 );
Philipp Reisnerb411b362009-09-25 16:07:19 -07003907 mutex_unlock(&mdev->data.mutex);
3908 return ok;
3909}
3910
3911/*
3912 * return values:
3913 * 1 yes, we have a valid connection
3914 * 0 oops, did not work out, please try again
3915 * -1 peer talks different language,
3916 * no point in trying again, please go standalone.
3917 */
3918static int drbd_do_handshake(struct drbd_conf *mdev)
3919{
3920 /* ASSERT current == mdev->receiver ... */
3921 struct p_handshake *p = &mdev->data.rbuf.handshake;
Philipp Reisner02918be2010-08-20 14:35:10 +02003922 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3923 unsigned int length;
3924 enum drbd_packets cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003925 int rv;
3926
3927 rv = drbd_send_handshake(mdev);
3928 if (!rv)
3929 return 0;
3930
Philipp Reisner02918be2010-08-20 14:35:10 +02003931 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003932 if (!rv)
3933 return 0;
3934
Philipp Reisner02918be2010-08-20 14:35:10 +02003935 if (cmd != P_HAND_SHAKE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003936 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003937 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003938 return -1;
3939 }
3940
Philipp Reisner02918be2010-08-20 14:35:10 +02003941 if (length != expect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003942 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003943 expect, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003944 return -1;
3945 }
3946
3947 rv = drbd_recv(mdev, &p->head.payload, expect);
3948
3949 if (rv != expect) {
3950 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3951 return 0;
3952 }
3953
Philipp Reisnerb411b362009-09-25 16:07:19 -07003954 p->protocol_min = be32_to_cpu(p->protocol_min);
3955 p->protocol_max = be32_to_cpu(p->protocol_max);
3956 if (p->protocol_max == 0)
3957 p->protocol_max = p->protocol_min;
3958
3959 if (PRO_VERSION_MAX < p->protocol_min ||
3960 PRO_VERSION_MIN > p->protocol_max)
3961 goto incompat;
3962
3963 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3964
3965 dev_info(DEV, "Handshake successful: "
3966 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3967
3968 return 1;
3969
3970 incompat:
3971 dev_err(DEV, "incompatible DRBD dialects: "
3972 "I support %d-%d, peer supports %d-%d\n",
3973 PRO_VERSION_MIN, PRO_VERSION_MAX,
3974 p->protocol_min, p->protocol_max);
3975 return -1;
3976}
3977
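/*
 * Illustrative sketch, not part of the original file: the version
 * handshake above reduces to a range-overlap test that agrees on the
 * highest protocol version both sides support:
 */
static int sketch_negotiate_version(int my_min, int my_max,
				    int peer_min, int peer_max)
{
	if (my_max < peer_min || my_min > peer_max)
		return -1;	/* incompatible dialects, go standalone */
	return my_max < peer_max ? my_max : peer_max;
}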
3978#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3979static int drbd_do_auth(struct drbd_conf *mdev)
3980{
3981	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
3982 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01003983 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003984}
3985#else
3986#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01003987
3988/* Return value:
3989 1 - auth succeeded,
3990 0 - failed, try again (network error),
3991 -1 - auth failed, don't try again.
3992*/
3993
Philipp Reisnerb411b362009-09-25 16:07:19 -07003994static int drbd_do_auth(struct drbd_conf *mdev)
3995{
3996 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
3997 struct scatterlist sg;
3998 char *response = NULL;
3999 char *right_response = NULL;
4000 char *peers_ch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004001 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4002 unsigned int resp_size;
4003 struct hash_desc desc;
Philipp Reisner02918be2010-08-20 14:35:10 +02004004 enum drbd_packets cmd;
4005 unsigned int length;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004006 int rv;
4007
4008 desc.tfm = mdev->cram_hmac_tfm;
4009 desc.flags = 0;
4010
4011 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4012 (u8 *)mdev->net_conf->shared_secret, key_len);
4013 if (rv) {
4014 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004015 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004016 goto fail;
4017 }
4018
4019 get_random_bytes(my_challenge, CHALLENGE_LEN);
4020
4021 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4022 if (!rv)
4023 goto fail;
4024
Philipp Reisner02918be2010-08-20 14:35:10 +02004025 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004026 if (!rv)
4027 goto fail;
4028
Philipp Reisner02918be2010-08-20 14:35:10 +02004029 if (cmd != P_AUTH_CHALLENGE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004030 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004031 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004032 rv = 0;
4033 goto fail;
4034 }
4035
Philipp Reisner02918be2010-08-20 14:35:10 +02004036 if (length > CHALLENGE_LEN * 2) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004037 dev_err(DEV, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004038 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004039 goto fail;
4040 }
4041
Philipp Reisner02918be2010-08-20 14:35:10 +02004042 peers_ch = kmalloc(length, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004043 if (peers_ch == NULL) {
4044 dev_err(DEV, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004045 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004046 goto fail;
4047 }
4048
Philipp Reisner02918be2010-08-20 14:35:10 +02004049 rv = drbd_recv(mdev, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004050
Philipp Reisner02918be2010-08-20 14:35:10 +02004051 if (rv != length) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004052 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4053 rv = 0;
4054 goto fail;
4055 }
4056
4057 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4058 response = kmalloc(resp_size, GFP_NOIO);
4059 if (response == NULL) {
4060 dev_err(DEV, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004061 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004062 goto fail;
4063 }
4064
4065 sg_init_table(&sg, 1);
Philipp Reisner02918be2010-08-20 14:35:10 +02004066 sg_set_buf(&sg, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004067
4068 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4069 if (rv) {
4070 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004071 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004072 goto fail;
4073 }
4074
4075 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4076 if (!rv)
4077 goto fail;
4078
Philipp Reisner02918be2010-08-20 14:35:10 +02004079 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004080 if (!rv)
4081 goto fail;
4082
Philipp Reisner02918be2010-08-20 14:35:10 +02004083 if (cmd != P_AUTH_RESPONSE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004084 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004085 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004086 rv = 0;
4087 goto fail;
4088 }
4089
Philipp Reisner02918be2010-08-20 14:35:10 +02004090 if (length != resp_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004091 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4092 rv = 0;
4093 goto fail;
4094 }
4095
4096 rv = drbd_recv(mdev, response , resp_size);
4097
4098 if (rv != resp_size) {
4099 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4100 rv = 0;
4101 goto fail;
4102 }
4103
4104 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004105 if (right_response == NULL) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004106 dev_err(DEV, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004107 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004108 goto fail;
4109 }
4110
4111 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4112
4113 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4114 if (rv) {
4115 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004116 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004117 goto fail;
4118 }
4119
4120 rv = !memcmp(response, right_response, resp_size);
4121
4122 if (rv)
4123 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4124 resp_size, mdev->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004125 else
4126 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004127
4128 fail:
4129 kfree(peers_ch);
4130 kfree(response);
4131 kfree(right_response);
4132
4133 return rv;
4134}
4135#endif
4136
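/*
 * Illustrative sketch, not part of the original file: drbd_do_auth()
 * above is a symmetric challenge-response.  With an abstract keyed-MAC
 * primitive assumed (sketch_hmac is hypothetical; the real code uses
 * the crypto_hash API with the configured cram-hmac-alg), each side
 * proves knowledge of the shared secret without ever sending it:
 */
#define SKETCH_CHALLENGE_LEN 64	/* mirrors CHALLENGE_LEN above */
#define SKETCH_DIGEST_LEN 20	/* depends on the configured digest */

/* assumed primitive: out = MAC(key, msg) */
extern void sketch_hmac(const void *key, unsigned int key_len,
			const void *msg, unsigned int msg_len,
			u8 out[SKETCH_DIGEST_LEN]);

static int sketch_verify_peer(const void *secret, unsigned int secret_len,
			      const u8 challenge[SKETCH_CHALLENGE_LEN],
			      const u8 peer_response[SKETCH_DIGEST_LEN])
{
	u8 expected[SKETCH_DIGEST_LEN];

	sketch_hmac(secret, secret_len, challenge, SKETCH_CHALLENGE_LEN, expected);
	return memcmp(expected, peer_response, SKETCH_DIGEST_LEN) == 0;
}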
4137int drbdd_init(struct drbd_thread *thi)
4138{
4139 struct drbd_conf *mdev = thi->mdev;
4140 unsigned int minor = mdev_to_minor(mdev);
4141 int h;
4142
4143 sprintf(current->comm, "drbd%d_receiver", minor);
4144
4145 dev_info(DEV, "receiver (re)started\n");
4146
4147 do {
4148 h = drbd_connect(mdev);
4149 if (h == 0) {
4150 drbd_disconnect(mdev);
4151 __set_current_state(TASK_INTERRUPTIBLE);
4152 schedule_timeout(HZ);
4153 }
4154 if (h == -1) {
4155 dev_warn(DEV, "Discarding network configuration.\n");
4156 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4157 }
4158 } while (h == 0);
4159
4160 if (h > 0) {
4161 if (get_net_conf(mdev)) {
4162 drbdd(mdev);
4163 put_net_conf(mdev);
4164 }
4165 }
4166
4167 drbd_disconnect(mdev);
4168
4169 dev_info(DEV, "receiver terminated\n");
4170 return 0;
4171}
4172
4173/* ********* acknowledge sender ******** */
4174
Philipp Reisner0b70a132010-08-20 13:36:10 +02004175static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004176{
4177 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4178
4179 int retcode = be32_to_cpu(p->retcode);
4180
4181 if (retcode >= SS_SUCCESS) {
4182 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4183 } else {
4184 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4185 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4186 drbd_set_st_err_str(retcode), retcode);
4187 }
4188 wake_up(&mdev->state_wait);
4189
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004190 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004191}
4192
Philipp Reisner0b70a132010-08-20 13:36:10 +02004193static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004194{
4195 return drbd_send_ping_ack(mdev);
4196
4197}
4198
Philipp Reisner0b70a132010-08-20 13:36:10 +02004199static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004200{
4201 /* restore idle timeout */
4202 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
Philipp Reisner309d1602010-03-02 15:03:44 +01004203 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4204 wake_up(&mdev->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004205
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004206 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004207}
4208
Philipp Reisner0b70a132010-08-20 13:36:10 +02004209static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004210{
4211 struct p_block_ack *p = (struct p_block_ack *)h;
4212 sector_t sector = be64_to_cpu(p->sector);
4213 int blksize = be32_to_cpu(p->blksize);
4214
4215 D_ASSERT(mdev->agreed_pro_version >= 89);
4216
4217 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4218
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004219 if (get_ldev(mdev)) {
4220 drbd_rs_complete_io(mdev, sector);
4221 drbd_set_in_sync(mdev, sector, blksize);
4222 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4223 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4224 put_ldev(mdev);
4225 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004226 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004227 atomic_add(blksize >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004228
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004229 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004230}
4231
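/*
 * Illustrative sketch, not part of the original file: the resync
 * accounting above converts the acknowledged byte count twice, into
 * 512-byte sectors (>> 9) and into bitmap granularity
 * (>> BM_BLOCK_SHIFT; the 4 KiB / shift-of-12 value used here is an
 * assumption about drbd_int.h):
 */
static unsigned int sketch_bytes_to_sectors(unsigned int bytes)
{
	return bytes >> 9;	/* 512-byte sectors */
}

static unsigned int sketch_bytes_to_bm_blocks(unsigned int bytes)
{
	return bytes >> 12;	/* assumed BM_BLOCK_SHIFT == 12 (4 KiB) */
}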
4232/* when we receive the ACK for a write request,
4233 * verify that we actually know about it */
4234static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4235 u64 id, sector_t sector)
4236{
4237 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4238 struct hlist_node *n;
4239 struct drbd_request *req;
4240
4241 hlist_for_each_entry(req, n, slot, colision) {
4242 if ((unsigned long)req == (unsigned long)id) {
4243 if (req->sector != sector) {
4244 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4245 "wrong sector (%llus versus %llus)\n", req,
4246 (unsigned long long)req->sector,
4247 (unsigned long long)sector);
4248 break;
4249 }
4250 return req;
4251 }
4252 }
4253 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4254 (void *)(unsigned long)id, (unsigned long long)sector);
4255 return NULL;
4256}
4257
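/*
 * Illustrative sketch, not part of the original file: DRBD sends the
 * request pointer itself as the wire block_id, so validating an ACK is
 * a pointer-equality check plus a sector cross-check, exactly what
 * _ack_id_to_req() above does per hash-slot entry:
 */
static int sketch_ack_matches_req(const struct drbd_request *req,
				  u64 wire_id, sector_t wire_sector)
{
	return (unsigned long)req == (unsigned long)wire_id &&
	       req->sector == wire_sector;
}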
4258typedef struct drbd_request *(req_validator_fn)
4259 (struct drbd_conf *mdev, u64 id, sector_t sector);
4260
4261static int validate_req_change_req_state(struct drbd_conf *mdev,
4262 u64 id, sector_t sector, req_validator_fn validator,
4263 const char *func, enum drbd_req_event what)
4264{
4265 struct drbd_request *req;
4266 struct bio_and_error m;
4267
4268 spin_lock_irq(&mdev->req_lock);
4269 req = validator(mdev, id, sector);
4270 if (unlikely(!req)) {
4271 spin_unlock_irq(&mdev->req_lock);
4272 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004273 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004274 }
4275 __req_mod(req, what, &m);
4276 spin_unlock_irq(&mdev->req_lock);
4277
4278 if (m.bio)
4279 complete_master_bio(mdev, &m);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004280 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004281}
4282
static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return true;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return false;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, what);
}

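/*
 * Editor's note (illustrative): how the ACK packet types map onto
 * DRBD's replication protocols. P_RECV_ACK is protocol B's
 * "received into buffer" answer; P_WRITE_ACK, P_RS_WRITE_ACK and
 * P_DISCARD_ACK are protocol C's "written to stable storage"
 * answers, which is what the D_ASSERTs in got_BlockAck() encode.
 * An equivalent table form of that switch might look like:
 */
#if 0
static const enum drbd_req_event ack_to_event[] = {
	[P_RS_WRITE_ACK] = write_acked_by_peer_and_sis,
	[P_WRITE_ACK]	 = write_acked_by_peer,
	[P_RECV_ACK]	 = recv_acked_by_peer,
	[P_DISCARD_ACK]	 = conflict_discarded_by_peer,
};
#endif
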
static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	if (__ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		int size = be32_to_cpu(p->blksize);
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}
	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, neg_acked);
}

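/*
 * Editor's note (illustrative): block_id doubles as a discriminator.
 * Application writes carry the drbd_request pointer (see
 * _ack_id_to_req() above); resync and verify traffic carries the
 * reserved syncer id instead, which is why got_BlockAck() and
 * got_NegAck() branch on is_syncer_block_id() before ever touching
 * the transfer log hash.
 */
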
static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__, neg_acked);
}

static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (be16_to_cpu(h->command)) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through: a cancel needs no bookkeeping beyond this */
		case P_RS_CANCEL:
			break;
		default:
			D_ASSERT(0);
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	return true;
}

static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    atomic_read(&mdev->rs_pending_cnt) == 0 &&
	    list_empty(&mdev->start_resync_work.list)) {
		struct drbd_work *w = &mdev->start_resync_work;
		w->cb = w_start_resync;
		drbd_queue_work(&mdev->data.work, w);
	}

	return true;
}

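/*
 * Editor's sketch (illustrative): the drbd_work idiom used in
 * got_BarrierAck() above and got_OVResult() below. Handlers never do
 * heavy lifting on the asender thread; they fill in a callback and
 * queue the work item for the worker thread. With a hypothetical
 * callback my_cb:
 */
#if 0
	struct drbd_work *w = kmalloc(sizeof(*w), GFP_NOIO);
	if (w) {
		w->cb = my_cb;	/* int (*)(struct drbd_conf *, struct drbd_work *, int) */
		drbd_queue_work(&mdev->data.work, w);
	}
#endif
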
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}

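/*
 * Editor's sketch (illustrative): the progress-mark gate in
 * got_OVResult(). Testing a single bit of the countdown is a cheap
 * way to fire drbd_advance_rs_marks() only during alternating runs
 * of 512 decrements; the marks logic rate-limits further on its own.
 * Note also the GFP_NOIO above: this path completes block I/O, so
 * the allocation must not recurse into I/O for memory reclaim.
 */
static inline int ov_progress_window_example(unsigned long ov_left)
{
	/* true for ov_left in [512,1024), [1536,2048), ... */
	return (ov_left & 0x200) == 0x200;
}
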
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}

struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	    = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}

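/*
 * Editor's note (illustrative): pkt_size in asender_tbl is the size
 * of the complete packet including struct p_header80. drbd_asender()
 * first reads just the header, looks the command up here, then grows
 * its read target ("expect") to pkt_size; the on-wire length field
 * must agree with the table. Function name is hypothetical.
 */
static inline int asender_len_ok_example(const struct asender_cmd *cmd, u16 len)
{
	return len == cmd->pkt_size - sizeof(struct p_header80);
}
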
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}
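
/*
 * Editor's sketch (illustrative, names hypothetical): the receive
 * state machine of drbd_asender() in isolation. Bytes accumulate
 * until "expect" is reached -- first for the fixed header, then,
 * once the command is known, for the full packet -- and the state
 * resets after each processed packet.
 */
#if 0
	size_t received = 0, expect = HDR_SIZE;
	int have_hdr = 0;

	for (;;) {
		ssize_t rv = read_fn(sock, pkt + received, expect - received);
		if (rv <= 0)
			break;				/* error/EOF handling elided */
		received += rv;
		if (received == expect && !have_hdr) {
			expect = lookup_pkt_size(pkt);	/* from the header's command */
			have_hdr = 1;
		}
		if (received == expect) {		/* true immediately for
							 * header-only packets */
			process(pkt);
			received = 0;
			expect = HDR_SIZE;
			have_hdr = 0;
		}
	}
#endif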