/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

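/*
 * Illustrative sketch (not part of the driver, kept compiled out): how the
 * page->private "next" pointer idiom above composes.  A chain is built by
 * pointing each new page at the previous head, exactly as
 * drbd_pp_first_pages_or_try_alloc() does below, and torn down again with
 * page_chain_free().
 */
#if 0
static int example_page_chain_roundtrip(void)
{
	struct page *chain = NULL;
	struct page *tmp;
	int i;

	for (i = 0; i < 4; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)chain); /* link to previous head */
		chain = tmp;	/* first page linked ends the chain: private == 0 */
	}
	/* returns the number of pages that were actually linked and freed */
	return chain ? page_chain_free(chain) : 0;
}
#endif
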
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that has not
	   finished, we can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

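/*
 * Illustrative sketch (compiled out): the alloc/free pairing the pool
 * expects.  A receiver path asks for a chain of pages and, once done,
 * hands the whole chain back in one call; drbd_pp_free() (below) decides
 * whether the pages return to the pool or to the system.
 */
#if 0
static void example_pp_roundtrip(struct drbd_conf *mdev)
{
	/* may block until enough pages are available, since retry == true */
	struct page *chain = drbd_pp_alloc(mdev, 8, true);

	if (!chain)
		return;	/* only happens when interrupted by a signal */

	/* ... fill the pages from the socket ... */

	drbd_pp_free(mdev, chain, 0 /* not accounted as net_ee pages */);
}
#endif
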
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

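/*
 * Illustrative sketch (compiled out) of the locking contract spelled out
 * above: the underscore variant is entered with the req_lock held, while
 * the plain variant takes the lock itself.
 */
#if 0
static void example_ee_locking(struct drbd_conf *mdev)
{
	/* lock-free caller: the function acquires req_lock internally */
	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);

	/* caller already under req_lock: use the underscore variant */
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);
}
#endif
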
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	drbd_clear_interval(&e->i);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->i.size = data_size;
	e->flags = 0;
	e->i.sector = sector;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&e->i));
	mempool_free(e, drbd_ee_mempool);
}

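/*
 * Illustrative sketch (compiled out): typical lifetime of an epoch entry.
 * read_in_block() below follows this pattern for real, with the page
 * payload filled from the socket in between.
 */
#if 0
static void example_ee_lifetime(struct drbd_conf *mdev, sector_t sector)
{
	struct drbd_epoch_entry *e;

	e = drbd_alloc_ee(mdev, ID_SYNCER, sector, 4096, GFP_NOIO);
	if (!e)
		return;

	/* ... receive 4096 bytes into e->pages, submit, wait ... */

	drbd_free_ee(mdev, e);	/* wrapper around drbd_free_some_ee() */
}
#endif
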
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,BARRIER_ACKED)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}

/* see also kernel_accept, which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops  = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->tconn->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

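/*
 * Illustrative sketch (compiled out): the ordering tcp(7) requires.
 * drbd_try_connect() and drbd_wait_for_connect() below both size the
 * buffers on a freshly created socket *before* bind/connect/listen.
 * Field names (sndbuf_size, rcvbuf_size) are taken from the real calls
 * below; the helper itself is hypothetical.
 */
#if 0
static void example_socket_setup(struct socket *sock,
				 struct net_conf *nc,
				 struct sockaddr *peer, int peer_len)
{
	int err;

	/* must happen first, otherwise the sizes are silently ignored */
	drbd_setbufsize(sock, nc->sndbuf_size, nc->rcvbuf_size);

	/* only now establish the connection */
	err = sock->ops->connect(sock, peer, peer_len, 0);
	/* ... error handling as in drbd_try_connect() ... */
}
#endif
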
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev->tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->tconn->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->tconn->net_conf->sndbuf_size,
			mdev->tconn->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->tconn->net_conf->my_addr,
	       min_t(int, mdev->tconn->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->tconn->net_conf->peer_addr,
				 mdev->tconn->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev->tconn);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev->tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->tconn->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->tconn->net_conf->sndbuf_size,
			mdev->tconn->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->tconn->net_conf->my_addr,
			      mdev->tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev->tconn);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev, struct socket *sock,
			 enum drbd_packet cmd)
{
	struct p_header *h = &mdev->tconn->data.sbuf.header;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packet drbd_recv_fp(struct drbd_conf *mdev,
				     struct socket *sock)
{
	struct p_header80 *h = &mdev->tconn->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->tconn->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);
	mdev->tconn->agreed_pro_version = 99;
	/* agreed_pro_version must be smaller than 100 so we send the old
	   header (h80) in the first packet and in the handshake packet. */

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(mdev->tconn->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->tconn->data.socket = sock;
	mdev->tconn->meta.socket = msock;
	mdev->tconn->last_received = jiffies;

	D_ASSERT(mdev->tconn->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->tconn->asender);

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static bool decode_header(struct drbd_conf *mdev, struct p_header *h,
			  enum drbd_packet *cmd, unsigned int *packet_size)
{
	if (h->h80.magic == cpu_to_be32(DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length) & 0x00ffffff;
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	return true;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packet *cmd,
			    unsigned int *packet_size)
{
	struct p_header *h = &mdev->tconn->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	r = decode_header(mdev, h, cmd, packet_size);
	mdev->tconn->last_received = jiffies;

	return r;
}

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		   const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->i.sector;
	unsigned ds = e->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

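/*
 * Illustrative sketch (compiled out) of the drbd_submit_ee() calling
 * convention documented above: 0 means all bios are in flight and
 * completion arrives via drbd_endio_sec(); any negative value means
 * nothing was submitted and the entry is still owned by the caller.
 * recv_resync_read() below is a real instance of this pattern (it also
 * unlinks the entry from its list before freeing it).
 */
#if 0
static bool example_submit(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;	/* ack will be sent from the completion path */

	/* -ENOMEM or -ENOSPC: clean up ourselves and force a reconnect */
	drbd_free_ee(mdev, e);
	return false;
}
#endif
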
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001175static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packet cmd,
1176 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001177{
Philipp Reisner2451fc32010-08-24 13:43:11 +02001178 int rv;
Philipp Reisnere42325a2011-01-19 13:55:45 +01001179 struct p_barrier *p = &mdev->tconn->data.rbuf.barrier;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001180 struct drbd_epoch *epoch;
1181
Philipp Reisnerb411b362009-09-25 16:07:19 -07001182 inc_unacked(mdev);
1183
Philipp Reisnerb411b362009-09-25 16:07:19 -07001184 mdev->current_epoch->barrier_nr = p->barrier;
1185 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1186
1187 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1188 * the activity log, which means it would not be resynced in case the
1189 * R_PRIMARY crashes now.
1190 * Therefore we must send the barrier_ack after the barrier request was
1191 * completed. */
1192 switch (mdev->write_ordering) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001193 case WO_none:
1194 if (rv == FE_RECYCLED)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001195 return true;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001196
1197 /* receiver context, in the writeout path of the other node.
1198 * avoid potential distributed deadlock */
1199 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1200 if (epoch)
1201 break;
1202 else
1203 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1204 /* Fall through */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001205
1206 case WO_bdev_flush:
1207 case WO_drain_io:
Philipp Reisnerb411b362009-09-25 16:07:19 -07001208 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
Philipp Reisner2451fc32010-08-24 13:43:11 +02001209 drbd_flush(mdev);
1210
1211 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1212 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1213 if (epoch)
1214 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001215 }
1216
Philipp Reisner2451fc32010-08-24 13:43:11 +02001217 epoch = mdev->current_epoch;
1218 wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
1219
1220 D_ASSERT(atomic_read(&epoch->active) == 0);
1221 D_ASSERT(epoch->flags == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001222
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001223 return true;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001224 default:
1225 dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001226 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001227 }
1228
1229 epoch->flags = 0;
1230 atomic_set(&epoch->epoch_size, 0);
1231 atomic_set(&epoch->active, 0);
1232
1233 spin_lock(&mdev->epoch_lock);
1234 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1235 list_add(&epoch->list, &mdev->current_epoch->list);
1236 mdev->current_epoch = epoch;
1237 mdev->epochs++;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001238 } else {
1239 /* The current_epoch got recycled while we allocated this one... */
1240 kfree(epoch);
1241 }
1242 spin_unlock(&mdev->epoch_lock);
1243
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001244 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001245}
1246
1247/* used from receive_RSDataReply (recv_resync_read)
1248 * and from receive_Data */
1249static struct drbd_epoch_entry *
1250read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1251{
Lars Ellenberg66660322010-04-06 12:15:04 +02001252 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001253 struct drbd_epoch_entry *e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001254 struct page *page;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001255 int dgs, ds, rr;
Philipp Reisnera0638452011-01-19 14:31:32 +01001256 void *dig_in = mdev->tconn->int_dig_in;
1257 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001258 unsigned long *data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001259
Philipp Reisnera0638452011-01-19 14:31:32 +01001260 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1261 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001262
1263 if (dgs) {
1264 rr = drbd_recv(mdev, dig_in, dgs);
1265 if (rr != dgs) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001266 if (!signal_pending(current))
1267 dev_warn(DEV,
1268 "short read receiving data digest: read %d expected %d\n",
1269 rr, dgs);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001270 return NULL;
1271 }
1272 }
1273
1274 data_size -= dgs;
1275
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001276 if (!expect(data_size != 0))
1277 return NULL;
1278 if (!expect(IS_ALIGNED(data_size, 512)))
1279 return NULL;
1280 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1281 return NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001282
Lars Ellenberg66660322010-04-06 12:15:04 +02001283 /* even though we trust out peer,
1284 * we sometimes have to double check. */
1285 if (sector + (data_size>>9) > capacity) {
Lars Ellenbergfdda6542011-01-24 15:11:01 +01001286 dev_err(DEV, "request from peer beyond end of local disk: "
1287 "capacity: %llus < sector: %llus + size: %u\n",
Lars Ellenberg66660322010-04-06 12:15:04 +02001288 (unsigned long long)capacity,
1289 (unsigned long long)sector, data_size);
1290 return NULL;
1291 }
1292
Philipp Reisnerb411b362009-09-25 16:07:19 -07001293 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1294 * "criss-cross" setup, that might cause write-out on some other DRBD,
1295 * which in turn might block on the other node at this very place. */
1296 e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1297 if (!e)
1298 return NULL;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001299
Philipp Reisnerb411b362009-09-25 16:07:19 -07001300 ds = data_size;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001301 page = e->pages;
1302 page_chain_for_each(page) {
1303 unsigned len = min_t(int, ds, PAGE_SIZE);
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001304 data = kmap(page);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001305 rr = drbd_recv(mdev, data, len);
Andreas Gruenbacher0cf9d272010-12-07 10:43:29 +01001306 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001307 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1308 data[0] = data[0] ^ (unsigned long)-1;
1309 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001310 kunmap(page);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001311 if (rr != len) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001312 drbd_free_ee(mdev, e);
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001313 if (!signal_pending(current))
1314 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1315 rr, len);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001316 return NULL;
1317 }
1318 ds -= rr;
1319 }
1320
1321 if (dgs) {
Philipp Reisnera0638452011-01-19 14:31:32 +01001322 drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, e, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001323 if (memcmp(dig_in, dig_vv, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001324 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1325 (unsigned long long)sector, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001326 drbd_bcast_ee(mdev, "digest failed",
1327 dgs, dig_in, dig_vv, e);
1328 drbd_free_ee(mdev, e);
1329 return NULL;
1330 }
1331 }
1332 mdev->recv_cnt += data_size>>9;
1333 return e;
1334}
1335
1336/* drbd_drain_block() just takes a data block
1337 * out of the socket input buffer, and discards it.
1338 */
1339static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1340{
1341 struct page *page;
1342 int rr, rv = 1;
1343 void *data;
1344
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001345 if (!data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001346 return true;
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001347
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001348 page = drbd_pp_alloc(mdev, 1, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001349
1350 data = kmap(page);
1351 while (data_size) {
1352 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1353 if (rr != min_t(int, data_size, PAGE_SIZE)) {
1354 rv = 0;
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001355 if (!signal_pending(current))
1356 dev_warn(DEV,
1357 "short read receiving data: read %d expected %d\n",
1358 rr, min_t(int, data_size, PAGE_SIZE));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001359 break;
1360 }
1361 data_size -= rr;
1362 }
1363 kunmap(page);
Lars Ellenberg435f0742010-09-06 12:30:25 +02001364 drbd_pp_free(mdev, page, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001365 return rv;
1366}
1367
1368static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1369 sector_t sector, int data_size)
1370{
1371 struct bio_vec *bvec;
1372 struct bio *bio;
1373 int dgs, rr, i, expect;
Philipp Reisnera0638452011-01-19 14:31:32 +01001374 void *dig_in = mdev->tconn->int_dig_in;
1375 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001376
Philipp Reisnera0638452011-01-19 14:31:32 +01001377 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1378 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001379
1380 if (dgs) {
1381 rr = drbd_recv(mdev, dig_in, dgs);
1382 if (rr != dgs) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001383 if (!signal_pending(current))
1384 dev_warn(DEV,
1385 "short read receiving data reply digest: read %d expected %d\n",
1386 rr, dgs);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001387 return 0;
1388 }
1389 }
1390
1391 data_size -= dgs;
1392
1393 /* optimistically update recv_cnt. if receiving fails below,
1394 * we disconnect anyways, and counters will be reset. */
1395 mdev->recv_cnt += data_size>>9;
1396
1397 bio = req->master_bio;
1398 D_ASSERT(sector == bio->bi_sector);
1399
1400 bio_for_each_segment(bvec, bio, i) {
1401 expect = min_t(int, data_size, bvec->bv_len);
1402 rr = drbd_recv(mdev,
1403 kmap(bvec->bv_page)+bvec->bv_offset,
1404 expect);
1405 kunmap(bvec->bv_page);
1406 if (rr != expect) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001407 if (!signal_pending(current))
1408 dev_warn(DEV, "short read receiving data reply: "
1409 "read %d expected %d\n",
1410 rr, expect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001411 return 0;
1412 }
1413 data_size -= rr;
1414 }
1415
1416 if (dgs) {
Philipp Reisnera0638452011-01-19 14:31:32 +01001417 drbd_csum_bio(mdev, mdev->tconn->integrity_r_tfm, bio, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001418 if (memcmp(dig_in, dig_vv, dgs)) {
1419 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1420 return 0;
1421 }
1422 }
1423
1424 D_ASSERT(data_size == 0);
1425 return 1;
1426}
1427
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->i.sector;
	int ok;

	D_ASSERT(drbd_interval_empty(&e->i));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->i.size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->i.size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

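/* Write a resync data packet to the local disk.  The epoch entry is
 * queued on sync_ee and acknowledged from e_end_resync_block() above;
 * the caller's get_ldev() reference is dropped here on failure, or in
 * drbd_endio_sec once the write completes. */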
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

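/* Look up the request object a peer reply refers to.  The peer echoes our
 * request pointer back as the block_id; it is validated by checking that
 * the request's interval is actually present in the given tree. */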
static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
{
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i))
		return req;
	if (!missing_ok) {
		dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
	}
	return NULL;
}

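/* P_DATA_REPLY: the payload for one of our own read requests (served by
 * the peer, e.g. while we have no usable local disk) has arrived.
 * Receive it into the request's bio and complete the request. */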
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->tconn->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (unlikely(!req))
		return false;

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

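/* P_RS_DATA_REPLY: resync data we requested has arrived.  Write it to
 * the local disk if possible; otherwise drain the payload from the
 * socket and send P_NEG_ACK so the peer can account for the failure. */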
static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
			       unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->tconn->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->i.sector;
	int ok = 1, pcmd;

	if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->i.size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->tconn->net_conf->two_primaries) {
		spin_lock_irq(&mdev->tconn->req_lock);
		D_ASSERT(!drbd_interval_empty(&e->i));
		drbd_remove_interval(&mdev->epoch_entries, &e->i);
		drbd_clear_interval(&e->i);
		spin_unlock_irq(&mdev->tconn->req_lock);
	} else
		D_ASSERT(drbd_interval_empty(&e->i));

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

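/* Resolve a write conflict detected in dual-primary mode by telling the
 * peer to discard its conflicting write.  Only reached in protocol C,
 * where the peer still waits for an ack for that write. */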
static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->tconn->req_lock);
	D_ASSERT(!drbd_interval_empty(&e->i));
	drbd_remove_interval(&mdev->epoch_entries, &e->i);
	drbd_clear_interval(&e->i);
	spin_unlock_irq(&mdev->tconn->req_lock);

	dec_unacked(mdev);

	return ok;
}

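/* Sequence number comparison under 32-bit wrap-around: the difference is
 * taken as a signed 32-bit value, so for example seq_greater(2, 0xfffffffe)
 * is true, because (s32)(2 - 0xfffffffe) == 4 > 0, i.e. 2 is "4 ahead"
 * of 0xfffffffe modulo 2^32. */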
static bool seq_greater(u32 a, u32 b)
{
	/*
	 * We assume 32-bit wrap-around here.
	 * For 24-bit wrap-around, we would have to shift:
	 * a <<= 8; b <<= 8;
	 */
	return (s32)a - (s32)b > 0;
}

static u32 seq_max(u32 a, u32 b)
{
	return seq_greater(a, b) ? a : b;
}

static void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq)
{
	unsigned int m;

	spin_lock(&mdev->peer_seq_lock);
	m = seq_max(mdev->peer_seq, new_seq);
	mdev->peer_seq = m;
	spin_unlock(&mdev->peer_seq_lock);
	if (m == new_seq)
		wake_up(&mdev->seq_wait);
}

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
	DEFINE_WAIT(wait);
	unsigned int p_seq;
	long timeout;
	int ret = 0;
	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (!seq_greater(packet_seq, mdev->peer_seq + 1))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
			break;
		}
	}
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
		mdev->peer_seq++;
	spin_unlock(&mdev->peer_seq_lock);
	return ret;
}

/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}

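/* Outline of receive_Data() below:
 *  - without a local disk: bump the peer sequence number, drain the
 *    payload off the socket and send P_NEG_ACK;
 *  - read the payload into an epoch entry and tie it to the current epoch;
 *  - in dual-primary mode, wait on the peer sequence number and resolve
 *    write conflicts via the interval trees (see the big comment inside);
 *  - submit, acking according to the wire protocol (C: when the local
 *    write completes, B: immediately with P_RECV_ACK, A: not at all). */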
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
			unsigned int data_size)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->tconn->data.rbuf.data;
	int rw = WRITE;
	u32 dp_flags;

	if (!get_ldev(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->tconn->net_conf->two_primaries) {
		spin_lock_irq(&mdev->tconn->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->i.size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		int first;

		D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our interval trees for conflicting requests:
		 *    we only need to check the write_requests tree; the
		 *    epoch_entries tree cannot contain any overlaps because
		 *    they were already eliminated on the submitting node.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the epoch_entries tree.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	 block the receiver, waiting on misc_wait
		 *	 until no more conflicting requests are there,
		 *	 or we get interrupted (disconnect).
		 *
		 *	 we do not just write after local io completion of those
		 *	 requests, but only after req is done completely, i.e.
		 *	 we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	 then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->tconn->req_lock);

		drbd_insert_interval(&mdev->epoch_entries, &e->i);

		first = 1;
		for (;;) {
			struct drbd_interval *i;
			struct drbd_request *req2;
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);

			i = drbd_find_overlap(&mdev->write_requests, sector, size);
			if (i) {
				req2 = container_of(i, struct drbd_request, i);

				/* only ALERT on first iteration,
				 * we may be woken up early... */
				if (first)
					dev_alert(DEV, "%s[%u] Concurrent local write detected!"
					      " new: %llus +%u; pending: %llus +%u\n",
					      current->comm, current->pid,
					      (unsigned long long)sector, size,
					      (unsigned long long)req2->i.sector, req2->i.size);
				if (req2->rq_state & RQ_NET_PENDING)
					++have_unacked;
				++have_conflict;
			}
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->tconn->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return true;
			}

			if (signal_pending(current)) {
				drbd_remove_interval(&mdev->epoch_entries, &e->i);
				drbd_clear_interval(&e->i);

				spin_unlock_irq(&mdev->tconn->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			/* Indicate to wake up mdev->misc_wait upon completion. */
			req2->rq_state |= RQ_COLLISION;

			spin_unlock_irq(&mdev->tconn->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->tconn->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	switch (mdev->tconn->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->i.sector, e->i.size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->i.sector);
	}

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&e->w.list);
	drbd_remove_interval(&mdev->epoch_entries, &e->i);
	drbd_clear_interval(&e->i);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->i.sector);

out_interrupted:
	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 * amount (more than 64 sectors) of activity we cannot account for with our
 * own resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
			throttle = 1;
	}
	return throttle;
}


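/* receive_DataRequest() services reads requested by the peer:
 * P_DATA_REQUEST (application read from a diskless peer),
 * P_RS_DATA_REQUEST / P_CSUM_RS_REQUEST (resync) and P_OV_REQUEST /
 * P_OV_REPLY (online verify).  It sanity-checks the request, possibly
 * throttles resync reads against application IO, submits the local read,
 * and leaves sending the reply to the w_e_end_* callback set below. */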
static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
			       unsigned int digest_size)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->tconn->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
				cmdname(cmd));
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, digest_size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->digest = di;
		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->tconn->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(cmd));
		fault_type = DRBD_FAULT_MAX;
		goto out_free_e;
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}

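/* After split brain with 0 primaries: decide whose data survives, based
 * on the configured after-sb-0pri policy.  Return convention (shared with
 * the 1p/2p variants below): 1 = peer discards its data and syncs from
 * us, -1 = we discard ours and sync from the peer, -100 = no automatic
 * decision. */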
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->tconn->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = 1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv = 1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if (ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv = 1;
		else /* ( ch_self == ch_peer ) */
			/* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv = 1;
	}

	return rv;
}

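/* Split-brain recovery with exactly one primary; drbd_asb_recover_2p()
 * below handles the two-primaries case.  Policies that only make sense
 * with zero primaries are configuration errors here.  ASB_CONSENSUS
 * accepts the 0p verdict only if it matches the current roles, while
 * ASB_CALL_HELPER tries to demote this node when it is the losing
 * primary and invokes the pri-lost-after-sb helper if that fails. */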
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->tconn->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1 && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->tconn->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
		 text,
		 (unsigned long long)uuid[UI_CURRENT],
		 (unsigned long long)uuid[UI_BITMAP],
		 (unsigned long long)uuid[UI_HISTORY_START],
		 (unsigned long long)uuid[UI_HISTORY_END],
		 (unsigned long long)bits,
		 (unsigned long long)flags);
}

/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->tconn->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
		    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];

			dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->tconn->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
		    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}


	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}

/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->tconn->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
			     "assumption\n");
		}
	}

	if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
				 drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

2710/* returns 1 if invalid */
2711static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2712{
2713 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2714 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2715 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2716 return 0;
2717
2718 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2719 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2720 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2721 return 1;
2722
2723 /* everything else is valid if they are equal on both sides. */
2724 if (peer == self)
2725 return 0;
2726
2727 /* everything else is invalid. */
2728 return 1;
2729}
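/* Illustrative cases for cmp_after_sb() (editorial sketch, not in the
 * original source); only the complementary discard pair is accepted:
 *
 *	cmp_after_sb(ASB_DISCARD_REMOTE, ASB_DISCARD_LOCAL)  -> 0  valid
 *	cmp_after_sb(ASB_DISCARD_LOCAL,  ASB_DISCARD_REMOTE) -> 0  valid
 *	cmp_after_sb(ASB_DISCARD_LOCAL,  ASB_DISCARD_LOCAL)  -> 1  invalid
 *	cmp_after_sb(ASB_CALL_HELPER,    ASB_CALL_HELPER)    -> 0  valid
 *	cmp_after_sb(ASB_CALL_HELPER,    ASB_DISCONNECT)     -> 1  invalid
 */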
2730
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01002731static int receive_protocol(struct drbd_conf *mdev, enum drbd_packet cmd,
2732 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002733{
Philipp Reisnere42325a2011-01-19 13:55:45 +01002734 struct p_protocol *p = &mdev->tconn->data.rbuf.protocol;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002735 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002736 int p_want_lose, p_two_primaries, cf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002737 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2738
Philipp Reisnerb411b362009-09-25 16:07:19 -07002739 p_proto = be32_to_cpu(p->protocol);
2740 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2741 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2742 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002743 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002744 cf = be32_to_cpu(p->conn_flags);
2745 p_want_lose = cf & CF_WANT_LOSE;
2746
2747 clear_bit(CONN_DRY_RUN, &mdev->flags);
2748
2749 if (cf & CF_DRY_RUN)
2750 set_bit(CONN_DRY_RUN, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002751
Philipp Reisner89e58e72011-01-19 13:12:45 +01002752 if (p_proto != mdev->tconn->net_conf->wire_protocol) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002753 dev_err(DEV, "incompatible communication protocols\n");
2754 goto disconnect;
2755 }
2756
Philipp Reisner89e58e72011-01-19 13:12:45 +01002757 if (cmp_after_sb(p_after_sb_0p, mdev->tconn->net_conf->after_sb_0p)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002758 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2759 goto disconnect;
2760 }
2761
Philipp Reisner89e58e72011-01-19 13:12:45 +01002762 if (cmp_after_sb(p_after_sb_1p, mdev->tconn->net_conf->after_sb_1p)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002763 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2764 goto disconnect;
2765 }
2766
Philipp Reisner89e58e72011-01-19 13:12:45 +01002767 if (cmp_after_sb(p_after_sb_2p, mdev->tconn->net_conf->after_sb_2p)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002768 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2769 goto disconnect;
2770 }
2771
Philipp Reisner89e58e72011-01-19 13:12:45 +01002772 if (p_want_lose && mdev->tconn->net_conf->want_lose) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002773 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2774 goto disconnect;
2775 }
2776
Philipp Reisner89e58e72011-01-19 13:12:45 +01002777 if (p_two_primaries != mdev->tconn->net_conf->two_primaries) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002778 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2779 goto disconnect;
2780 }
2781
Philipp Reisner31890f42011-01-19 14:12:51 +01002782 if (mdev->tconn->agreed_pro_version >= 87) {
Philipp Reisner89e58e72011-01-19 13:12:45 +01002783 unsigned char *my_alg = mdev->tconn->net_conf->integrity_alg;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002784
2785 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002786 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002787
2788 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2789 if (strcmp(p_integrity_alg, my_alg)) {
2790 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2791 goto disconnect;
2792 }
2793 dev_info(DEV, "data-integrity-alg: %s\n",
2794 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2795 }
2796
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002797 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002798
2799disconnect:
2800 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002801 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002802}
2803
2804/* helper function
2805 * input: alg name, feature name
2806 * return: NULL (alg name was "")
2807 * ERR_PTR(error) if something goes wrong
2808 * or the crypto hash ptr, if it worked out ok. */
2809struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2810 const char *alg, const char *name)
2811{
2812 struct crypto_hash *tfm;
2813
2814 if (!alg[0])
2815 return NULL;
2816
2817 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2818 if (IS_ERR(tfm)) {
2819 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2820 alg, name, PTR_ERR(tfm));
2821 return tfm;
2822 }
2823 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2824 crypto_free_hash(tfm);
2825 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2826 return ERR_PTR(-EINVAL);
2827 }
2828 return tfm;
2829}
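/* Hedged caller sketch (editorial, mirroring the use in receive_SyncParam()
 * below).  IS_ERR() means the allocation or digest check failed and the
 * caller should bail out; a NULL return simply means the alg name was ""
 * and the feature stays disabled; anything else is a valid tfm:
 *
 *	tfm = drbd_crypto_alloc_digest_safe(mdev, alg, "verify-alg");
 *	if (IS_ERR(tfm))
 *		goto disconnect;
 *	// tfm is either NULL ("" -> disabled) or a tfm we must free later
 */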
2830
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01002831static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packet cmd,
2832 unsigned int packet_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002833{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002834 int ok = true;
Philipp Reisnere42325a2011-01-19 13:55:45 +01002835 struct p_rs_param_95 *p = &mdev->tconn->data.rbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002836 unsigned int header_size, data_size, exp_max_sz;
2837 struct crypto_hash *verify_tfm = NULL;
2838 struct crypto_hash *csums_tfm = NULL;
Philipp Reisner31890f42011-01-19 14:12:51 +01002839 const int apv = mdev->tconn->agreed_pro_version;
Philipp Reisner778f2712010-07-06 11:14:00 +02002840 int *rs_plan_s = NULL;
2841 int fifo_size = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002842
2843 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2844 : apv == 88 ? sizeof(struct p_rs_param)
2845 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002846 : apv <= 94 ? sizeof(struct p_rs_param_89)
2847 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002848
Philipp Reisner02918be2010-08-20 14:35:10 +02002849 if (packet_size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002850 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002851 packet_size, exp_max_sz);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002852 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002853 }
2854
2855 if (apv <= 88) {
Philipp Reisner257d0af2011-01-26 12:15:29 +01002856 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02002857 data_size = packet_size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002858 } else if (apv <= 94) {
Philipp Reisner257d0af2011-01-26 12:15:29 +01002859 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02002860 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002861 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002862 } else {
Philipp Reisner257d0af2011-01-26 12:15:29 +01002863 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02002864 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002865 D_ASSERT(data_size == 0);
2866 }
2867
2868 /* initialize verify_alg and csums_alg */
2869 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2870
Philipp Reisner02918be2010-08-20 14:35:10 +02002871 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002872 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002873
2874 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2875
2876 if (apv >= 88) {
2877 if (apv == 88) {
2878 if (data_size > SHARED_SECRET_MAX) {
2879 dev_err(DEV, "verify-alg too long, "
2880 "peer wants %u, accepting only %u byte\n",
2881 data_size, SHARED_SECRET_MAX);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002882 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002883 }
2884
2885 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002886 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002887
2888 /* we expect NUL terminated string */
2889 /* but just in case someone tries to be evil */
2890 D_ASSERT(p->verify_alg[data_size-1] == 0);
2891 p->verify_alg[data_size-1] = 0;
2892
2893 } else /* apv >= 89 */ {
2894 /* we still expect NUL terminated strings */
2895 /* but just in case someone tries to be evil */
2896 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2897 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2898 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2899 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2900 }
2901
2902 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2903 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2904 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2905 mdev->sync_conf.verify_alg, p->verify_alg);
2906 goto disconnect;
2907 }
2908 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2909 p->verify_alg, "verify-alg");
2910 if (IS_ERR(verify_tfm)) {
2911 verify_tfm = NULL;
2912 goto disconnect;
2913 }
2914 }
2915
2916 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2917 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2918 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2919 mdev->sync_conf.csums_alg, p->csums_alg);
2920 goto disconnect;
2921 }
2922 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2923 p->csums_alg, "csums-alg");
2924 if (IS_ERR(csums_tfm)) {
2925 csums_tfm = NULL;
2926 goto disconnect;
2927 }
2928 }
2929
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002930 if (apv > 94) {
2931 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2932 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2933 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2934 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2935 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02002936
2937 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2938 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2939 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2940 if (!rs_plan_s) {
2941 dev_err(DEV, "kmalloc of fifo_buffer failed");
2942 goto disconnect;
2943 }
2944 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002945 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002946
2947 spin_lock(&mdev->peer_seq_lock);
2948 /* lock against drbd_nl_syncer_conf() */
2949 if (verify_tfm) {
2950 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2951 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2952 crypto_free_hash(mdev->verify_tfm);
2953 mdev->verify_tfm = verify_tfm;
2954 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2955 }
2956 if (csums_tfm) {
2957 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2958 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2959 crypto_free_hash(mdev->csums_tfm);
2960 mdev->csums_tfm = csums_tfm;
2961 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2962 }
Philipp Reisner778f2712010-07-06 11:14:00 +02002963 if (fifo_size != mdev->rs_plan_s.size) {
2964 kfree(mdev->rs_plan_s.values);
2965 mdev->rs_plan_s.values = rs_plan_s;
2966 mdev->rs_plan_s.size = fifo_size;
2967 mdev->rs_planed = 0;
2968 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002969 spin_unlock(&mdev->peer_seq_lock);
2970 }
2971
2972 return ok;
2973disconnect:
2974 /* just for completeness: actually not needed,
2975 * as this is not reached if csums_tfm was ok. */
2976 crypto_free_hash(csums_tfm);
2977 /* but free the verify_tfm again, if csums_tfm did not work out */
2978 crypto_free_hash(verify_tfm);
2979 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002980 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002981}
2982
Philipp Reisnerb411b362009-09-25 16:07:19 -07002983/* warn if the arguments differ by more than 12.5% */
2984static void warn_if_differ_considerably(struct drbd_conf *mdev,
2985 const char *s, sector_t a, sector_t b)
2986{
2987 sector_t d;
2988 if (a == 0 || b == 0)
2989 return;
2990 d = (a > b) ? (a - b) : (b - a);
2991 if (d > (a>>3) || d > (b>>3))
2992 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2993 (unsigned long long)a, (unsigned long long)b);
2994}
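/* Worked example for the 12.5% check above (editorial, made-up numbers):
 * a = 1000, b = 1200  =>  d = 200, a>>3 = 125, so d > (a>>3) and we warn;
 * a = 1000, b = 1100  =>  d = 100, which exceeds neither 125 nor 137,
 * so we stay quiet.  In effect the warning fires whenever the difference
 * exceeds 12.5% of the smaller of the two sizes.
 */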
2995
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01002996static int receive_sizes(struct drbd_conf *mdev, enum drbd_packet cmd,
2997 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002998{
Philipp Reisnere42325a2011-01-19 13:55:45 +01002999 struct p_sizes *p = &mdev->tconn->data.rbuf.sizes;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003000 enum determine_dev_size dd = unchanged;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003001 sector_t p_size, p_usize, my_usize;
3002 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003003 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003004
Philipp Reisnerb411b362009-09-25 16:07:19 -07003005 p_size = be64_to_cpu(p->d_size);
3006 p_usize = be64_to_cpu(p->u_size);
3007
3008 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
3009 dev_err(DEV, "some backing storage is needed\n");
3010 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003011 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003012 }
3013
3014 /* just store the peer's disk size for now.
3015 * we still need to figure out whether we accept that. */
3016 mdev->p_size = p_size;
3017
Philipp Reisnerb411b362009-09-25 16:07:19 -07003018 if (get_ldev(mdev)) {
3019 warn_if_differ_considerably(mdev, "lower level device sizes",
3020 p_size, drbd_get_max_capacity(mdev->ldev));
3021 warn_if_differ_considerably(mdev, "user requested size",
3022 p_usize, mdev->ldev->dc.disk_size);
3023
3024 /* if this is the first connect, or an otherwise expected
3025 * param exchange, choose the minimum */
3026 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3027 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
3028 p_usize);
3029
3030 my_usize = mdev->ldev->dc.disk_size;
3031
3032 if (mdev->ldev->dc.disk_size != p_usize) {
3033 mdev->ldev->dc.disk_size = p_usize;
3034 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3035 (unsigned long)mdev->ldev->dc.disk_size);
3036 }
3037
3038 /* Never shrink a device with usable data during connect.
3039 But allow online shrinking if we are connected. */
Philipp Reisnera393db62009-12-22 13:35:52 +01003040 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
Philipp Reisnerb411b362009-09-25 16:07:19 -07003041 drbd_get_capacity(mdev->this_bdev) &&
3042 mdev->state.disk >= D_OUTDATED &&
3043 mdev->state.conn < C_CONNECTED) {
3044 dev_err(DEV, "The peer's disk size is too small!\n");
3045 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3046 mdev->ldev->dc.disk_size = my_usize;
3047 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003048 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003049 }
3050 put_ldev(mdev);
3051 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003052
Philipp Reisnere89b5912010-03-24 17:11:33 +01003053 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003054 if (get_ldev(mdev)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02003055 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003056 put_ldev(mdev);
3057 if (dd == dev_size_error)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003058 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003059 drbd_md_sync(mdev);
3060 } else {
3061 /* I am diskless, need to accept the peer's size. */
3062 drbd_set_my_capacity(mdev, p_size);
3063 }
3064
Philipp Reisner99432fc2011-05-20 16:39:13 +02003065 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3066 drbd_reconsider_max_bio_size(mdev);
3067
Philipp Reisnerb411b362009-09-25 16:07:19 -07003068 if (get_ldev(mdev)) {
3069 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3070 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3071 ldsc = 1;
3072 }
3073
Philipp Reisnerb411b362009-09-25 16:07:19 -07003074 put_ldev(mdev);
3075 }
3076
3077 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3078 if (be64_to_cpu(p->c_size) !=
3079 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3080 /* we have different sizes, probably peer
3081 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003082 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003083 }
3084 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3085 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3086 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003087 mdev->state.disk >= D_INCONSISTENT) {
3088 if (ddsf & DDSF_NO_RESYNC)
3089 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3090 else
3091 resync_after_online_grow(mdev);
3092 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003093 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3094 }
3095 }
3096
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003097 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003098}
3099
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003100static int receive_uuids(struct drbd_conf *mdev, enum drbd_packet cmd,
3101 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003102{
Philipp Reisnere42325a2011-01-19 13:55:45 +01003103 struct p_uuids *p = &mdev->tconn->data.rbuf.uuids;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003104 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003105 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003106
Philipp Reisnerb411b362009-09-25 16:07:19 -07003107 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3108
3109 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3110 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3111
3112 kfree(mdev->p_uuid);
3113 mdev->p_uuid = p_uuid;
3114
3115 if (mdev->state.conn < C_CONNECTED &&
3116 mdev->state.disk < D_INCONSISTENT &&
3117 mdev->state.role == R_PRIMARY &&
3118 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3119 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3120 (unsigned long long)mdev->ed_uuid);
3121 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003122 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003123 }
3124
3125 if (get_ldev(mdev)) {
3126 int skip_initial_sync =
3127 mdev->state.conn == C_CONNECTED &&
Philipp Reisner31890f42011-01-19 14:12:51 +01003128 mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003129 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3130 (p_uuid[UI_FLAGS] & 8);
3131 if (skip_initial_sync) {
3132 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3133 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003134 "clear_n_write from receive_uuids",
3135 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003136 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3137 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3138 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3139 CS_VERBOSE, NULL);
3140 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003141 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003142 }
3143 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003144 } else if (mdev->state.disk < D_INCONSISTENT &&
3145 mdev->state.role == R_PRIMARY) {
3146 /* I am a diskless primary, the peer just created a new current UUID
3147 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003148 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003149 }
3150
3151 /* Before we test for the disk state, we should wait until a possibly
3152 ongoing cluster wide state change has finished. That is important if
3153 we are primary and are detaching from our disk, since we need to see the
3154 new disk state... */
3155 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3156 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003157 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3158
3159 if (updated_uuids)
3160 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003161
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003162 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003163}
3164
3165/**
3166 * convert_state() - Converts the peer's view of the cluster state to our point of view
3167 * @ps: The state as seen by the peer.
3168 */
3169static union drbd_state convert_state(union drbd_state ps)
3170{
3171 union drbd_state ms;
3172
3173 static enum drbd_conns c_tab[] = {
3174 [C_CONNECTED] = C_CONNECTED,
3175
3176 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3177 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3178 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3179 [C_VERIFY_S] = C_VERIFY_T,
3180 [C_MASK] = C_MASK,
3181 };
3182
3183 ms.i = ps.i;
3184
3185 ms.conn = c_tab[ps.conn];
3186 ms.peer = ps.role;
3187 ms.role = ps.peer;
3188 ms.pdsk = ps.disk;
3189 ms.disk = ps.pdsk;
3190 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3191
3192 return ms;
3193}
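/* Editorial example of the mirroring above (not in the original source):
 * if the peer reports ps = { .role = R_PRIMARY, .peer = R_SECONDARY,
 * .disk = D_UP_TO_DATE, .pdsk = D_INCONSISTENT, .conn = C_STARTING_SYNC_S },
 * our converted view is ms = { .role = R_SECONDARY, .peer = R_PRIMARY,
 * .disk = D_INCONSISTENT, .pdsk = D_UP_TO_DATE, .conn = C_STARTING_SYNC_T }.
 */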
3194
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003195static int receive_req_state(struct drbd_conf *mdev, enum drbd_packet cmd,
3196 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003197{
Philipp Reisnere42325a2011-01-19 13:55:45 +01003198 struct p_req_state *p = &mdev->tconn->data.rbuf.req_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003199 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003200 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003201
Philipp Reisnerb411b362009-09-25 16:07:19 -07003202 mask.i = be32_to_cpu(p->mask);
3203 val.i = be32_to_cpu(p->val);
3204
3205 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3206 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3207 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003208 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003209 }
3210
3211 mask = convert_state(mask);
3212 val = convert_state(val);
3213
3214 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3215
3216 drbd_send_sr_reply(mdev, rv);
3217 drbd_md_sync(mdev);
3218
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003219 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003220}
3221
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003222static int receive_state(struct drbd_conf *mdev, enum drbd_packet cmd,
3223 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003224{
Philipp Reisnere42325a2011-01-19 13:55:45 +01003225 struct p_state *p = &mdev->tconn->data.rbuf.state;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003226 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003227 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003228 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003229 int rv;
3230
Philipp Reisnerb411b362009-09-25 16:07:19 -07003231 peer_state.i = be32_to_cpu(p->state);
3232
3233 real_peer_disk = peer_state.disk;
3234 if (peer_state.disk == D_NEGOTIATING) {
3235 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3236 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3237 }
3238
Philipp Reisner87eeee42011-01-19 14:16:30 +01003239 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003240 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003241 os = ns = mdev->state;
Philipp Reisner87eeee42011-01-19 14:16:30 +01003242 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003243
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003244 /* peer says his disk is uptodate, while we think it is inconsistent,
3245 * and this happens while we think we have a sync going on. */
3246 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3247 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3248 /* If we are (becoming) SyncSource, but peer is still in sync
3249 * preparation, ignore its uptodate-ness to avoid flapping, it
3250 * will change to inconsistent once the peer reaches active
3251 * syncing states.
3252 * It may have changed syncer-paused flags, however, so we
3253 * cannot ignore this completely. */
3254 if (peer_state.conn > C_CONNECTED &&
3255 peer_state.conn < C_SYNC_SOURCE)
3256 real_peer_disk = D_INCONSISTENT;
3257
3258 /* if peer_state changes to connected at the same time,
3259 * it explicitly notifies us that it finished resync.
3260 * Maybe we should finish it up, too? */
3261 else if (os.conn >= C_SYNC_SOURCE &&
3262 peer_state.conn == C_CONNECTED) {
3263 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3264 drbd_resync_finished(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003265 return true;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003266 }
3267 }
3268
3269 /* peer says his disk is inconsistent, while we think it is uptodate,
3270 * and this happens while the peer still thinks we have a sync going on,
3271 * but we think we are already done with the sync.
3272 * We ignore this to avoid flapping pdsk.
3273 * This should not happen, if the peer is a recent version of drbd. */
3274 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3275 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3276 real_peer_disk = D_UP_TO_DATE;
3277
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003278 if (ns.conn == C_WF_REPORT_PARAMS)
3279 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003280
Philipp Reisner67531712010-10-27 12:21:30 +02003281 if (peer_state.conn == C_AHEAD)
3282 ns.conn = C_BEHIND;
3283
Philipp Reisnerb411b362009-09-25 16:07:19 -07003284 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3285 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3286 int cr; /* consider resync */
3287
3288 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003289 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003290 /* if we had an established connection
3291 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003292 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003293 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003294 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003295 /* if we have both been inconsistent, and the peer has been
3296 * forced to be UpToDate with --overwrite-data */
3297 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3298 /* if we had been plain connected, and the admin requested to
3299 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003300 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003301 (peer_state.conn >= C_STARTING_SYNC_S &&
3302 peer_state.conn <= C_WF_BITMAP_T));
3303
3304 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003305 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003306
3307 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003308 if (ns.conn == C_MASK) {
3309 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003310 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003311 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003312 } else if (peer_state.disk == D_NEGOTIATING) {
3313 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3314 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003315 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003316 } else {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003317 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003318 return false;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003319 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003320 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003321 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003322 }
3323 }
3324 }
3325
Philipp Reisner87eeee42011-01-19 14:16:30 +01003326 spin_lock_irq(&mdev->tconn->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003327 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003328 goto retry;
3329 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003330 ns.peer = peer_state.role;
3331 ns.pdsk = real_peer_disk;
3332 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003333 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003334 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003335 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3336 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003337 test_bit(NEW_CUR_UUID, &mdev->flags)) {
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01003338 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
Philipp Reisner481c6f52010-06-22 14:03:27 +02003339 for temporary network outages! */
Philipp Reisner87eeee42011-01-19 14:16:30 +01003340 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003341 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3342 tl_clear(mdev);
3343 drbd_uuid_new_current(mdev);
3344 clear_bit(NEW_CUR_UUID, &mdev->flags);
3345 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003346 return false;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003347 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003348 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003349 ns = mdev->state;
Philipp Reisner87eeee42011-01-19 14:16:30 +01003350 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003351
3352 if (rv < SS_SUCCESS) {
3353 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003354 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003355 }
3356
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003357 if (os.conn > C_WF_REPORT_PARAMS) {
3358 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003359 peer_state.disk != D_NEGOTIATING ) {
3360 /* we want resync, peer has not yet decided to sync... */
3361 /* Nowadays only used when forcing a node into primary role and
3362 setting its disk to UpToDate with that */
3363 drbd_send_uuids(mdev);
3364 drbd_send_state(mdev);
3365 }
3366 }
3367
Philipp Reisner89e58e72011-01-19 13:12:45 +01003368 mdev->tconn->net_conf->want_lose = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003369
3370 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3371
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003372 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003373}
3374
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003375static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packet cmd,
3376 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003377{
Philipp Reisnere42325a2011-01-19 13:55:45 +01003378 struct p_rs_uuid *p = &mdev->tconn->data.rbuf.rs_uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003379
3380 wait_event(mdev->misc_wait,
3381 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003382 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003383 mdev->state.conn < C_CONNECTED ||
3384 mdev->state.disk < D_NEGOTIATING);
3385
3386 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3387
Philipp Reisnerb411b362009-09-25 16:07:19 -07003388 /* Here the _drbd_uuid_ functions are right, current should
3389 _not_ be rotated into the history */
3390 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3391 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3392 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3393
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003394 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003395 drbd_start_resync(mdev, C_SYNC_TARGET);
3396
3397 put_ldev(mdev);
3398 } else
3399 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3400
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003401 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003402}
3403
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003404/**
3405 * receive_bitmap_plain
3406 *
3407 * Return 0 when done, 1 when another iteration is needed, and a negative error
3408 * code upon failure.
3409 */
3410static int
Philipp Reisner02918be2010-08-20 14:35:10 +02003411receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3412 unsigned long *buffer, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003413{
3414 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3415 unsigned want = num_words * sizeof(long);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003416 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003417
Philipp Reisner02918be2010-08-20 14:35:10 +02003418 if (want != data_size) {
3419 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003420 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003421 }
3422 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003423 return 0;
3424 err = drbd_recv(mdev, buffer, want);
3425 if (err != want) {
3426 if (err >= 0)
3427 err = -EIO;
3428 return err;
3429 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003430
3431 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3432
3433 c->word_offset += num_words;
3434 c->bit_offset = c->word_offset * BITS_PER_LONG;
3435 if (c->bit_offset > c->bm_bits)
3436 c->bit_offset = c->bm_bits;
3437
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003438 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003439}
3440
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003441/**
3442 * recv_bm_rle_bits
3443 *
3444 * Return 0 when done, 1 when another iteration is needed, and a negative error
3445 * code upon failure.
3446 */
3447static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003448recv_bm_rle_bits(struct drbd_conf *mdev,
3449 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01003450 struct bm_xfer_ctx *c,
3451 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003452{
3453 struct bitstream bs;
3454 u64 look_ahead;
3455 u64 rl;
3456 u64 tmp;
3457 unsigned long s = c->bit_offset;
3458 unsigned long e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003459 int toggle = DCBP_get_start(p);
3460 int have;
3461 int bits;
3462
3463 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3464
3465 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3466 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003467 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003468
3469 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3470 bits = vli_decode_bits(&rl, look_ahead);
3471 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003472 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003473
3474 if (toggle) {
3475 e = s + rl -1;
3476 if (e >= c->bm_bits) {
3477 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003478 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003479 }
3480 _drbd_bm_set_bits(mdev, s, e);
3481 }
3482
3483 if (have < bits) {
3484 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3485 have, bits, look_ahead,
3486 (unsigned int)(bs.cur.b - p->code),
3487 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003488 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003489 }
3490 look_ahead >>= bits;
3491 have -= bits;
3492
3493 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3494 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003495 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003496 look_ahead |= tmp << have;
3497 have += bits;
3498 }
3499
3500 c->bit_offset = s;
3501 bm_xfer_ctx_bit_to_word_offset(c);
3502
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003503 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003504}
3505
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003506/**
3507 * decode_bitmap_c
3508 *
3509 * Return 0 when done, 1 when another iteration is needed, and a negative error
3510 * code upon failure.
3511 */
3512static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003513decode_bitmap_c(struct drbd_conf *mdev,
3514 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01003515 struct bm_xfer_ctx *c,
3516 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003517{
3518 if (DCBP_get_code(p) == RLE_VLI_Bits)
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01003519 return recv_bm_rle_bits(mdev, p, c, len);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003520
3521 /* other variants had been implemented for evaluation,
3522 * but have been dropped as this one turned out to be "best"
3523 * during all our tests. */
3524
3525 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3526 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003527 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003528}
3529
3530void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3531 const char *direction, struct bm_xfer_ctx *c)
3532{
3533 /* what would it take to transfer it "plaintext" */
Philipp Reisnerc0129492011-01-19 16:58:16 +01003534 unsigned plain = sizeof(struct p_header) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003535 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3536 + c->bm_words * sizeof(long);
3537 unsigned total = c->bytes[0] + c->bytes[1];
3538 unsigned r;
3539
3540 /* total can not be zero. but just in case: */
3541 if (total == 0)
3542 return;
3543
3544 /* don't report if not compressed */
3545 if (total >= plain)
3546 return;
3547
3548 /* total < plain. check for overflow, still */
3549 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3550 : (1000 * total / plain);
3551
3552 if (r > 1000)
3553 r = 1000;
3554
3555 r = 1000 - r;
3556 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3557 "total %u; compression: %u.%u%%\n",
3558 direction,
3559 c->bytes[1], c->packets[1],
3560 c->bytes[0], c->packets[0],
3561 total, r/10, r % 10);
3562}
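/* Editorial arithmetic check (made-up numbers): with plain = 4096 bytes and
 * total = 512 bytes, r = 1000 * 512 / 4096 = 125, hence r = 1000 - 125 = 875
 * and the message above reports "compression: 87.5%".
 */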
3563
3564/* Since we are processing the bitfield from lower addresses to higher,
3565 it does not matter whether we process it in 32 bit chunks or 64 bit
3566 chunks, as long as it is little endian. (Understand it as a byte stream,
3567 beginning with the lowest byte...) If we used big endian
3568 we would need to process it from the highest address to the lowest,
3569 in order to be agnostic to the 32 vs 64 bits issue.
3570
3571 returns 0 on failure, 1 if we successfully received it. */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003572static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packet cmd,
3573 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003574{
3575 struct bm_xfer_ctx c;
3576 void *buffer;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003577 int err;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003578 int ok = false;
Philipp Reisner257d0af2011-01-26 12:15:29 +01003579 struct p_header *h = &mdev->tconn->data.rbuf.header;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003580
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003581 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3582 /* you are supposed to send additional out-of-sync information
3583 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003584
3585 /* maybe we should use some per thread scratch page,
3586 * and allocate that during initial device creation? */
3587 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3588 if (!buffer) {
3589 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3590 goto out;
3591 }
3592
3593 c = (struct bm_xfer_ctx) {
3594 .bm_bits = drbd_bm_bits(mdev),
3595 .bm_words = drbd_bm_words(mdev),
3596 };
3597
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003598 for(;;) {
Philipp Reisner02918be2010-08-20 14:35:10 +02003599 if (cmd == P_BITMAP) {
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003600 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
Philipp Reisner02918be2010-08-20 14:35:10 +02003601 } else if (cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003602 /* MAYBE: sanity check that we speak proto >= 90,
3603 * and the feature is enabled! */
3604 struct p_compressed_bm *p;
3605
Philipp Reisner02918be2010-08-20 14:35:10 +02003606 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003607 dev_err(DEV, "ReportCBitmap packet too large\n");
3608 goto out;
3609 }
3610 /* use the page buff */
3611 p = buffer;
3612 memcpy(p, h, sizeof(*h));
Philipp Reisner02918be2010-08-20 14:35:10 +02003613 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003614 goto out;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003615 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3616 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003617 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003618 }
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01003619 err = decode_bitmap_c(mdev, p, &c, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003620 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02003621 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003622 goto out;
3623 }
3624
Philipp Reisner02918be2010-08-20 14:35:10 +02003625 c.packets[cmd == P_BITMAP]++;
Philipp Reisner257d0af2011-01-26 12:15:29 +01003626 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header) + data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003627
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003628 if (err <= 0) {
3629 if (err < 0)
3630 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003631 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003632 }
Philipp Reisner02918be2010-08-20 14:35:10 +02003633 if (!drbd_recv_header(mdev, &cmd, &data_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003634 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003635 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003636
3637 INFO_bm_xfer_stats(mdev, "receive", &c);
3638
3639 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003640 enum drbd_state_rv rv;
3641
Philipp Reisnerb411b362009-09-25 16:07:19 -07003642 ok = !drbd_send_bitmap(mdev);
3643 if (!ok)
3644 goto out;
3645 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003646 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3647 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003648 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3649 /* admin may have requested C_DISCONNECTING,
3650 * other threads may have noticed network errors */
3651 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3652 drbd_conn_str(mdev->state.conn));
3653 }
3654
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003655 ok = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003656 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003657 drbd_bm_unlock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003658 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3659 drbd_start_resync(mdev, C_SYNC_SOURCE);
3660 free_page((unsigned long) buffer);
3661 return ok;
3662}
3663
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003664static int receive_skip(struct drbd_conf *mdev, enum drbd_packet cmd,
3665 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003666{
3667 /* TODO zero copy sink :) */
3668 static char sink[128];
3669 int size, want, r;
3670
Philipp Reisner02918be2010-08-20 14:35:10 +02003671 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3672 cmd, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003673
Philipp Reisner02918be2010-08-20 14:35:10 +02003674 size = data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003675 while (size > 0) {
3676 want = min_t(int, size, sizeof(sink));
3677 r = drbd_recv(mdev, sink, want);
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01003678 if (!expect(r > 0))
3679 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003680 size -= r;
3681 }
3682 return size == 0;
3683}
3684
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003685static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packet cmd,
3686 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003687{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003688 /* Make sure we've acked all the TCP data associated
3689 * with the data requests being unplugged */
Philipp Reisnere42325a2011-01-19 13:55:45 +01003690 drbd_tcp_quickack(mdev->tconn->data.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003691
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003692 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003693}
3694
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003695static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packet cmd,
3696 unsigned int data_size)
Philipp Reisner73a01a12010-10-27 14:33:00 +02003697{
Philipp Reisnere42325a2011-01-19 13:55:45 +01003698 struct p_block_desc *p = &mdev->tconn->data.rbuf.block_desc;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003699
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003700 switch (mdev->state.conn) {
3701 case C_WF_SYNC_UUID:
3702 case C_WF_BITMAP_T:
3703 case C_BEHIND:
3704 break;
3705 default:
3706 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3707 drbd_conn_str(mdev->state.conn));
3708 }
3709
Philipp Reisner73a01a12010-10-27 14:33:00 +02003710 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3711
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003712 return true;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003713}
3714
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003715typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packet cmd,
3716 unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003717
Philipp Reisner02918be2010-08-20 14:35:10 +02003718struct data_cmd {
3719 int expect_payload;
3720 size_t pkt_size;
3721 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003722};
3723
Philipp Reisner02918be2010-08-20 14:35:10 +02003724static struct data_cmd drbd_cmd_handler[] = {
3725 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3726 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3727 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3728 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
Philipp Reisner257d0af2011-01-26 12:15:29 +01003729 [P_BITMAP] = { 1, sizeof(struct p_header), receive_bitmap } ,
3730 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header), receive_bitmap } ,
3731 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header), receive_UnplugRemote },
Philipp Reisner02918be2010-08-20 14:35:10 +02003732 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3733 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
Philipp Reisner257d0af2011-01-26 12:15:29 +01003734 [P_SYNC_PARAM] = { 1, sizeof(struct p_header), receive_SyncParam },
3735 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header), receive_SyncParam },
Philipp Reisner02918be2010-08-20 14:35:10 +02003736 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3737 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3738 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3739 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3740 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3741 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3742 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3743 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3744 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3745 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02003746 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Philipp Reisner02918be2010-08-20 14:35:10 +02003747 /* anything missing from this table is in
3748 * the asender_tbl, see get_asender_cmd */
3749 [P_MAX_CMD] = { 0, 0, NULL },
3750};
3751
3752/* All handler functions that expect a sub-header get that sub-header in
Philipp Reisnere42325a2011-01-19 13:55:45 +01003753 mdev->tconn->data.rbuf.header.head.payload.
Philipp Reisner02918be2010-08-20 14:35:10 +02003754
Philipp Reisnere42325a2011-01-19 13:55:45 +01003755 Usually in mdev->tconn->data.rbuf.header.head the callback can find the usual
Philipp Reisner02918be2010-08-20 14:35:10 +02003756 p_header, but they may not rely on that, since there is also p_header95!
3757 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003758
3759static void drbdd(struct drbd_conf *mdev)
3760{
Philipp Reisnerc0129492011-01-19 16:58:16 +01003761 struct p_header *header = &mdev->tconn->data.rbuf.header;
Philipp Reisner02918be2010-08-20 14:35:10 +02003762 unsigned int packet_size;
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003763 enum drbd_packet cmd;
Philipp Reisner02918be2010-08-20 14:35:10 +02003764 size_t shs; /* sub header size */
3765 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003766
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01003767 while (get_t_state(&mdev->tconn->receiver) == RUNNING) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003768 drbd_thread_current_set_cpu(mdev);
Philipp Reisner02918be2010-08-20 14:35:10 +02003769 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3770 goto err_out;
3771
3772 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3773 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3774 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01003775 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003776
Philipp Reisnerc0129492011-01-19 16:58:16 +01003777 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(struct p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02003778 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3779 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3780 goto err_out;
3781 }
3782
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003783 if (shs) {
Philipp Reisnerc0129492011-01-19 16:58:16 +01003784 rv = drbd_recv(mdev, &header->payload, shs);
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003785 if (unlikely(rv != shs)) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003786 if (!signal_pending(current))
3787 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003788 goto err_out;
3789 }
3790 }
3791
Philipp Reisner02918be2010-08-20 14:35:10 +02003792 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3793
3794 if (unlikely(!rv)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003795 dev_err(DEV, "error receiving %s, l: %d!\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003796 cmdname(cmd), packet_size);
3797 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003798 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003799 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003800
Philipp Reisner02918be2010-08-20 14:35:10 +02003801 if (0) {
3802 err_out:
3803 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003804 }
Lars Ellenberg856c50c2010-10-14 13:37:40 +02003805 /* If we leave here, we probably want to update at least the
3806 * "Connected" indicator on stable storage. Do so explicitly here. */
3807 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003808}
3809
Philipp Reisner191d3cc2011-01-19 14:53:22 +01003810void drbd_flush_workqueue(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003811{
3812 struct drbd_wq_barrier barr;
3813
3814 barr.w.cb = w_prev_work_done;
3815 init_completion(&barr.done);
Philipp Reisner191d3cc2011-01-19 14:53:22 +01003816 drbd_queue_work(&tconn->data.work, &barr.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003817 wait_for_completion(&barr.done);
3818}
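/* Editorial note (relies on w_prev_work_done() completing barr.done, as its
 * name suggests): the barrier work item is handled by the worker only after
 * everything queued before it, so a hypothetical caller
 *
 *	drbd_flush_workqueue(mdev->tconn);
 *
 * returns only once all previously queued work on tconn->data.work has run.
 */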
3819
3820static void drbd_disconnect(struct drbd_conf *mdev)
3821{
3822 enum drbd_fencing_p fp;
3823 union drbd_state os, ns;
3824 int rv = SS_UNKNOWN_ERROR;
3825 unsigned int i;
3826
3827 if (mdev->state.conn == C_STANDALONE)
3828 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003829
3830 /* asender does not clean up anything. it must not interfere, either */
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01003831 drbd_thread_stop(&mdev->tconn->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003832 drbd_free_sock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003833
Philipp Reisner85719572010-07-21 10:20:17 +02003834 /* wait for current activity to cease. */
Philipp Reisner87eeee42011-01-19 14:16:30 +01003835 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003836 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3837 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3838 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003839 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003840
3841 /* We do not have data structures that would allow us to
3842 * get the rs_pending_cnt down to 0 again.
3843 * * On C_SYNC_TARGET we do not have any data structures describing
3844 * the pending RSDataRequest's we have sent.
3845 * * On C_SYNC_SOURCE there is no data structure that tracks
3846 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3847 * And no, it is not the sum of the reference counts in the
3848 * resync_LRU. The resync_LRU tracks the whole operation including
3849 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3850 * on the fly. */
3851 drbd_rs_cancel_all(mdev);
3852 mdev->rs_total = 0;
3853 mdev->rs_failed = 0;
3854 atomic_set(&mdev->rs_pending_cnt, 0);
3855 wake_up(&mdev->misc_wait);
3856
Philipp Reisner7fde2be2011-03-01 11:08:28 +01003857 del_timer(&mdev->request_timer);
3858
Philipp Reisnerb411b362009-09-25 16:07:19 -07003859 /* make sure syncer is stopped and w_resume_next_sg queued */
3860 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003861 resync_timer_fn((unsigned long)mdev);
3862
Philipp Reisnerb411b362009-09-25 16:07:19 -07003863 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3864 * w_make_resync_request etc. which may still be on the worker queue
3865 * to be "canceled" */
Philipp Reisner191d3cc2011-01-19 14:53:22 +01003866 drbd_flush_workqueue(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003867
3868 /* This also does reclaim_net_ee(). If we do this too early, we might
3869 * miss some resync ee and pages. */
3870 drbd_process_done_ee(mdev);
3871
3872 kfree(mdev->p_uuid);
3873 mdev->p_uuid = NULL;
3874
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003875 if (!is_susp(mdev->state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003876 tl_clear(mdev);
3877
Philipp Reisnerb411b362009-09-25 16:07:19 -07003878 dev_info(DEV, "Connection closed\n");
3879
3880 drbd_md_sync(mdev);
3881
3882 fp = FP_DONT_CARE;
3883 if (get_ldev(mdev)) {
3884 fp = mdev->ldev->dc.fencing;
3885 put_ldev(mdev);
3886 }
3887
Philipp Reisner87f7be42010-06-11 13:56:33 +02003888 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3889 drbd_try_outdate_peer_async(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003890
Philipp Reisner87eeee42011-01-19 14:16:30 +01003891 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003892 os = mdev->state;
3893 if (os.conn >= C_UNCONNECTED) {
3894 /* Do not restart in case we are C_DISCONNECTING */
3895 ns = os;
3896 ns.conn = C_UNCONNECTED;
3897 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3898 }
Philipp Reisner87eeee42011-01-19 14:16:30 +01003899 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003900
3901 if (os.conn == C_DISCONNECTING) {
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01003902 wait_event(mdev->tconn->net_cnt_wait, atomic_read(&mdev->tconn->net_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003903
Philipp Reisnera0638452011-01-19 14:31:32 +01003904 crypto_free_hash(mdev->tconn->cram_hmac_tfm);
3905 mdev->tconn->cram_hmac_tfm = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003906
Philipp Reisner89e58e72011-01-19 13:12:45 +01003907 kfree(mdev->tconn->net_conf);
3908 mdev->tconn->net_conf = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003909 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3910 }
3911
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003912 /* serialize with bitmap writeout triggered by the state change,
3913 * if any. */
3914 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3915
Philipp Reisnerb411b362009-09-25 16:07:19 -07003916 /* tcp_close and release of sendpage pages can be deferred. I don't
3917 * want to use SO_LINGER, because apparently it can be deferred for
3918 * more than 20 seconds (longest time I checked).
3919 *
3920 * Actually we don't care for exactly when the network stack does its
3921 * put_page(), but release our reference on these pages right here.
3922 */
3923 i = drbd_release_ee(mdev, &mdev->net_ee);
3924 if (i)
3925 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003926 i = atomic_read(&mdev->pp_in_use_by_net);
3927 if (i)
3928 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003929 i = atomic_read(&mdev->pp_in_use);
3930 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02003931 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003932
3933 D_ASSERT(list_empty(&mdev->read_ee));
3934 D_ASSERT(list_empty(&mdev->active_ee));
3935 D_ASSERT(list_empty(&mdev->sync_ee));
3936 D_ASSERT(list_empty(&mdev->done_ee));
3937
3938 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3939 atomic_set(&mdev->current_epoch->epoch_size, 0);
3940 D_ASSERT(list_empty(&mdev->current_epoch->list));
3941}
3942
3943/*
3944 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3945 * we can agree on is stored in agreed_pro_version.
3946 *
3947 * feature flags and the reserved array should be enough room for future
3948 * enhancements of the handshake protocol, and possible plugins...
3949 *
3950 * for now, they are expected to be zero, but ignored.
3951 */
3952static int drbd_send_handshake(struct drbd_conf *mdev)
3953{
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01003954 /* ASSERT current == mdev->tconn->receiver ... */
Philipp Reisnere42325a2011-01-19 13:55:45 +01003955 struct p_handshake *p = &mdev->tconn->data.sbuf.handshake;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003956 int ok;
3957
Philipp Reisnere42325a2011-01-19 13:55:45 +01003958 if (mutex_lock_interruptible(&mdev->tconn->data.mutex)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003959 dev_err(DEV, "interrupted during initial handshake\n");
3960 return 0; /* interrupted. not ok. */
3961 }
3962
Philipp Reisnere42325a2011-01-19 13:55:45 +01003963 if (mdev->tconn->data.socket == NULL) {
3964 mutex_unlock(&mdev->tconn->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003965 return 0;
3966 }
3967
3968 memset(p, 0, sizeof(*p));
3969 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3970 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
Philipp Reisnerc0129492011-01-19 16:58:16 +01003971 ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_HAND_SHAKE,
3972 &p->head, sizeof(*p), 0);
Philipp Reisnere42325a2011-01-19 13:55:45 +01003973 mutex_unlock(&mdev->tconn->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003974 return ok;
3975}
3976
3977/*
3978 * return values:
3979 * 1 yes, we have a valid connection
3980 * 0 oops, did not work out, please try again
3981 * -1 peer talks different language,
3982 * no point in trying again, please go standalone.
3983 */
3984static int drbd_do_handshake(struct drbd_conf *mdev)
3985{
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01003986 /* ASSERT current == mdev->tconn->receiver ... */
Philipp Reisnere42325a2011-01-19 13:55:45 +01003987 struct p_handshake *p = &mdev->tconn->data.rbuf.handshake;
Philipp Reisner02918be2010-08-20 14:35:10 +02003988 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3989 unsigned int length;
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003990 enum drbd_packet cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003991 int rv;
3992
3993 rv = drbd_send_handshake(mdev);
3994 if (!rv)
3995 return 0;
3996
Philipp Reisner02918be2010-08-20 14:35:10 +02003997 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003998 if (!rv)
3999 return 0;
4000
Philipp Reisner02918be2010-08-20 14:35:10 +02004001 if (cmd != P_HAND_SHAKE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004002 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004003 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004004 return -1;
4005 }
4006
Philipp Reisner02918be2010-08-20 14:35:10 +02004007 if (length != expect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004008 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004009 expect, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004010 return -1;
4011 }
4012
4013 rv = drbd_recv(mdev, &p->head.payload, expect);
4014
4015 if (rv != expect) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004016 if (!signal_pending(current))
4017 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004018 return 0;
4019 }
4020
Philipp Reisnerb411b362009-09-25 16:07:19 -07004021 p->protocol_min = be32_to_cpu(p->protocol_min);
4022 p->protocol_max = be32_to_cpu(p->protocol_max);
4023 if (p->protocol_max == 0)
4024 p->protocol_max = p->protocol_min;
4025
4026 if (PRO_VERSION_MAX < p->protocol_min ||
4027 PRO_VERSION_MIN > p->protocol_max)
4028 goto incompat;
4029
Philipp Reisner31890f42011-01-19 14:12:51 +01004030 mdev->tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004031
4032 dev_info(DEV, "Handshake successful: "
Philipp Reisner31890f42011-01-19 14:12:51 +01004033 "Agreed network protocol version %d\n", mdev->tconn->agreed_pro_version);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004034
4035 return 1;
4036
4037 incompat:
4038 dev_err(DEV, "incompatible DRBD dialects: "
4039 "I support %d-%d, peer supports %d-%d\n",
4040 PRO_VERSION_MIN, PRO_VERSION_MAX,
4041 p->protocol_min, p->protocol_max);
4042 return -1;
4043}
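/* Illustrative sketch, not from the DRBD source: the negotiation above
 * reduces to a range-overlap check followed by picking the highest
 * mutually supported version.  Hypothetical helper:
 */
static int agree_pro_version_example(int my_min, int my_max,
				     int peer_min, int peer_max)
{
	if (my_max < peer_min || my_min > peer_max)
		return -1;	/* incompatible dialects, go standalone */
	/* min(my_max, peer_max) */
	return my_max < peer_max ? my_max : peer_max;
}
/* e.g. agree_pro_version_example(86, 96, 89, 100) == 96,
 *      agree_pro_version_example(86, 96, 97, 99) == -1. */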
4044
4045#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4046static int drbd_do_auth(struct drbd_conf *mdev)
4047{
4048 dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4049 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004050 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004051}
4052#else
4053#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004054
4055/* Return value:
4056 1 - auth succeeded,
4057 0 - failed, try again (network error),
4058 -1 - auth failed, don't try again.
4059*/
4060
Philipp Reisnerb411b362009-09-25 16:07:19 -07004061static int drbd_do_auth(struct drbd_conf *mdev)
4062{
4063 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4064 struct scatterlist sg;
4065 char *response = NULL;
4066 char *right_response = NULL;
4067 char *peers_ch = NULL;
Philipp Reisner89e58e72011-01-19 13:12:45 +01004068 unsigned int key_len = strlen(mdev->tconn->net_conf->shared_secret);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004069 unsigned int resp_size;
4070 struct hash_desc desc;
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004071 enum drbd_packet cmd;
Philipp Reisner02918be2010-08-20 14:35:10 +02004072 unsigned int length;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004073 int rv;
4074
Philipp Reisnera0638452011-01-19 14:31:32 +01004075 desc.tfm = mdev->tconn->cram_hmac_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004076 desc.flags = 0;
4077
Philipp Reisnera0638452011-01-19 14:31:32 +01004078 rv = crypto_hash_setkey(mdev->tconn->cram_hmac_tfm,
Philipp Reisner89e58e72011-01-19 13:12:45 +01004079 (u8 *)mdev->tconn->net_conf->shared_secret, key_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004080 if (rv) {
4081 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004082 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004083 goto fail;
4084 }
4085
4086 get_random_bytes(my_challenge, CHALLENGE_LEN);
4087
4088 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4089 if (!rv)
4090 goto fail;
4091
Philipp Reisner02918be2010-08-20 14:35:10 +02004092 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004093 if (!rv)
4094 goto fail;
4095
Philipp Reisner02918be2010-08-20 14:35:10 +02004096 if (cmd != P_AUTH_CHALLENGE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004097 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004098 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004099 rv = 0;
4100 goto fail;
4101 }
4102
Philipp Reisner02918be2010-08-20 14:35:10 +02004103 if (length > CHALLENGE_LEN * 2) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004104 dev_err(DEV, "AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004105 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004106 goto fail;
4107 }
4108
Philipp Reisner02918be2010-08-20 14:35:10 +02004109 peers_ch = kmalloc(length, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004110 if (peers_ch == NULL) {
4111 dev_err(DEV, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004112 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004113 goto fail;
4114 }
4115
Philipp Reisner02918be2010-08-20 14:35:10 +02004116 rv = drbd_recv(mdev, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004117
Philipp Reisner02918be2010-08-20 14:35:10 +02004118 if (rv != length) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004119 if (!signal_pending(current))
4120 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004121 rv = 0;
4122 goto fail;
4123 }
4124
Philipp Reisnera0638452011-01-19 14:31:32 +01004125 resp_size = crypto_hash_digestsize(mdev->tconn->cram_hmac_tfm);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004126 response = kmalloc(resp_size, GFP_NOIO);
4127 if (response == NULL) {
4128 dev_err(DEV, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004129 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004130 goto fail;
4131 }
4132
4133 sg_init_table(&sg, 1);
Philipp Reisner02918be2010-08-20 14:35:10 +02004134 sg_set_buf(&sg, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004135
4136 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4137 if (rv) {
4138 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004139 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004140 goto fail;
4141 }
4142
4143 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4144 if (!rv)
4145 goto fail;
4146
Philipp Reisner02918be2010-08-20 14:35:10 +02004147 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004148 if (!rv)
4149 goto fail;
4150
Philipp Reisner02918be2010-08-20 14:35:10 +02004151 if (cmd != P_AUTH_RESPONSE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004152 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004153 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004154 rv = 0;
4155 goto fail;
4156 }
4157
Philipp Reisner02918be2010-08-20 14:35:10 +02004158 if (length != resp_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004159 dev_err(DEV, "AuthResponse payload of wrong size\n");
4160 rv = 0;
4161 goto fail;
4162 }
4163
4164 rv = drbd_recv(mdev, response, resp_size);
4165
4166 if (rv != resp_size) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004167 if (!signal_pending(current))
4168 dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004169 rv = 0;
4170 goto fail;
4171 }
4172
4173 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004174 if (right_response == NULL) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004175 dev_err(DEV, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004176 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004177 goto fail;
4178 }
4179
4180 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4181
4182 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4183 if (rv) {
4184 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004185 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004186 goto fail;
4187 }
4188
4189 rv = !memcmp(response, right_response, resp_size);
4190
4191 if (rv)
4192 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
Philipp Reisner89e58e72011-01-19 13:12:45 +01004193 resp_size, mdev->tconn->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004194 else
4195 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004196
4197 fail:
4198 kfree(peers_ch);
4199 kfree(response);
4200 kfree(right_response);
4201
4202 return rv;
4203}
4204#endif
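/* Illustrative sketch, not from the DRBD source: both peers in
 * drbd_do_auth() compute the same value, HMAC(shared_secret, challenge),
 * and authentication succeeds iff the received response equals the locally
 * computed digest.  The digest step in isolation, using the same
 * era-appropriate crypto API (hypothetical helper; the caller provides a
 * resp buffer of crypto_hash_digestsize(tfm) bytes):
 */
static int compute_auth_response_example(struct crypto_hash *tfm,
					 const u8 *secret, unsigned int key_len,
					 const u8 *challenge, unsigned int ch_len,
					 u8 *resp)
{
	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
	struct scatterlist sg;
	int err;

	err = crypto_hash_setkey(tfm, secret, key_len);
	if (err)
		return err;
	sg_init_one(&sg, challenge, ch_len);
	return crypto_hash_digest(&desc, &sg, ch_len, resp);
}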
4205
4206int drbdd_init(struct drbd_thread *thi)
4207{
4208 struct drbd_conf *mdev = thi->mdev;
4209 unsigned int minor = mdev_to_minor(mdev);
4210 int h;
4211
4212 sprintf(current->comm, "drbd%d_receiver", minor);
4213
4214 dev_info(DEV, "receiver (re)started\n");
4215
4216 do {
4217 h = drbd_connect(mdev);
4218 if (h == 0) {
4219 drbd_disconnect(mdev);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004220 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004221 }
4222 if (h == -1) {
4223 dev_warn(DEV, "Discarding network configuration.\n");
4224 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4225 }
4226 } while (h == 0);
4227
4228 if (h > 0) {
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01004229 if (get_net_conf(mdev->tconn)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004230 drbdd(mdev);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01004231 put_net_conf(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004232 }
4233 }
4234
4235 drbd_disconnect(mdev);
4236
4237 dev_info(DEV, "receiver terminated\n");
4238 return 0;
4239}
4240
4241/* ********* acknowledge sender ******** */
4242
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004243static int got_RqSReply(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004244{
Philipp Reisner257d0af2011-01-26 12:15:29 +01004245 struct p_req_state_reply *p = &mdev->tconn->meta.rbuf.req_state_reply;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004246
4247 int retcode = be32_to_cpu(p->retcode);
4248
4249 if (retcode >= SS_SUCCESS) {
4250 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4251 } else {
4252 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4253 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4254 drbd_set_st_err_str(retcode), retcode);
4255 }
4256 wake_up(&mdev->state_wait);
4257
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004258 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004259}
4260
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004261static int got_Ping(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004262{
4263 return drbd_send_ping_ack(mdev);
4264
4265}
4266
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004267static int got_PingAck(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004268{
4269 /* restore idle timeout */
Philipp Reisnere42325a2011-01-19 13:55:45 +01004270 mdev->tconn->meta.socket->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;
Philipp Reisner309d1602010-03-02 15:03:44 +01004271 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4272 wake_up(&mdev->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004273
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004274 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004275}
4276
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004277static int got_IsInSync(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004278{
Philipp Reisner257d0af2011-01-26 12:15:29 +01004279 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004280 sector_t sector = be64_to_cpu(p->sector);
4281 int blksize = be32_to_cpu(p->blksize);
4282
Philipp Reisner31890f42011-01-19 14:12:51 +01004283 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004284
4285 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4286
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004287 if (get_ldev(mdev)) {
4288 drbd_rs_complete_io(mdev, sector);
4289 drbd_set_in_sync(mdev, sector, blksize);
4290 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4291 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4292 put_ldev(mdev);
4293 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004294 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004295 atomic_add(blksize >> 9, &mdev->rs_sect_in); /* bytes -> 512-byte sectors */
Philipp Reisnerb411b362009-09-25 16:07:19 -07004296
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004297 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004298}
4299
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004300static int
4301validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4302 struct rb_root *root, const char *func,
4303 enum drbd_req_event what, bool missing_ok)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004304{
4305 struct drbd_request *req;
4306 struct bio_and_error m;
4307
Philipp Reisner87eeee42011-01-19 14:16:30 +01004308 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004309 req = find_request(mdev, root, id, sector, missing_ok, func);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004310 if (unlikely(!req)) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01004311 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004312 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004313 }
4314 __req_mod(req, what, &m);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004315 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004316
4317 if (m.bio)
4318 complete_master_bio(mdev, &m);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004319 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004320}
4321
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004322static int got_BlockAck(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004323{
Philipp Reisner257d0af2011-01-26 12:15:29 +01004324 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004325 sector_t sector = be64_to_cpu(p->sector);
4326 int blksize = be32_to_cpu(p->blksize);
4327 enum drbd_req_event what;
4328
4329 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4330
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01004331 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004332 drbd_set_in_sync(mdev, sector, blksize);
4333 dec_rs_pending(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004334 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004335 }
Philipp Reisner257d0af2011-01-26 12:15:29 +01004336 switch (cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004337 case P_RS_WRITE_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004338 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004339 what = WRITE_ACKED_BY_PEER_AND_SIS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004340 break;
4341 case P_WRITE_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004342 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004343 what = WRITE_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004344 break;
4345 case P_RECV_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004346 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004347 what = RECV_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004348 break;
4349 case P_DISCARD_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004350 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004351 what = CONFLICT_DISCARDED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004352 break;
4353 default:
4354 D_ASSERT(0);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004355 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004356 }
4357
4358 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004359 &mdev->write_requests, __func__,
4360 what, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004361}
4362
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004363static int got_NegAck(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004364{
Philipp Reisner257d0af2011-01-26 12:15:29 +01004365 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004366 sector_t sector = be64_to_cpu(p->sector);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004367 int size = be32_to_cpu(p->blksize);
Philipp Reisner89e58e72011-01-19 13:12:45 +01004368 bool missing_ok = mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A ||
4369 mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004370 bool found;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004371
4372 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4373
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01004374 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004375 dec_rs_pending(mdev);
4376 drbd_rs_failed_io(mdev, sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004377 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004378 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01004379
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004380 found = validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004381 &mdev->write_requests, __func__,
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004382 NEG_ACKED, missing_ok);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004383 if (!found) {
4384 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4385 The master bio might already be completed, therefore the
4386 request is no longer in the write_requests tree. */
4387 /* In Protocol B we might already have got a P_RECV_ACK
4388 but then get a P_NEG_ACK afterwards. */
4389 if (!missing_ok)
Philipp Reisner2deb8332011-01-17 18:39:18 +01004390 return false;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004391 drbd_set_out_of_sync(mdev, sector, size);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004392 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01004393 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004394}
4395
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004396static int got_NegDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004397{
Philipp Reisner257d0af2011-01-26 12:15:29 +01004398 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004399 sector_t sector = be64_to_cpu(p->sector);
4400
4401 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4402 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4403 (unsigned long long)sector, be32_to_cpu(p->blksize));
4404
4405 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004406 &mdev->read_requests, __func__,
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004407 NEG_ACKED, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004408}
4409
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004410static int got_NegRSDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004411{
4412 sector_t sector;
4413 int size;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004414 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004415
4416 sector = be64_to_cpu(p->sector);
4417 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004418
4419 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4420
4421 dec_rs_pending(mdev);
4422
4423 if (get_ldev_if_state(mdev, D_FAILED)) {
4424 drbd_rs_complete_io(mdev, sector);
Philipp Reisner257d0af2011-01-26 12:15:29 +01004425 switch (cmd) {
Philipp Reisnerd612d302010-12-27 10:53:28 +01004426 case P_NEG_RS_DREPLY:
4427 drbd_rs_failed_io(mdev, sector, size); /* fall through */
4428 case P_RS_CANCEL:
4429 break;
4430 default:
4431 D_ASSERT(0);
4432 put_ldev(mdev);
4433 return false;
4434 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004435 put_ldev(mdev);
4436 }
4437
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004438 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004439}
4440
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004441static int got_BarrierAck(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004442{
Philipp Reisner257d0af2011-01-26 12:15:29 +01004443 struct p_barrier_ack *p = &mdev->tconn->meta.rbuf.barrier_ack;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004444
4445 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4446
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004447 if (mdev->state.conn == C_AHEAD &&
4448 atomic_read(&mdev->ap_in_flight) == 0 &&
Philipp Reisner370a43e2011-01-14 16:03:11 +01004449 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4450 mdev->start_resync_timer.expires = jiffies + HZ;
4451 add_timer(&mdev->start_resync_timer);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004452 }
4453
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004454 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004455}
4456
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004457static int got_OVResult(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004458{
Philipp Reisner257d0af2011-01-26 12:15:29 +01004459 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004460 struct drbd_work *w;
4461 sector_t sector;
4462 int size;
4463
4464 sector = be64_to_cpu(p->sector);
4465 size = be32_to_cpu(p->blksize);
4466
4467 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4468
4469 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4470 drbd_ov_oos_found(mdev, sector, size);
4471 else
4472 ov_oos_print(mdev);
4473
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004474 if (!get_ldev(mdev))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004475 return true;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004476
Philipp Reisnerb411b362009-09-25 16:07:19 -07004477 drbd_rs_complete_io(mdev, sector);
4478 dec_rs_pending(mdev);
4479
Lars Ellenbergea5442a2010-11-05 09:48:01 +01004480 --mdev->ov_left;
4481
4482 /* let's advance progress step marks only for every other megabyte */
4483 if ((mdev->ov_left & 0x200) == 0x200)
4484 drbd_advance_rs_marks(mdev, mdev->ov_left);
4485
4486 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004487 w = kmalloc(sizeof(*w), GFP_NOIO);
4488 if (w) {
4489 w->cb = w_ov_finished;
Philipp Reisnere42325a2011-01-19 13:55:45 +01004490 drbd_queue_work_front(&mdev->tconn->data.work, w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004491 } else {
4492 dev_err(DEV, "kmalloc(w) failed.");
4493 ov_oos_print(mdev);
4494 drbd_resync_finished(mdev);
4495 }
4496 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004497 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004498 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004499}
4500
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004501static int got_skip(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004502{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004503 return true;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004504}
4505
Philipp Reisnerb411b362009-09-25 16:07:19 -07004506struct asender_cmd {
4507 size_t pkt_size;
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004508 int (*process)(struct drbd_conf *mdev, enum drbd_packet cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004509};
4510
4511static struct asender_cmd *get_asender_cmd(int cmd)
4512{
4513 static struct asender_cmd asender_tbl[] = {
4514 /* anything missing from this table is in
4515 * the drbd_cmd_handler (drbd_default_handler) table,
4516 * see the beginning of drbdd() */
Philipp Reisner257d0af2011-01-26 12:15:29 +01004517 [P_PING] = { sizeof(struct p_header), got_Ping },
4518 [P_PING_ACK] = { sizeof(struct p_header), got_PingAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07004519 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4520 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4521 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4522 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4523 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4524 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4525 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4526 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4527 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4528 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4529 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
Philipp Reisner02918be2010-08-20 14:35:10 +02004530 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
Philipp Reisnerd612d302010-12-27 10:53:28 +01004531 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
Philipp Reisnerb411b362009-09-25 16:07:19 -07004532 [P_MAX_CMD] = { 0, NULL },
4533 };
4534 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4535 return NULL;
4536 return &asender_tbl[cmd];
4537}
4538
4539int drbd_asender(struct drbd_thread *thi)
4540{
4541 struct drbd_conf *mdev = thi->mdev;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004542 struct p_header *h = &mdev->tconn->meta.rbuf.header;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004543 struct asender_cmd *cmd = NULL;
4544
Philipp Reisner257d0af2011-01-26 12:15:29 +01004545 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004546 void *buf = h;
4547 int received = 0;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004548 int expect = sizeof(struct p_header);
Lars Ellenbergf36af182011-03-09 22:44:55 +01004549 int ping_timeout_active = 0;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004550 int empty, pkt_size;
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004551 enum drbd_packet cmd_nr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004552
4553 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4554
4555 current->policy = SCHED_RR; /* Make this a realtime task! */
4556 current->rt_priority = 2; /* more important than all other tasks */
4557
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01004558 while (get_t_state(thi) == RUNNING) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004559 drbd_thread_current_set_cpu(mdev);
4560 if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01004561 if (!drbd_send_ping(mdev)) {
4562 dev_err(DEV, "drbd_send_ping has failed\n");
4563 goto reconnect;
4564 }
Philipp Reisnere42325a2011-01-19 13:55:45 +01004565 mdev->tconn->meta.socket->sk->sk_rcvtimeo =
Philipp Reisner89e58e72011-01-19 13:12:45 +01004566 mdev->tconn->net_conf->ping_timeo*HZ/10;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004567 ping_timeout_active = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004568 }
4569
4570 /* conditionally cork;
4571 * it may hurt latency if we cork without much to send */
Philipp Reisner89e58e72011-01-19 13:12:45 +01004572 if (!mdev->tconn->net_conf->no_cork &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07004573 3 < atomic_read(&mdev->unacked_cnt))
Philipp Reisnere42325a2011-01-19 13:55:45 +01004574 drbd_tcp_cork(mdev->tconn->meta.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004575 while (1) {
4576 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4577 flush_signals(current);
Lars Ellenberg0f8488e2010-10-13 18:19:23 +02004578 if (!drbd_process_done_ee(mdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004579 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004580 /* to avoid race with newly queued ACKs */
4581 set_bit(SIGNAL_ASENDER, &mdev->flags);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004582 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004583 empty = list_empty(&mdev->done_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004584 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004585 /* new ack may have been queued right here,
4586 * but then there is also a signal pending,
4587 * and we start over... */
4588 if (empty)
4589 break;
4590 }
4591 /* but unconditionally uncork unless disabled */
Philipp Reisner89e58e72011-01-19 13:12:45 +01004592 if (!mdev->tconn->net_conf->no_cork)
Philipp Reisnere42325a2011-01-19 13:55:45 +01004593 drbd_tcp_uncork(mdev->tconn->meta.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004594
4595 /* short circuit, recv_msg would return EINTR anyways. */
4596 if (signal_pending(current))
4597 continue;
4598
Philipp Reisnere42325a2011-01-19 13:55:45 +01004599 rv = drbd_recv_short(mdev, mdev->tconn->meta.socket,
Philipp Reisnerb411b362009-09-25 16:07:19 -07004600 buf, expect-received, 0);
4601 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4602
4603 flush_signals(current);
4604
4605 /* Note:
4606 * -EINTR (on meta) we got a signal
4607 * -EAGAIN (on meta) rcvtimeo expired
4608 * -ECONNRESET other side closed the connection
4609 * -ERESTARTSYS (on data) we got a signal
4610 * rv < 0 other than above: unexpected error!
4611 * rv == expected: full header or command
4612 * rv < expected: "woken" by signal during receive
4613 * rv == 0 : "connection shut down by peer"
4614 */
4615 if (likely(rv > 0)) {
4616 received += rv;
4617 buf += rv;
4618 } else if (rv == 0) {
4619 dev_err(DEV, "meta connection shut down by peer.\n");
4620 goto reconnect;
4621 } else if (rv == -EAGAIN) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004622 /* If the data socket received something meanwhile,
4623 * that is good enough: peer is still alive. */
Philipp Reisner31890f42011-01-19 14:12:51 +01004624 if (time_after(mdev->tconn->last_received,
Philipp Reisnere42325a2011-01-19 13:55:45 +01004625 jiffies - mdev->tconn->meta.socket->sk->sk_rcvtimeo))
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004626 continue;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004627 if (ping_timeout_active) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004628 dev_err(DEV, "PingAck did not arrive in time.\n");
4629 goto reconnect;
4630 }
4631 set_bit(SEND_PING, &mdev->flags);
4632 continue;
4633 } else if (rv == -EINTR) {
4634 continue;
4635 } else {
4636 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4637 goto reconnect;
4638 }
4639
4640 if (received == expect && cmd == NULL) {
Philipp Reisner257d0af2011-01-26 12:15:29 +01004641 if (!decode_header(mdev, h, &cmd_nr, &pkt_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004642 goto reconnect;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004643 cmd = get_asender_cmd(cmd_nr);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004644 if (unlikely(cmd == NULL)) {
Philipp Reisner257d0af2011-01-26 12:15:29 +01004645 dev_err(DEV, "unknown command %d on meta (l: %d)\n",
4646 cmd_nr, pkt_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004647 goto disconnect;
4648 }
4649 expect = cmd->pkt_size;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004650 if (pkt_size != expect - sizeof(struct p_header)) {
4651 dev_err(DEV, "Wrong packet size on meta (c: %d, l: %d)\n",
4652 cmd_nr, pkt_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004653 goto reconnect;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004654 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004655 }
4656 if (received == expect) {
Philipp Reisner31890f42011-01-19 14:12:51 +01004657 mdev->tconn->last_received = jiffies;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004658 D_ASSERT(cmd != NULL);
Philipp Reisner257d0af2011-01-26 12:15:29 +01004659 if (!cmd->process(mdev, cmd_nr))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004660 goto reconnect;
4661
Lars Ellenbergf36af182011-03-09 22:44:55 +01004662 /* the idle_timeout (ping-int)
4663 * has been restored in got_PingAck() */
4664 if (cmd == get_asender_cmd(P_PING_ACK))
4665 ping_timeout_active = 0;
4666
Philipp Reisnerb411b362009-09-25 16:07:19 -07004667 buf = h;
4668 received = 0;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004669 expect = sizeof(struct p_header);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004670 cmd = NULL;
4671 }
4672 }
4673
4674 if (0) {
4675reconnect:
4676 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
Lars Ellenberg856c50c2010-10-14 13:37:40 +02004677 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004678 }
4679 if (0) {
4680disconnect:
4681 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Lars Ellenberg856c50c2010-10-14 13:37:40 +02004682 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004683 }
4684 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4685
4686 D_ASSERT(mdev->state.conn < C_CONNECTED);
4687 dev_info(DEV, "asender terminated\n");
4688
4689 return 0;
4690}
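/* Illustrative sketch, not from the DRBD source: the asender receive loop
 * above is a two-phase accumulator.  It first collects sizeof(struct
 * p_header) bytes, decodes command and payload length, widens "expect" to
 * the full packet size, then collects the remainder and dispatches.
 * Skeleton with hypothetical helpers (signal, timeout and error handling
 * stripped):
 */
#define HDR_SIZE_EXAMPLE 8	/* stand-in for sizeof(struct p_header) */
extern int recv_some(char *buf, int len);			/* hypothetical */
extern int decode_hdr(const char *buf, int *cmd, int *payload);	/* hypothetical */
extern void dispatch(int cmd, const char *buf);			/* hypothetical */

static void asender_rx_skeleton_example(void)
{
	char buf[4096];
	int received = 0, expect = HDR_SIZE_EXAMPLE;
	int have_header = 0, cmd = -1, payload = 0;

	for (;;) {
		int rv = recv_some(buf + received, expect - received);
		if (rv <= 0)
			break;			/* peer closed, or error */
		received += rv;
		if (received < expect)
			continue;		/* keep accumulating */
		if (!have_header) {
			if (decode_hdr(buf, &cmd, &payload) < 0)
				break;
			expect = HDR_SIZE_EXAMPLE + payload;
			have_header = 1;
			if (received < expect)
				continue;	/* payload still pending */
		}
		dispatch(cmd, buf);		/* full packet assembled */
		received = 0;
		expect = HDR_SIZE_EXAMPLE;
		have_header = 0;
	}
}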