/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

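/* Free a whole chain with put_page() and return the number of pages freed;
 * the caller accounts for them (see drbd_pp_free()). */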
static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

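/* Try to satisfy a request for @number pages from the global drbd_pp_pool,
 * else allocate fresh pages with GFP_TRY.  If only part of the allocation
 * succeeds, the partial chain is given back to the pool and NULL is
 * returned; drbd_pp_alloc() below will retry "soon". */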
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_ee_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}

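/* Reclaim finished net_ee entries under the req_lock, but do the actual
 * freeing outside of it. */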
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_peer_request *
drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
	      unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_peer_request *peer_req;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

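/* Counterpart to drbd_alloc_ee(): free the digest (if any), the page chain
 * (with @is_net accounting) and the peer request itself. */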
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		       int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_pp_free(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}

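/* Splice the whole @list off under the req_lock and free every entry on
 * it; returns the number of entries freed. */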
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, peer_req, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,BARRIER_ACKED)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = peer_req->w.cb(mdev, &peer_req->w, !ok) && ok;
		drbd_free_ee(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}

/* see also kernel_accept, which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(const char **what, struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

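/* Receive into a kernel buffer.  Without explicit @flags this blocks until
 * the full @size arrived (MSG_WAITALL); the set_fs(KERNEL_DS) dance is
 * needed because sock_recvmsg() expects a user-space iovec here. */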
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

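/* Like drbd_recv_short(), but always on the data socket, with logging of
 * the various failure modes; any short read forces C_BROKEN_PIPE. */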
static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->tconn->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

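/* Active connect to the peer, with the source address bound to our
 * configured IP.  "Soft" errors (timeout, peer not reachable yet) keep us
 * in C_WF_CONNECTION, anything else forces C_DISCONNECTING. */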
static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = tconn->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, tconn->net_conf->my_addr,
	       min_t(int, tconn->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)tconn->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)tconn->net_conf->peer_addr,
				 tconn->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			conn_err(tconn, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(tconn->volume0, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(tconn);
	return sock;
}

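/* Passive side: bind, listen, and accept one connection, with the timeout
 * randomly jittered so both nodes do not keep connecting in lock-step. */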
static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = tconn->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) tconn->net_conf->my_addr,
			      tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(&what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			drbd_force_state(tconn->volume0, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(tconn);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev, struct socket *sock,
			enum drbd_packet cmd)
{
	struct p_header *h = &mdev->tconn->data.sbuf.header;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packet drbd_recv_fp(struct drbd_conf *mdev,
				     struct socket *sock)
{
	struct p_header80 *h = &mdev->tconn->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->tconn->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
	mdev->tconn->agreed_pro_version = 99;
	/* agreed_pro_version must be smaller than 100 so we send the old
	   header (h80) in the first packet and in the handshake packet. */

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev->tconn);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(mdev->tconn->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev->tconn);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->tconn->data.socket = sock;
	mdev->tconn->meta.socket = msock;
	mdev->tconn->last_received = jiffies;

	D_ASSERT(mdev->tconn->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->tconn->asender);

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

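/* Distinguish the two wire header formats by their magic: the old fixed
 * h80 header, and the h95 header whose low 24 bits of "length" allow
 * bigger packets. */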
static bool decode_header(struct drbd_conf *mdev, struct p_header *h,
			  enum drbd_packet *cmd, unsigned int *packet_size)
{
	if (h->h80.magic == cpu_to_be32(DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length) & 0x00ffffff;
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	return true;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packet *cmd,
			    unsigned int *packet_size)
{
	struct p_header *h = &mdev->tconn->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	r = decode_header(mdev, h, cmd, packet_size);
	mdev->tconn->last_received = jiffies;

	return r;
}

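/* Flush the volatile cache of our backing device, if the current write
 * ordering policy requires it; on failure, fall back to WO_drain_io
 * instead of retrying. */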
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee() - Submit the bios for a peer request
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		   const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&peer_req->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

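/* Take the peer request out of the conflict detection interval tree and
 * wake up anyone waiting on that interval. */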
static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
{
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete. */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packet cmd,
			   unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->tconn->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_peer_request *peer_req;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	if (!expect(data_size != 0))
		return NULL;
	if (!expect(IS_ALIGNED(data_size, 512)))
		return NULL;
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
		return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!peer_req)
		return NULL;

	ds = data_size;
	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, peer_req);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
					rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, peer_req);
			drbd_free_ee(mdev, peer_req);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return peer_req;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

1383static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1384 sector_t sector, int data_size)
1385{
1386 struct bio_vec *bvec;
1387 struct bio *bio;
1388 int dgs, rr, i, expect;
Philipp Reisnera0638452011-01-19 14:31:32 +01001389 void *dig_in = mdev->tconn->int_dig_in;
1390 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001391
Philipp Reisnera0638452011-01-19 14:31:32 +01001392 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1393 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001394
1395 if (dgs) {
1396 rr = drbd_recv(mdev, dig_in, dgs);
1397 if (rr != dgs) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001398 if (!signal_pending(current))
1399 dev_warn(DEV,
1400 "short read receiving data reply digest: read %d expected %d\n",
1401 rr, dgs);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001402 return 0;
1403 }
1404 }
1405
1406 data_size -= dgs;
1407
1408 /* optimistically update recv_cnt. if receiving fails below,
1409 * we disconnect anyways, and counters will be reset. */
1410 mdev->recv_cnt += data_size>>9;
1411
1412 bio = req->master_bio;
1413 D_ASSERT(sector == bio->bi_sector);
1414
1415 bio_for_each_segment(bvec, bio, i) {
1416 expect = min_t(int, data_size, bvec->bv_len);
1417 rr = drbd_recv(mdev,
1418 kmap(bvec->bv_page)+bvec->bv_offset,
1419 expect);
1420 kunmap(bvec->bv_page);
1421 if (rr != expect) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001422 if (!signal_pending(current))
1423 dev_warn(DEV, "short read receiving data reply: "
1424 "read %d expected %d\n",
1425 rr, expect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001426 return 0;
1427 }
1428 data_size -= rr;
1429 }
1430
1431 if (dgs) {
Philipp Reisnera0638452011-01-19 14:31:32 +01001432 drbd_csum_bio(mdev, mdev->tconn->integrity_r_tfm, bio, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001433 if (memcmp(dig_in, dig_vv, dgs)) {
1434 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1435 return 0;
1436 }
1437 }
1438
1439 D_ASSERT(data_size == 0);
1440 return 1;
1441}
1442
1443/* e_end_resync_block() is called via
1444 * drbd_process_done_ee() by asender only */
1445static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1446{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001447 struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
1448 sector_t sector = peer_req->i.sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001449 int ok;
1450
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001451 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001452
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001453 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1454 drbd_set_in_sync(mdev, sector, peer_req->i.size);
1455 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001456 } else {
1457 /* Record failure to sync */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001458 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001459
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001460 ok = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001461 }
1462 dec_unacked(mdev);
1463
1464 return ok;
1465}
1466
1467static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1468{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001469 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001470
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001471 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1472 if (!peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001473 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001474
1475 dec_rs_pending(mdev);
1476
Philipp Reisnerb411b362009-09-25 16:07:19 -07001477 inc_unacked(mdev);
1478 /* corresponding dec_unacked() in e_end_resync_block()
1479 * respective _drbd_clear_done_ee */
1480
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001481 peer_req->w.cb = e_end_resync_block;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001482
Philipp Reisner87eeee42011-01-19 14:16:30 +01001483 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001484 list_add(&peer_req->w.list, &mdev->sync_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001485 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001486
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001487 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001488 if (drbd_submit_ee(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001489 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001490
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001491 /* don't care for the reason here */
1492 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01001493 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001494 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001495 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001496
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001497 drbd_free_ee(mdev, peer_req);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001498fail:
1499 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001500 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001501}
1502
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001503static struct drbd_request *
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001504find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1505 sector_t sector, bool missing_ok, const char *func)
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001506{
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001507 struct drbd_request *req;
1508
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001509 /* Request object according to our peer */
1510 req = (struct drbd_request *)(unsigned long)id;
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001511 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001512 return req;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001513 if (!missing_ok) {
1514 dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
1515 (unsigned long)id, (unsigned long long)sector);
1516 }
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001517 return NULL;
1518}
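/*
 * Illustrative sketch, not part of the driver: the id round trip that
 * find_request() relies on.  When we send a request, the wire block_id
 * is our own struct drbd_request pointer; the peer echoes it back
 * unchanged, so recovery is just a cast, and the interval-tree check
 * above is what validates the pointer before it is trusted:
 *
 *	u64 block_id = (u64)(unsigned long)req;   // what we put on the wire
 *	...
 *	req = (struct drbd_request *)(unsigned long)block_id;  // echoed back
 *	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
 *		;	// only now is req known to be one of ours
 */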
1519
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001520static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
1521 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001522{
1523 struct drbd_request *req;
1524 sector_t sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001525 int ok;
Philipp Reisnere42325a2011-01-19 13:55:45 +01001526 struct p_data *p = &mdev->tconn->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001527
1528 sector = be64_to_cpu(p->sector);
1529
Philipp Reisner87eeee42011-01-19 14:16:30 +01001530 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001531 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001532 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001533 if (unlikely(!req))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001534 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001535
Bart Van Assche24c48302011-05-21 18:32:29 +02001536 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
Philipp Reisnerb411b362009-09-25 16:07:19 -07001537 * special casing it there for the various failure cases.
1538 * still no race with drbd_fail_pending_reads */
1539 ok = recv_dless_read(mdev, req, sector, data_size);
1540
1541 if (ok)
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001542 req_mod(req, DATA_RECEIVED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001543 /* else: nothing. handled from drbd_disconnect...
1544 * I don't think we may complete this just yet
1545 * in case we are "on-disconnect: freeze" */
1546
1547 return ok;
1548}
1549
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001550static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
1551 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001552{
1553 sector_t sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001554 int ok;
Philipp Reisnere42325a2011-01-19 13:55:45 +01001555 struct p_data *p = &mdev->tconn->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001556
1557 sector = be64_to_cpu(p->sector);
1558 D_ASSERT(p->block_id == ID_SYNCER);
1559
1560 if (get_ldev(mdev)) {
1561 /* data is submitted to disk within recv_resync_read.
1562 * corresponding put_ldev done below on error,
Andreas Gruenbacher9c508422011-01-14 21:19:36 +01001563 * or in drbd_endio_sec. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001564 ok = recv_resync_read(mdev, sector, data_size);
1565 } else {
1566 if (__ratelimit(&drbd_ratelimit_state))
1567 dev_err(DEV, "Can not write resync data to local disk.\n");
1568
1569 ok = drbd_drain_block(mdev, data_size);
1570
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001571 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001572 }
1573
Philipp Reisner778f2712010-07-06 11:14:00 +02001574 atomic_add(data_size >> 9, &mdev->rs_sect_in);
1575
Philipp Reisnerb411b362009-09-25 16:07:19 -07001576 return ok;
1577}
1578
1579/* e_end_block() is called via drbd_process_done_ee().
1580 * this means this function only runs in the asender thread
1581 */
1582static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1583{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001584 struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
1585 sector_t sector = peer_req->i.sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001586 int ok = 1, pcmd;
1587
Philipp Reisner89e58e72011-01-19 13:12:45 +01001588 if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001589 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001590 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1591 mdev->state.conn <= C_PAUSED_SYNC_T &&
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001592 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593 P_RS_WRITE_ACK : P_WRITE_ACK;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001594 ok &= drbd_send_ack(mdev, pcmd, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001595 if (pcmd == P_RS_WRITE_ACK)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001596 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001597 } else {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001598 ok = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001599 /* we expect it to be marked out of sync anyways...
1600 * maybe assert this? */
1601 }
1602 dec_unacked(mdev);
1603 }
1604 /* we delete from the conflict detection hash _after_ we sent out the
1605 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
Philipp Reisner89e58e72011-01-19 13:12:45 +01001606 if (mdev->tconn->net_conf->two_primaries) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01001607 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001608 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1609 drbd_remove_epoch_entry_interval(mdev, peer_req);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001610 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbb3bfe92011-01-21 15:59:23 +01001611 } else
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001612 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001613
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001614 drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001615
1616 return ok;
1617}
1618
1619static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1620{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001621 struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001622 int ok = 1;
1623
Philipp Reisner89e58e72011-01-19 13:12:45 +01001624 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001625 ok = drbd_send_ack(mdev, P_DISCARD_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001626
Philipp Reisner87eeee42011-01-19 14:16:30 +01001627 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001628 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1629 drbd_remove_epoch_entry_interval(mdev, peer_req);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001630 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001631
1632 dec_unacked(mdev);
1633
1634 return ok;
1635}
1636
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001637static bool seq_greater(u32 a, u32 b)
1638{
1639 /*
1640 * We assume 32-bit wrap-around here.
1641 * For 24-bit wrap-around, we would have to shift:
1642 * a <<= 8; b <<= 8;
1643 */
1644 return (s32)a - (s32)b > 0;
1645}
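/*
 * Worked example (illustrative only): the signed-difference trick across
 * the 32-bit wrap.  With a == 2 (just after the wrap) and
 * b == 0xfffffffe (just before it), a < b as unsigned values, but
 *
 *	(s32)a - (s32)b == 2 - (-2) == 4 > 0
 *
 * so seq_greater(2, 0xfffffffe) correctly reports the wrapped value as
 * newer.  This holds as long as the two sequence numbers are less than
 * 2^31 apart.
 */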
1646
1647static u32 seq_max(u32 a, u32 b)
1648{
1649 return seq_greater(a, b) ? a : b;
1650}
1651
Andreas Gruenbacher43ae0772011-02-03 18:42:08 +01001652static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001653{
Andreas Gruenbacher43ae0772011-02-03 18:42:08 +01001654 unsigned int old_peer_seq;
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001655
1656 spin_lock(&mdev->peer_seq_lock);
Andreas Gruenbacher43ae0772011-02-03 18:42:08 +01001657 old_peer_seq = mdev->peer_seq;
1658 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001659 spin_unlock(&mdev->peer_seq_lock);
Andreas Gruenbacher43ae0772011-02-03 18:42:08 +01001660 if (old_peer_seq != peer_seq)
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001661 wake_up(&mdev->seq_wait);
1662}
1663
Philipp Reisnerb411b362009-09-25 16:07:19 -07001664/* Called from receive_Data.
1665 * Synchronize packets on sock with packets on msock.
1666 *
1667 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1668 * packet traveling on msock, they are still processed in the order they have
1669 * been sent.
1670 *
1671 * Note: we don't care for Ack packets overtaking P_DATA packets.
1672 *
1673 * In case packet_seq is larger than mdev->peer_seq, there are
1674 * outstanding packets on the msock. We wait for them to arrive.
1675 * In case this is logically the next packet, we update mdev->peer_seq
1676 * ourselves. Correctly handles 32bit wrap around.
1677 *
1678 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1679 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1680 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1681 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1682 *
1683 * returns 0 if we may process the packet, -ETIMEDOUT if we waited 30
1684 * seconds in vain, -ERESTARTSYS if we were interrupted (by disconnect signal). */
1685static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1686{
1687 DEFINE_WAIT(wait);
1688 unsigned int p_seq;
1689 long timeout;
1690 int ret = 0;
1691 spin_lock(&mdev->peer_seq_lock);
1692 for (;;) {
1693 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001694 if (!seq_greater(packet_seq, mdev->peer_seq + 1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001695 break;
1696 if (signal_pending(current)) {
1697 ret = -ERESTARTSYS;
1698 break;
1699 }
1700 p_seq = mdev->peer_seq;
1701 spin_unlock(&mdev->peer_seq_lock);
1702 timeout = schedule_timeout(30*HZ);
1703 spin_lock(&mdev->peer_seq_lock);
1704 if (timeout == 0 && p_seq == mdev->peer_seq) {
1705 ret = -ETIMEDOUT;
1706 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1707 break;
1708 }
1709 }
1710 finish_wait(&mdev->seq_wait, &wait);
1711 if (mdev->peer_seq+1 == packet_seq)
1712 mdev->peer_seq++;
1713 spin_unlock(&mdev->peer_seq_lock);
1714 return ret;
1715}
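/*
 * Example of the intended semantics (illustrative, not driver code):
 * assume mdev->peer_seq == 7.  A data packet with seq_num 8 is the
 * logically next one, so the loop exits at once and peer_seq is bumped
 * to 8.  A packet with seq_num 9 implies the packet carrying 8 is still
 * in flight on the msock; the receiver then sleeps on seq_wait until
 * update_peer_seq() advances peer_seq, or gives up with -ETIMEDOUT
 * after 30 seconds without progress.
 */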
1716
Lars Ellenberg688593c2010-11-17 22:25:03 +01001717/* see also bio_flags_to_wire()
1718 * DRBD_REQ_*, because we need to semantically map the flags to data packet
1719 * flags and back: we may replicate to peers running other kernel versions. */
1720static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001721{
Lars Ellenberg688593c2010-11-17 22:25:03 +01001722 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1723 (dpf & DP_FUA ? REQ_FUA : 0) |
1724 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1725 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001726}
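/*
 * For instance (illustrative only): a mirrored write submitted on the
 * peer with FUA and flush semantics travels as DP_FUA | DP_FLUSH and is
 * resubmitted here as REQ_FUA | REQ_FLUSH, so the ordering guarantees
 * survive the wire even if the two nodes run kernels with different
 * REQ_* bit values:
 *
 *	rw = WRITE | wire_flags_to_bio(mdev, DP_FUA | DP_FLUSH);
 */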
1727
Philipp Reisnerb411b362009-09-25 16:07:19 -07001728/* mirrored write */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001729static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
1730 unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001731{
1732 sector_t sector;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001733 struct drbd_peer_request *peer_req;
Philipp Reisnere42325a2011-01-19 13:55:45 +01001734 struct p_data *p = &mdev->tconn->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001735 int rw = WRITE;
1736 u32 dp_flags;
1737
Philipp Reisnerb411b362009-09-25 16:07:19 -07001738 if (!get_ldev(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001739 spin_lock(&mdev->peer_seq_lock);
1740 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1741 mdev->peer_seq++;
1742 spin_unlock(&mdev->peer_seq_lock);
1743
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001744 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001745 atomic_inc(&mdev->current_epoch->epoch_size);
1746 return drbd_drain_block(mdev, data_size);
1747 }
1748
1749 /* get_ldev(mdev) successful.
1750 * Corresponding put_ldev done either below (on various errors),
Andreas Gruenbacher9c508422011-01-14 21:19:36 +01001751 * or in drbd_endio_sec, if we successfully submit the data at
Philipp Reisnerb411b362009-09-25 16:07:19 -07001752 * the end of this function. */
1753
1754 sector = be64_to_cpu(p->sector);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001755 peer_req = read_in_block(mdev, p->block_id, sector, data_size);
1756 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001757 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001758 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001759 }
1760
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001761 peer_req->w.cb = e_end_block;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001762
Lars Ellenberg688593c2010-11-17 22:25:03 +01001763 dp_flags = be32_to_cpu(p->dp_flags);
1764 rw |= wire_flags_to_bio(mdev, dp_flags);
1765
1766 if (dp_flags & DP_MAY_SET_IN_SYNC)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001767 peer_req->flags |= EE_MAY_SET_IN_SYNC;
Lars Ellenberg688593c2010-11-17 22:25:03 +01001768
Philipp Reisnerb411b362009-09-25 16:07:19 -07001769 spin_lock(&mdev->epoch_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001770 peer_req->epoch = mdev->current_epoch;
1771 atomic_inc(&peer_req->epoch->epoch_size);
1772 atomic_inc(&peer_req->epoch->active);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001773 spin_unlock(&mdev->epoch_lock);
1774
Philipp Reisnerb411b362009-09-25 16:07:19 -07001775 /* I'm the receiver, I do hold a net_cnt reference. */
Philipp Reisner89e58e72011-01-19 13:12:45 +01001776 if (!mdev->tconn->net_conf->two_primaries) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01001777 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001778 } else {
1779 /* don't get the req_lock yet,
1780 * we may sleep in drbd_wait_peer_seq */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001781 const int size = peer_req->i.size;
Philipp Reisner25703f82011-02-07 14:35:25 +01001782 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001783 DEFINE_WAIT(wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001784 int first;
1785
Philipp Reisner89e58e72011-01-19 13:12:45 +01001786 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001787
1788 /* conflict detection and handling:
1789 * 1. wait on the sequence number,
1790 * in case this data packet overtook ACK packets.
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001791 * 2. check for conflicting write requests.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001792 *
1793 * Note: for two_primaries, we are protocol C,
1794 * so there cannot be any request that is DONE
1795 * but still on the transfer log.
1796 *
Philipp Reisnerb411b362009-09-25 16:07:19 -07001797 * if no conflicting request is found:
1798 * submit.
1799 *
1800 * if any conflicting request is found
1801 * that has not yet been acked,
1802 * AND I have the "discard concurrent writes" flag:
1803 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1804 *
1805 * if any conflicting request is found:
1806 * block the receiver, waiting on misc_wait
1807 * until no more conflicting requests are there,
1808 * or we get interrupted (disconnect).
1809 *
1810 * we do not just write after local io completion of those
1811 * requests, but only after req is done completely, i.e.
1812 * we wait for the P_DISCARD_ACK to arrive!
1813 *
1814 * then proceed normally, i.e. submit.
1815 */
1816 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1817 goto out_interrupted;
1818
Philipp Reisner87eeee42011-01-19 14:16:30 +01001819 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001820
Philipp Reisnerb411b362009-09-25 16:07:19 -07001821 first = 1;
1822 for (;;) {
Andreas Gruenbacherde696712011-01-20 15:00:24 +01001823 struct drbd_interval *i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001824 int have_unacked = 0;
1825 int have_conflict = 0;
1826 prepare_to_wait(&mdev->misc_wait, &wait,
1827 TASK_INTERRUPTIBLE);
Andreas Gruenbacherde696712011-01-20 15:00:24 +01001828
1829 i = drbd_find_overlap(&mdev->write_requests, sector, size);
1830 if (i) {
Andreas Gruenbacherde696712011-01-20 15:00:24 +01001831 /* only ALERT on first iteration,
1832 * we may be woken up early... */
1833 if (first)
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001834 dev_alert(DEV, "%s[%u] Concurrent %s write detected!"
Andreas Gruenbacherde696712011-01-20 15:00:24 +01001835 " new: %llus +%u; pending: %llus +%u\n",
1836 current->comm, current->pid,
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001837 i->local ? "local" : "remote",
Andreas Gruenbacherde696712011-01-20 15:00:24 +01001838 (unsigned long long)sector, size,
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001839 (unsigned long long)i->sector, i->size);
1840
1841 if (i->local) {
1842 struct drbd_request *req2;
1843
1844 req2 = container_of(i, struct drbd_request, i);
1845 if (req2->rq_state & RQ_NET_PENDING)
1846 ++have_unacked;
1847 }
Andreas Gruenbacherde696712011-01-20 15:00:24 +01001848 ++have_conflict;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001849 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001850 if (!have_conflict)
1851 break;
1852
1853 /* Discard Ack only for the _first_ iteration */
1854 if (first && discard && have_unacked) {
1855 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1856 (unsigned long long)sector);
1857 inc_unacked(mdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001858 peer_req->w.cb = e_send_discard_ack;
1859 list_add_tail(&peer_req->w.list, &mdev->done_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001860
Philipp Reisner87eeee42011-01-19 14:16:30 +01001861 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001862
1863 /* we could probably send that P_DISCARD_ACK ourselves,
1864 * but I don't like the receiver using the msock */
1865
1866 put_ldev(mdev);
1867 wake_asender(mdev);
1868 finish_wait(&mdev->misc_wait, &wait);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001869 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001870 }
1871
1872 if (signal_pending(current)) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01001873 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001874 finish_wait(&mdev->misc_wait, &wait);
1875 goto out_interrupted;
1876 }
1877
Andreas Gruenbachera500c2e2011-01-27 14:12:23 +01001878 /* Indicate to wake up mdev->misc_wait upon completion. */
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001879 i->waiting = true;
Andreas Gruenbachera500c2e2011-01-27 14:12:23 +01001880
Philipp Reisner87eeee42011-01-19 14:16:30 +01001881 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001882 if (first) {
1883 first = 0;
1884 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1885 "sec=%llus\n", (unsigned long long)sector);
1886 } else if (discard) {
1887 /* we had none on the first iteration.
1888 * there must be none now. */
1889 D_ASSERT(have_unacked == 0);
1890 }
1891 schedule();
Philipp Reisner87eeee42011-01-19 14:16:30 +01001892 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001893 }
1894 finish_wait(&mdev->misc_wait, &wait);
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001895
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001896 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001897 }
1898
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001899 list_add(&peer_req->w.list, &mdev->active_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001900 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001901
Philipp Reisner89e58e72011-01-19 13:12:45 +01001902 switch (mdev->tconn->net_conf->wire_protocol) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001903 case DRBD_PROT_C:
1904 inc_unacked(mdev);
1905 /* corresponding dec_unacked() in e_end_block()
1906 * respective _drbd_clear_done_ee */
1907 break;
1908 case DRBD_PROT_B:
1909 /* I really don't like it that the receiver thread
1910 * sends on the msock, but anyways */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001911 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001912 break;
1913 case DRBD_PROT_A:
1914 /* nothing to do */
1915 break;
1916 }
1917
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001918 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001919 /* In case we have the only disk of the cluster, */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001920 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
1921 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
1922 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
1923 drbd_al_begin_io(mdev, peer_req->i.sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001924 }
1925
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001926 if (drbd_submit_ee(mdev, peer_req, rw, DRBD_FAULT_DT_WR) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001927 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001928
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001929 /* don't care for the reason here */
1930 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01001931 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001932 list_del(&peer_req->w.list);
1933 drbd_remove_epoch_entry_interval(mdev, peer_req);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001934 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001935 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
1936 drbd_al_complete_io(mdev, peer_req->i.sector);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001937
Philipp Reisnerb411b362009-09-25 16:07:19 -07001938out_interrupted:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001939 drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001940 put_ldev(mdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001941 drbd_free_ee(mdev, peer_req);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001942 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001943}
1944
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001945/* We may throttle resync, if the lower device seems to be busy,
1946 * and current sync rate is above c_min_rate.
1947 *
1948 * To decide whether or not the lower device is busy, we use a scheme similar
1949 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
1950 * (more than 64 sectors) of activity we cannot account for with our own resync
1951 * activity, it obviously is "busy".
1952 *
1953 * The current sync rate used here uses only the most recent two step marks,
1954 * to have a short time average so we can react faster.
1955 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01001956int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001957{
1958 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
1959 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01001960 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001961 int curr_events;
1962 int throttle = 0;
1963
1964 /* feature disabled? */
1965 if (mdev->sync_conf.c_min_rate == 0)
1966 return 0;
1967
Philipp Reisnere3555d82010-11-07 15:56:29 +01001968 spin_lock_irq(&mdev->al_lock);
1969 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
1970 if (tmp) {
1971 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
1972 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
1973 spin_unlock_irq(&mdev->al_lock);
1974 return 0;
1975 }
1976 /* Do not slow down if app IO is already waiting for this extent */
1977 }
1978 spin_unlock_irq(&mdev->al_lock);
1979
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001980 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
1981 (int)part_stat_read(&disk->part0, sectors[1]) -
1982 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01001983
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001984 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
1985 unsigned long rs_left;
1986 int i;
1987
1988 mdev->rs_last_events = curr_events;
1989
1990 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
1991 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01001992 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
1993
1994 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
1995 rs_left = mdev->ov_left;
1996 else
1997 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001998
1999 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2000 if (!dt)
2001 dt++;
2002 db = mdev->rs_mark_left[i] - rs_left;
2003 dbdt = Bit2KB(db/dt);
2004
2005 if (dbdt > mdev->sync_conf.c_min_rate)
2006 throttle = 1;
2007 }
2008 return throttle;
2009}
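/*
 * Worked example (illustrative numbers, assuming the usual 4 KiB per
 * bitmap bit): with sync marks about 6 seconds apart and 3000 bits
 * cleared since the older mark, dt == 6, db == 3000, and
 *
 *	dbdt = Bit2KB(3000 / 6) = 500 * 4 = 2000 KiB/s
 *
 * With c_min_rate configured at, say, 1000 KiB/s, and the partition
 * stats showing more than 64 sectors of activity we cannot attribute
 * to our own resync traffic, this returns 1 and the caller throttles.
 */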
2010
2011
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01002012static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
2013 unsigned int digest_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002014{
2015 sector_t sector;
2016 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002017 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002018 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002019 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002020 unsigned int fault_type;
Philipp Reisnere42325a2011-01-19 13:55:45 +01002021 struct p_block_req *p = &mdev->tconn->data.rbuf.block_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002022
2023 sector = be64_to_cpu(p->sector);
2024 size = be32_to_cpu(p->blksize);
2025
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002026 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002027 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2028 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002029 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002030 }
2031 if (sector + (size>>9) > capacity) {
2032 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2033 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002034 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002035 }
2036
2037 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002038 verb = 1;
2039 switch (cmd) {
2040 case P_DATA_REQUEST:
2041 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2042 break;
2043 case P_RS_DATA_REQUEST:
2044 case P_CSUM_RS_REQUEST:
2045 case P_OV_REQUEST:
2046 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2047 break;
2048 case P_OV_REPLY:
2049 verb = 0;
2050 dec_rs_pending(mdev);
2051 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2052 break;
2053 default:
2054 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2055 cmdname(cmd));
2056 }
2057 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002058 dev_err(DEV, "Can not satisfy peer's read request, "
2059 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002060
Lars Ellenberga821cc42010-09-06 12:31:37 +02002061 /* drain possibly payload */
2062 return drbd_drain_block(mdev, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002063 }
2064
2065 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2066 * "criss-cross" setup, that might cause write-out on some other DRBD,
2067 * which in turn might block on the other node at this very place. */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002068 peer_req = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2069 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002070 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002071 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002072 }
2073
Philipp Reisner02918be2010-08-20 14:35:10 +02002074 switch (cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002075 case P_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002076 peer_req->w.cb = w_e_end_data_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002077 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002078 /* application IO, don't drbd_rs_begin_io */
2079 goto submit;
2080
Philipp Reisnerb411b362009-09-25 16:07:19 -07002081 case P_RS_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002082 peer_req->w.cb = w_e_end_rsdata_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002083 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002084 /* used in the sector offset progress display */
2085 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002086 break;
2087
2088 case P_OV_REPLY:
2089 case P_CSUM_RS_REQUEST:
2090 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002091 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2092 if (!di)
2093 goto out_free_e;
2094
2095 di->digest_size = digest_size;
2096 di->digest = (((char *)di)+sizeof(struct digest_info));
2097
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002098 peer_req->digest = di;
2099 peer_req->flags |= EE_HAS_DIGEST;
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002100
Philipp Reisnerb411b362009-09-25 16:07:19 -07002101 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2102 goto out_free_e;
2103
Philipp Reisner02918be2010-08-20 14:35:10 +02002104 if (cmd == P_CSUM_RS_REQUEST) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002105 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002106 peer_req->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002107 /* used in the sector offset progress display */
2108 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisner02918be2010-08-20 14:35:10 +02002109 } else if (cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002110 /* track progress, we may need to throttle */
2111 atomic_add(size >> 9, &mdev->rs_sect_in);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002112 peer_req->w.cb = w_e_end_ov_reply;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002113 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002114 /* drbd_rs_begin_io done when we sent this request,
2115 * but accounting still needs to be done. */
2116 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002117 }
2118 break;
2119
2120 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002121 if (mdev->ov_start_sector == ~(sector_t)0 &&
Philipp Reisner31890f42011-01-19 14:12:51 +01002122 mdev->tconn->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002123 unsigned long now = jiffies;
2124 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002125 mdev->ov_start_sector = sector;
2126 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002127 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2128 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002129 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2130 mdev->rs_mark_left[i] = mdev->ov_left;
2131 mdev->rs_mark_time[i] = now;
2132 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002133 dev_info(DEV, "Online Verify start sector: %llu\n",
2134 (unsigned long long)sector);
2135 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002136 peer_req->w.cb = w_e_end_ov_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002137 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002138 break;
2139
Philipp Reisnerb411b362009-09-25 16:07:19 -07002140 default:
2141 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002142 cmdname(cmd));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002143 fault_type = DRBD_FAULT_MAX;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002144 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002145 }
2146
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002147 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2148 * wrt the receiver, but it is not as straightforward as it may seem.
2149 * Various places in the resync start and stop logic assume resync
2150 * requests are processed in order, requeuing this on the worker thread
2151 * introduces a bunch of new code for synchronization between threads.
2152 *
2153 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2154 * "forever", throttling after drbd_rs_begin_io will lock that extent
2155 * for application writes for the same time. For now, just throttle
2156 * here, where the rest of the code expects the receiver to sleep for
2157 * a while, anyways.
2158 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002159
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002160 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2161 * this defers syncer requests for some time, before letting at least
2162 * one request through. The resync controller on the receiving side
2163 * will adapt to the incoming rate accordingly.
2164 *
2165 * We cannot throttle here if remote is Primary/SyncTarget:
2166 * we would also throttle its application reads.
2167 * In that case, throttling is done on the SyncTarget only.
2168 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002169 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2170 schedule_timeout_uninterruptible(HZ/10);
2171 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002172 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002173
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002174submit_for_resync:
2175 atomic_add(size >> 9, &mdev->rs_sect_ev);
2176
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002177submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002178 inc_unacked(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002179 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002180 list_add_tail(&peer_req->w.list, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002181 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002182
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002183 if (drbd_submit_ee(mdev, peer_req, READ, fault_type) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002184 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002185
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002186 /* don't care for the reason here */
2187 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002188 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002189 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002190 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002191 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2192
Philipp Reisnerb411b362009-09-25 16:07:19 -07002193out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002194 put_ldev(mdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002195 drbd_free_ee(mdev, peer_req);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002196 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002197}
2198
2199static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2200{
2201 int self, peer, rv = -100;
2202 unsigned long ch_self, ch_peer;
2203
2204 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2205 peer = mdev->p_uuid[UI_BITMAP] & 1;
2206
2207 ch_peer = mdev->p_uuid[UI_SIZE];
2208 ch_self = mdev->comm_bm_set;
2209
Philipp Reisner89e58e72011-01-19 13:12:45 +01002210 switch (mdev->tconn->net_conf->after_sb_0p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002211 case ASB_CONSENSUS:
2212 case ASB_DISCARD_SECONDARY:
2213 case ASB_CALL_HELPER:
2214 dev_err(DEV, "Configuration error.\n");
2215 break;
2216 case ASB_DISCONNECT:
2217 break;
2218 case ASB_DISCARD_YOUNGER_PRI:
2219 if (self == 0 && peer == 1) {
2220 rv = -1;
2221 break;
2222 }
2223 if (self == 1 && peer == 0) {
2224 rv = 1;
2225 break;
2226 }
2227 /* Else fall through to one of the other strategies... */
2228 case ASB_DISCARD_OLDER_PRI:
2229 if (self == 0 && peer == 1) {
2230 rv = 1;
2231 break;
2232 }
2233 if (self == 1 && peer == 0) {
2234 rv = -1;
2235 break;
2236 }
2237 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002238 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002239 "Using discard-least-changes instead\n");
2240 case ASB_DISCARD_ZERO_CHG:
2241 if (ch_peer == 0 && ch_self == 0) {
Philipp Reisner25703f82011-02-07 14:35:25 +01002242 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002243 ? -1 : 1;
2244 break;
2245 } else {
2246 if (ch_peer == 0) { rv = 1; break; }
2247 if (ch_self == 0) { rv = -1; break; }
2248 }
Philipp Reisner89e58e72011-01-19 13:12:45 +01002249 if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002250 break;
2251 case ASB_DISCARD_LEAST_CHG:
2252 if (ch_self < ch_peer)
2253 rv = -1;
2254 else if (ch_self > ch_peer)
2255 rv = 1;
2256 else /* ( ch_self == ch_peer ) */
2257 /* Well, then use something else. */
Philipp Reisner25703f82011-02-07 14:35:25 +01002258 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002259 ? -1 : 1;
2260 break;
2261 case ASB_DISCARD_LOCAL:
2262 rv = -1;
2263 break;
2264 case ASB_DISCARD_REMOTE:
2265 rv = 1;
2266 }
2267
2268 return rv;
2269}
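/*
 * Example (illustrative): with after-sb-0pri set to discard-least-changes,
 * a node that modified 10 blocks while split-brained (ch_self == 10)
 * against a peer that modified 1000 (ch_peer == 1000) gets rv == -1:
 * this node becomes sync target and its changes are discarded.  Only a
 * dead-even count falls back to the DISCARD_CONCURRENT tie-breaker.
 */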
2270
2271static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2272{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002273 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002274
Philipp Reisner89e58e72011-01-19 13:12:45 +01002275 switch (mdev->tconn->net_conf->after_sb_1p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002276 case ASB_DISCARD_YOUNGER_PRI:
2277 case ASB_DISCARD_OLDER_PRI:
2278 case ASB_DISCARD_LEAST_CHG:
2279 case ASB_DISCARD_LOCAL:
2280 case ASB_DISCARD_REMOTE:
2281 dev_err(DEV, "Configuration error.\n");
2282 break;
2283 case ASB_DISCONNECT:
2284 break;
2285 case ASB_CONSENSUS:
2286 hg = drbd_asb_recover_0p(mdev);
2287 if (hg == -1 && mdev->state.role == R_SECONDARY)
2288 rv = hg;
2289 if (hg == 1 && mdev->state.role == R_PRIMARY)
2290 rv = hg;
2291 break;
2292 case ASB_VIOLENTLY:
2293 rv = drbd_asb_recover_0p(mdev);
2294 break;
2295 case ASB_DISCARD_SECONDARY:
2296 return mdev->state.role == R_PRIMARY ? 1 : -1;
2297 case ASB_CALL_HELPER:
2298 hg = drbd_asb_recover_0p(mdev);
2299 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002300 enum drbd_state_rv rv2;
2301
2302 drbd_set_role(mdev, R_SECONDARY, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002303 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2304 * we might be here in C_WF_REPORT_PARAMS which is transient.
2305 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002306 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2307 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002308 drbd_khelper(mdev, "pri-lost-after-sb");
2309 } else {
2310 dev_warn(DEV, "Successfully gave up primary role.\n");
2311 rv = hg;
2312 }
2313 } else
2314 rv = hg;
2315 }
2316
2317 return rv;
2318}
2319
2320static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2321{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002322 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002323
Philipp Reisner89e58e72011-01-19 13:12:45 +01002324 switch (mdev->tconn->net_conf->after_sb_2p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002325 case ASB_DISCARD_YOUNGER_PRI:
2326 case ASB_DISCARD_OLDER_PRI:
2327 case ASB_DISCARD_LEAST_CHG:
2328 case ASB_DISCARD_LOCAL:
2329 case ASB_DISCARD_REMOTE:
2330 case ASB_CONSENSUS:
2331 case ASB_DISCARD_SECONDARY:
2332 dev_err(DEV, "Configuration error.\n");
2333 break;
2334 case ASB_VIOLENTLY:
2335 rv = drbd_asb_recover_0p(mdev);
2336 break;
2337 case ASB_DISCONNECT:
2338 break;
2339 case ASB_CALL_HELPER:
2340 hg = drbd_asb_recover_0p(mdev);
2341 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002342 enum drbd_state_rv rv2;
2343
Philipp Reisnerb411b362009-09-25 16:07:19 -07002344 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2345 * we might be here in C_WF_REPORT_PARAMS which is transient.
2346 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002347 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2348 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002349 drbd_khelper(mdev, "pri-lost-after-sb");
2350 } else {
2351 dev_warn(DEV, "Successfully gave up primary role.\n");
2352 rv = hg;
2353 }
2354 } else
2355 rv = hg;
2356 }
2357
2358 return rv;
2359}
2360
2361static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2362 u64 bits, u64 flags)
2363{
2364 if (!uuid) {
2365 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2366 return;
2367 }
2368 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2369 text,
2370 (unsigned long long)uuid[UI_CURRENT],
2371 (unsigned long long)uuid[UI_BITMAP],
2372 (unsigned long long)uuid[UI_HISTORY_START],
2373 (unsigned long long)uuid[UI_HISTORY_END],
2374 (unsigned long long)bits,
2375 (unsigned long long)flags);
2376}
2377
2378/*
2379 100 after split brain try auto recover
2380 2 C_SYNC_SOURCE set BitMap
2381 1 C_SYNC_SOURCE use BitMap
2382 0 no Sync
2383 -1 C_SYNC_TARGET use BitMap
2384 -2 C_SYNC_TARGET set BitMap
2385 -100 after split brain, disconnect
2386-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002387-1091 requires proto 91
2388-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002389 */
2390static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2391{
2392 u64 self, peer;
2393 int i, j;
2394
2395 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2396 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2397
2398 *rule_nr = 10;
2399 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2400 return 0;
2401
2402 *rule_nr = 20;
2403 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2404 peer != UUID_JUST_CREATED)
2405 return -2;
2406
2407 *rule_nr = 30;
2408 if (self != UUID_JUST_CREATED &&
2409 (peer == UUID_JUST_CREATED || peer == (u64)0))
2410 return 2;
2411
2412 if (self == peer) {
2413 int rct, dc; /* roles at crash time */
2414
2415 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2416
Philipp Reisner31890f42011-01-19 14:12:51 +01002417 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002418 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002419
2420 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2421 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2422 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2423 drbd_uuid_set_bm(mdev, 0UL);
2424
2425 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2426 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2427 *rule_nr = 34;
2428 } else {
2429 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2430 *rule_nr = 36;
2431 }
2432
2433 return 1;
2434 }
2435
2436 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2437
Philipp Reisner31890f42011-01-19 14:12:51 +01002438 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002439 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002440
2441 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2442 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2443 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2444
2445 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2446 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2447 mdev->p_uuid[UI_BITMAP] = 0UL;
2448
2449 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2450 *rule_nr = 35;
2451 } else {
2452 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2453 *rule_nr = 37;
2454 }
2455
2456 return -1;
2457 }
2458
2459 /* Common power [off|failure] */
2460 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2461 (mdev->p_uuid[UI_FLAGS] & 2);
2462 /* lowest bit is set when we were primary,
2463 * next bit (weight 2) is set when peer was primary */
2464 *rule_nr = 40;
2465
2466 switch (rct) {
2467 case 0: /* !self_pri && !peer_pri */ return 0;
2468 case 1: /* self_pri && !peer_pri */ return 1;
2469 case 2: /* !self_pri && peer_pri */ return -1;
2470 case 3: /* self_pri && peer_pri */
Philipp Reisner25703f82011-02-07 14:35:25 +01002471 dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002472 return dc ? -1 : 1;
2473 }
2474 }
2475
2476 *rule_nr = 50;
2477 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2478 if (self == peer)
2479 return -1;
2480
2481 *rule_nr = 51;
2482 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2483 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002484 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002485 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2486 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2487 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002488 /* The last P_SYNC_UUID did not get through. Undo the modifications
2489 the peer made to its UUIDs as sync source when it last started a resync. */
2490
Philipp Reisner31890f42011-01-19 14:12:51 +01002491 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002492 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002493
2494 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2495 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002496
2497 dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
2498 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2499
Philipp Reisnerb411b362009-09-25 16:07:19 -07002500 return -1;
2501 }
2502 }
2503
2504 *rule_nr = 60;
2505 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2506 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2507 peer = mdev->p_uuid[i] & ~((u64)1);
2508 if (self == peer)
2509 return -2;
2510 }
2511
2512 *rule_nr = 70;
2513 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2514 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2515 if (self == peer)
2516 return 1;
2517
2518 *rule_nr = 71;
2519 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2520 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002521 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002522 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2523 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2524 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002525 /* The last P_SYNC_UUID did not get through. Undo the modifications
2526 we made to our UUIDs as sync source when we last started a resync. */
2527
Philipp Reisner31890f42011-01-19 14:12:51 +01002528 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002529 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002530
2531 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2532 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2533
Philipp Reisner4a23f262011-01-11 17:42:17 +01002534 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002535 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2536 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2537
2538 return 1;
2539 }
2540 }
2541
2542
	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}

/* drbd_sync_handshake() returns the new conn state on success, or
   C_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
			 hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

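	/* Both 100 and -100 signal split brain; -100 additionally means the
	 * UUIDs give no hint which side has the better data, so it is only
	 * auto-resolved when always_asbp is set, and then forces a full
	 * sync.  Which recovery policy applies depends on how many of the
	 * two nodes are currently in the Primary role. */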
	if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
				 "automatically solved. Sync from %s node\n",
				 pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
					 " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
				 "Sync from %s node\n",
				 (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

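	/* Becoming SyncTarget while Primary would overwrite data that upper
	 * layers may be using right now; rr-conflict decides whether to call
	 * the "pri-lost" helper (and then disconnect), just disconnect, or
	 * knowingly violate the stable-data assumption and sync anyway. */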
	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->tconn->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
				 "assumption\n");
		}
	}

	if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
				 drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other combination with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL is invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}

static int receive_protocol(struct drbd_conf *mdev, enum drbd_packet cmd,
			    unsigned int data_size)
{
	struct p_protocol *p = &mdev->tconn->data.rbuf.protocol;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto = be32_to_cpu(p->protocol);
	p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf = be32_to_cpu(p->conn_flags);
	p_want_lose = cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->tconn->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->tconn->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->tconn->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->tconn->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->tconn->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->tconn->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

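	/* Since protocol version 87 the packet carries the name of the
	 * data-integrity-alg as payload; it has to match on both sides,
	 * otherwise we drop the connection. */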
	if (mdev->tconn->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->tconn->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return false;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
			 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return true;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}

static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int packet_size)
{
	int ok = true;
	struct p_rs_param_95 *p = &mdev->tconn->data.rbuf.rs_param_95;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->tconn->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;

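	/* The SyncParam packet grew with the protocol: apv <= 87 carries
	 * only the fixed parameters, 88 adds the verify-alg name, 89 the
	 * csums-alg name, and 95 the settings of the dynamic resync speed
	 * controller.  Bound the expected packet size accordingly. */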
	exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
				  + SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
			packet_size, exp_max_sz);
		return false;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header);
		data_size = packet_size - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header);
		data_size = packet_size - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header);
		data_size = packet_size - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
		return false;

	mdev->sync_conf.rate = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
					"peer wants %u, accepting only %u byte\n",
					data_size, SHARED_SECRET_MAX);
				return false;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return false;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
					mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
					mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94) {
			mdev->sync_conf.rate = be32_to_cpu(p->rate);
			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);

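			/* (Re)size the resync planner fifo to match the
			 * requested c_plan_ahead window.  Allocate without
			 * holding the lock; it is swapped in under
			 * peer_seq_lock further down. */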
			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kzalloc of fifo_buffer failed");
					goto disconnect;
				}
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return ok;
disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}

/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
			 (unsigned long long)a, (unsigned long long)b);
}

static int receive_sizes(struct drbd_conf *mdev, enum drbd_packet cmd,
			 unsigned int data_size)
{
	struct p_sizes *p = &mdev->tconn->data.rbuf.sizes;
	enum determine_dev_size dd = unchanged;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
				 (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

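	/* dds_flags tune how drbd_determine_dev_size() resizes; in
	 * particular DDSF_NO_RESYNC suppresses the resync that would
	 * otherwise follow an online grow (--assume-clean). */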
	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determine_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return false;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
	drbd_reconsider_max_bio_size(mdev);

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return true;
}

static int receive_uuids(struct drbd_conf *mdev, enum drbd_packet cmd,
			 unsigned int data_size)
{
	struct p_uuids *p = &mdev->tconn->data.rbuf.uuids;
	u64 *p_uuid;
	int i, updated_uuids = 0;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

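	/* If our current UUID is still UUID_JUST_CREATED and the peer set
	 * the skip-initial-sync flag (bit 3 of UI_FLAGS), both sides are
	 * fresh, so we may skip the initial full sync and go directly to
	 * UpToDate/UpToDate. */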
	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->tconn->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids",
					BM_LOCKED_TEST_ALLOWED);
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
			updated_uuids = 1;
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test for the disk state, we should wait until a potentially
	   ongoing cluster wide state change is finished. That is important if
	   we are primary and are detaching from our disk. We need to see the
	   new disk state... */
	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	if (updated_uuids)
		drbd_print_uuids(mdev, "receiver updated UUIDs to");

	return true;
}

/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

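	/* Connection states that look different from the two sides are
	 * translated into the peer's perspective here; only states that can
	 * actually show up in a state packet need an entry.  All other
	 * fields are copied via ms.i = ps.i below, after which the
	 * role/peer and disk/pdsk pairs are swapped. */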
	static enum drbd_conns c_tab[] = {
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S] = C_VERIFY_T,
		[C_MASK] = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}

static int receive_req_state(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int data_size)
{
	struct p_req_state *p = &mdev->tconn->data.rbuf.req_state;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return true;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);

	drbd_send_sr_reply(mdev, rv);
	drbd_md_sync(mdev);

	return true;
}

static int receive_state(struct drbd_conf *mdev, enum drbd_packet cmd,
			 unsigned int data_size)
{
	struct p_state *p = &mdev->tconn->data.rbuf.state;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

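	/* Snapshot our own state under req_lock.  If a concurrent state
	 * change modifies mdev->state before we commit the new state below,
	 * the os comparison there fails and we jump back to retry. */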
	spin_lock_irq(&mdev->tconn->req_lock);
 retry:
	os = ns = mdev->state;
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* peer says his disk is uptodate, while we think it is inconsistent,
	 * and this happens while we think we have a sync going on. */
	if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return true;
		}
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen, if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.conn >= C_STARTING_SYNC_S &&
			peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
					return false;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return false;
			}
		}
	}

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.i != os.i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
		   for temporary network outages! */
		spin_unlock_irq(&mdev->tconn->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
		return false;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS) {
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_state(mdev);
		}
	}

	mdev->tconn->net_conf->want_lose = 0;

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return true;
}

static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int data_size)
{
	struct p_rs_uuid *p = &mdev->tconn->data.rbuf.rs_uuid;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn == C_BEHIND ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_print_uuids(mdev, "updated sync uuid");
		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return true;
}

/**
 * receive_bitmap_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
		     unsigned long *buffer, struct bm_xfer_ctx *c)
{
	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
	unsigned want = num_words * sizeof(long);
	int err;

	if (want != data_size) {
		dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
		return -EIO;
	}
	if (want == 0)
		return 0;
	err = drbd_recv(mdev, buffer, want);
	if (err != want) {
		if (err >= 0)
			err = -EIO;
		return err;
	}

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return 1;
}

/**
 * recv_bm_rle_bits
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c,
		unsigned int len)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int toggle = DCBP_get_start(p);
	int have;
	int bits;

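	/* The payload is a bitstream of variable-length-encoded run lengths.
	 * Runs alternate between clear and set bits; DCBP_get_start() says
	 * which kind the first run describes, and only the "set" runs are
	 * applied to the bitmap. */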
	bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return -EIO;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return -EIO;

		if (toggle) {
			e = s + rl -1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return -EIO;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return -EIO;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return -EIO;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s != c->bm_bits);
}

/**
 * decode_bitmap_c
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c,
		unsigned int len)
{
	if (DCBP_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c, len);

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	return -EIO;
}

void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
				    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}

/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter whether we process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we used big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packet cmd,
			  unsigned int data_size)
{
	struct bm_xfer_ctx c;
	void *buffer;
	int err;
	int ok = false;
	struct p_header *h = &mdev->tconn->data.rbuf.header;

	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
	/* you are supposed to send additional out-of-sync information
	 * if you actually set bits during this phase */

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	buffer = (unsigned long *) __get_free_page(GFP_NOIO);
	if (!buffer) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		goto out;
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	for(;;) {
		if (cmd == P_BITMAP) {
			err = receive_bitmap_plain(mdev, data_size, buffer, &c);
		} else if (cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (data_size > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				goto out;
			}
			/* use the page buff */
			p = buffer;
			memcpy(p, h, sizeof(*h));
			if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
				goto out;
			if (data_size <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
				goto out;
			}
			err = decode_bitmap_c(mdev, p, &c, data_size);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
			goto out;
		}

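		/* stats: index 1 counts plain P_BITMAP packets,
		 * index 0 the compressed ones */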
		c.packets[cmd == P_BITMAP]++;
		c.bytes[cmd == P_BITMAP] += sizeof(struct p_header) + data_size;

		if (err <= 0) {
			if (err < 0)
				goto out;
			break;
		}
		if (!drbd_recv_header(mdev, &cmd, &data_size))
			goto out;
	}

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		enum drbd_state_rv rv;

		ok = !drbd_send_bitmap(mdev);
		if (!ok)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(rv == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(mdev->state.conn));
	}

	ok = true;
 out:
	drbd_bm_unlock(mdev);
	if (ok && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	free_page((unsigned long) buffer);
	return ok;
}

static int receive_skip(struct drbd_conf *mdev, enum drbd_packet cmd,
			unsigned int data_size)
{
	/* TODO zero copy sink :) */
	static char sink[128];
	int size, want, r;

	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
		 cmd, data_size);

	size = data_size;
	while (size > 0) {
		want = min_t(int, size, sizeof(sink));
		r = drbd_recv(mdev, sink, want);
		if (!expect(r > 0))
			break;
		size -= r;
	}
	return size == 0;
}

static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packet cmd,
				unsigned int data_size)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->tconn->data.socket);

	return true;
}

static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packet cmd,
			       unsigned int data_size)
{
	struct p_block_desc *p = &mdev->tconn->data.rbuf.block_desc;

	switch (mdev->state.conn) {
	case C_WF_SYNC_UUID:
	case C_WF_BITMAP_T:
	case C_BEHIND:
		break;
	default:
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
			drbd_conn_str(mdev->state.conn));
	}

	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return true;
}

Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003722typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packet cmd,
3723 unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003724
Philipp Reisner02918be2010-08-20 14:35:10 +02003725struct data_cmd {
3726 int expect_payload;
3727 size_t pkt_size;
3728 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003729};
3730
Philipp Reisner02918be2010-08-20 14:35:10 +02003731static struct data_cmd drbd_cmd_handler[] = {
3732 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3733 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3734 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3735 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
Philipp Reisner257d0af2011-01-26 12:15:29 +01003736 [P_BITMAP] = { 1, sizeof(struct p_header), receive_bitmap } ,
3737 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header), receive_bitmap } ,
3738 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header), receive_UnplugRemote },
Philipp Reisner02918be2010-08-20 14:35:10 +02003739 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3740 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
Philipp Reisner257d0af2011-01-26 12:15:29 +01003741 [P_SYNC_PARAM] = { 1, sizeof(struct p_header), receive_SyncParam },
3742 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header), receive_SyncParam },
Philipp Reisner02918be2010-08-20 14:35:10 +02003743 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3744 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3745 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3746 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3747 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3748 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3749 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3750 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3751 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3752 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02003753 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Philipp Reisner02918be2010-08-20 14:35:10 +02003754 /* anything missing from this table is in
3755 * the asender_tbl, see get_asender_cmd */
3756 [P_MAX_CMD] = { 0, 0, NULL },
3757};

/* All handler functions that expect a sub-header get that sub-header in
   mdev->tconn->data.rbuf.header.head.payload.

   Usually the handler can also find the usual p_header in
   mdev->tconn->data.rbuf.header.head, but it may not rely on that:
   the peer may have sent a p_header95 instead.
 */
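/*
 * Illustrative sketch, not wired up anywhere: a minimal callback matching
 * drbd_cmd_handler_f.  By the time it runs, drbdd() below has already
 * pulled the fixed-size sub-header into data.rbuf; only a variable-length
 * payload (data_size bytes) would still have to be read from the socket.
 *
 *	static int receive_example(struct drbd_conf *mdev,
 *				   enum drbd_packet cmd, unsigned int data_size)
 *	{
 *		struct p_block_desc *p = &mdev->tconn->data.rbuf.block_desc;
 *
 *		drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector),
 *				     be32_to_cpu(p->blksize));
 *		return true;
 *	}
 */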

static void drbdd(struct drbd_conf *mdev)
{
	struct p_header *header = &mdev->tconn->data.rbuf.header;
	unsigned int packet_size;
	enum drbd_packet cmd;
	size_t shs; /* sub header size */
	int rv;

	while (get_t_state(&mdev->tconn->receiver) == RUNNING) {
		drbd_thread_current_set_cpu(mdev, &mdev->tconn->receiver);
		if (!drbd_recv_header(mdev, &cmd, &packet_size))
			goto err_out;

		if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
			dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
			goto err_out;
		}

		shs = drbd_cmd_handler[cmd].pkt_size - sizeof(struct p_header);
		if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
			dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
			goto err_out;
		}

		if (shs) {
			rv = drbd_recv(mdev, &header->payload, shs);
			if (unlikely(rv != shs)) {
				if (!signal_pending(current))
					dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
				goto err_out;
			}
		}

		rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);

		if (unlikely(!rv)) {
			dev_err(DEV, "error receiving %s, l: %d!\n",
				cmdname(cmd), packet_size);
			goto err_out;
		}
	}

	if (0) {
	err_out:
		drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	}
	/* If we leave here, we probably want to update at least the
	 * "Connected" indicator on stable storage. Do so explicitly here. */
	drbd_md_sync(mdev);
}
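
/*
 * Worked example for the size bookkeeping in drbdd() above, assuming the
 * usual packed wire structs: for P_OUT_OF_SYNC, pkt_size in the table is
 * sizeof(struct p_block_desc), so shs is that minus sizeof(struct p_header),
 * and the handler runs with data_size = packet_size - shs == 0, matching
 * expect_payload == 0.  For P_DATA, packet_size as announced in the header
 * additionally covers the block contents, so data_size is exactly the
 * number of bytes receive_Data() still has to read from the socket.
 */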

void drbd_flush_workqueue(struct drbd_tconn *tconn)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	init_completion(&barr.done);
	drbd_queue_work(&tconn->data.work, &barr.w);
	wait_for_completion(&barr.done);
}
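
/*
 * Teardown sequence of drbd_disconnect(), summarized: stop the asender and
 * close the sockets, wait for in-flight epoch entries to drain, cancel
 * resync bookkeeping, flush the worker queue (using the barrier above),
 * try to outdate the peer if fencing is configured, and finally either go
 * C_UNCONNECTED (the receiver will try to reconnect) or, when we are
 * C_DISCONNECTING, free net_conf and go C_STANDALONE.
 */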
static void drbd_disconnect(struct drbd_conf *mdev)
{
	enum drbd_fencing_p fp;
	union drbd_state os, ns;
	int rv = SS_UNKNOWN_ERROR;
	unsigned int i;

	if (mdev->state.conn == C_STANDALONE)
		return;

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&mdev->tconn->asender);
	drbd_free_sock(mdev);

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	del_timer(&mdev->request_timer);

	/* make sure syncer is stopped and w_resume_next_sg queued */
	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev->tconn);

	/* This also does reclaim_net_ee(). If we do this too early, we might
	 * miss some resync ee and pages. */
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!is_susp(mdev->state))
		tl_clear(mdev);

	dev_info(DEV, "Connection closed\n");

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
		drbd_try_outdate_peer_async(mdev);

	spin_lock_irq(&mdev->tconn->req_lock);
	os = mdev->state;
	if (os.conn >= C_UNCONNECTED) {
		/* Do not restart in case we are C_DISCONNECTING */
		ns = os;
		ns.conn = C_UNCONNECTED;
		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (os.conn == C_DISCONNECTING) {
		wait_event(mdev->tconn->net_cnt_wait, atomic_read(&mdev->tconn->net_cnt) == 0);

		crypto_free_hash(mdev->tconn->cram_hmac_tfm);
		mdev->tconn->cram_hmac_tfm = NULL;

		kfree(mdev->tconn->net_conf);
		mdev->tconn->net_conf = NULL;
		drbd_request_state(mdev, NS(conn, C_STANDALONE));
	}

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred. I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));
}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->tconn->receiver ... */
	struct p_handshake *p = &mdev->tconn->data.sbuf.handshake;
	int ok;

	if (mutex_lock_interruptible(&mdev->tconn->data.mutex)) {
		dev_err(DEV, "interrupted during initial handshake\n");
		return 0; /* interrupted. not ok. */
	}

	if (mdev->tconn->data.socket == NULL) {
		mutex_unlock(&mdev->tconn->data.mutex);
		return 0;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_HAND_SHAKE,
			    &p->head, sizeof(*p), 0);
	mutex_unlock(&mdev->tconn->data.mutex);
	return ok;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->tconn->receiver ... */
	struct p_handshake *p = &mdev->tconn->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
	unsigned int length;
	enum drbd_packet cmd;
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		return 0;

	if (cmd != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		return -1;
	}

	if (length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
			expect, length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
		 "Agreed network protocol version %d\n", mdev->tconn->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
		"I support %d-%d, peer supports %d-%d\n",
		PRO_VERSION_MIN, PRO_VERSION_MAX,
		p->protocol_min, p->protocol_max);
	return -1;
}
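
/*
 * Worked example with made-up numbers (the real bounds come from
 * PRO_VERSION_MIN/PRO_VERSION_MAX): if we support 86..96 and the peer
 * advertises protocol_min..protocol_max = 87..100, the ranges overlap and
 * agreed_pro_version becomes min(96, 100) = 96.  Had the peer advertised
 * 97..100, then PRO_VERSION_MAX < protocol_min and we bail out via
 * incompat.  A peer sending protocol_max == 0 presumably predates the
 * min/max handshake; we then treat protocol_min as the only version it
 * offers.
 */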

#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	 1 - auth succeeded,
	 0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

static int drbd_do_auth(struct drbd_conf *mdev)
{
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len = strlen(mdev->tconn->net_conf->shared_secret);
	unsigned int resp_size;
	struct hash_desc desc;
	enum drbd_packet cmd;
	unsigned int length;
	int rv;

	desc.tfm = mdev->tconn->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(mdev->tconn->cram_hmac_tfm,
				(u8 *)mdev->tconn->net_conf->shared_secret, key_len);
	if (rv) {
		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_CHALLENGE) {
		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length > CHALLENGE_LEN * 2) {
		dev_err(DEV, "AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(length, GFP_NOIO);
	if (peers_ch == NULL) {
		dev_err(DEV, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	rv = drbd_recv(mdev, peers_ch, length);

	if (rv != length) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(mdev->tconn->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		dev_err(DEV, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, length);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_RESPONSE) {
		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length != resp_size) {
		dev_err(DEV, "AuthResponse payload has wrong size\n");
		rv = 0;
		goto fail;
	}

	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
			 resp_size, mdev->tconn->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif
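
/*
 * Message flow of the challenge-response authentication above, sketched;
 * both peers run the same sequence symmetrically:
 *
 *	send P_AUTH_CHALLENGE	CHALLENGE_LEN random bytes (my_challenge)
 *	recv P_AUTH_CHALLENGE	the peer's challenge (peers_ch)
 *	send P_AUTH_RESPONSE	HMAC(shared_secret, peers_ch)
 *	recv P_AUTH_RESPONSE	the peer's HMAC over my_challenge
 *	compare it against right_response = HMAC(shared_secret, my_challenge)
 *
 * The shared secret itself never crosses the wire; a peer passes the
 * memcmp() only by proving it knows the same secret.
 */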

int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	unsigned int minor = mdev_to_minor(mdev);
	int h;

	sprintf(current->comm, "drbd%d_receiver", minor);

	dev_info(DEV, "receiver (re)started\n");

	do {
		h = drbd_connect(mdev);
		if (h == 0) {
			drbd_disconnect(mdev);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			dev_warn(DEV, "Discarding network configuration.\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	} while (h == 0);

	if (h > 0) {
		if (get_net_conf(mdev->tconn)) {
			drbdd(mdev);
			put_net_conf(mdev->tconn);
		}
	}

	drbd_disconnect(mdev);

	dev_info(DEV, "receiver terminated\n");
	return 0;
}

/* ********* acknowledge sender ******** */

static int got_RqSReply(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	struct p_req_state_reply *p = &mdev->tconn->meta.rbuf.req_state_reply;

	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return true;
}

static int got_Ping(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	/* restore idle timeout */
	mdev->tconn->meta.socket->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return true;
}

static int got_IsInSync(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->tconn->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return true;
}

static int
validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->tconn->req_lock);
		return false;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}

static int got_BlockAck(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return true;
	}
	switch (cmd) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
		what = WRITE_ACKED_BY_PEER_AND_SIS;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
		what = WRITE_ACKED_BY_PEER;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B);
		what = RECV_ACKED_BY_PEER;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
		what = CONFLICT_DISCARDED_BY_PEER;
		break;
	default:
		D_ASSERT(0);
		return false;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->write_requests, __func__,
					     what, false);
}

static int got_NegAck(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	bool missing_ok = mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A ||
			  mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B;
	bool found;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}

	found = validate_req_change_req_state(mdev, p->block_id, sector,
					      &mdev->write_requests, __func__,
					      NEG_ACKED, missing_ok);
	if (!found) {
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		   The master bio might already be completed, therefore the
		   request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		   but then get a P_NEG_ACK afterwards. */
		if (!missing_ok)
			return false;
		drbd_set_out_of_sync(mdev, sector, size);
	}
	return true;
}

static int got_NegDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->read_requests, __func__,
					     NEG_ACKED, false);
}

static int got_NegRSDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (cmd) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through: a canceled request is finished
			 * either way, it is just not counted as failed */
		case P_RS_CANCEL:
			break;
		default:
			D_ASSERT(0);
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	return true;
}
4447
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004448static int got_BarrierAck(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004449{
Philipp Reisner257d0af2011-01-26 12:15:29 +01004450 struct p_barrier_ack *p = &mdev->tconn->meta.rbuf.barrier_ack;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004451
4452 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4453
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004454 if (mdev->state.conn == C_AHEAD &&
4455 atomic_read(&mdev->ap_in_flight) == 0 &&
Philipp Reisner370a43e2011-01-14 16:03:11 +01004456 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4457 mdev->start_resync_timer.expires = jiffies + HZ;
4458 add_timer(&mdev->start_resync_timer);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004459 }
4460
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004461 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004462}
4463
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004464static int got_OVResult(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004465{
Philipp Reisner257d0af2011-01-26 12:15:29 +01004466 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004467 struct drbd_work *w;
4468 sector_t sector;
4469 int size;
4470
4471 sector = be64_to_cpu(p->sector);
4472 size = be32_to_cpu(p->blksize);
4473
4474 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4475
4476 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4477 drbd_ov_oos_found(mdev, sector, size);
4478 else
4479 ov_oos_print(mdev);
4480
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004481 if (!get_ldev(mdev))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004482 return true;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004483
Philipp Reisnerb411b362009-09-25 16:07:19 -07004484 drbd_rs_complete_io(mdev, sector);
4485 dec_rs_pending(mdev);
4486
Lars Ellenbergea5442a2010-11-05 09:48:01 +01004487 --mdev->ov_left;
4488
4489 /* let's advance progress step marks only for every other megabyte */
4490 if ((mdev->ov_left & 0x200) == 0x200)
4491 drbd_advance_rs_marks(mdev, mdev->ov_left);
4492
4493 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004494 w = kmalloc(sizeof(*w), GFP_NOIO);
4495 if (w) {
4496 w->cb = w_ov_finished;
Philipp Reisnere42325a2011-01-19 13:55:45 +01004497 drbd_queue_work_front(&mdev->tconn->data.work, w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004498 } else {
4499 dev_err(DEV, "kmalloc(w) failed.");
4500 ov_oos_print(mdev);
4501 drbd_resync_finished(mdev);
4502 }
4503 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004504 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004505 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004506}
4507
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004508static int got_skip(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004509{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004510 return true;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004511}
4512
struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, enum drbd_packet cmd);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
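
/*
 * The asender loop below is a small two-phase receive state machine,
 * sketched:
 *
 *	expect = sizeof(struct p_header);	receive a full header
 *	decode_header() -> cmd_nr, pkt_size;	look up the asender_tbl entry
 *	expect = cmd->pkt_size;			receive the packet body
 *	cmd->process(mdev, cmd_nr);		handle it, then start over
 *
 * In between it drains done_ee (sending the corresponding ACKs), corks and
 * uncorks the meta socket, and uses the -EAGAIN receive timeout to detect
 * a missing PingAck.
 */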

int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header *h = &mdev->tconn->meta.rbuf.header;
	struct asender_cmd *cmd = NULL;

	int rv;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header);
	int ping_timeout_active = 0;
	int empty, pkt_size;
	enum drbd_packet cmd_nr;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(mdev, thi);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			if (!drbd_send_ping(mdev)) {
				dev_err(DEV, "drbd_send_ping has failed\n");
				goto reconnect;
			}
			mdev->tconn->meta.socket->sk->sk_rcvtimeo =
				mdev->tconn->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->tconn->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->tconn->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->tconn->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->tconn->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->tconn->net_conf->no_cork)
			drbd_tcp_uncork(mdev->tconn->meta.socket);

4602 /* short circuit, recv_msg would return EINTR anyways. */
4603 if (signal_pending(current))
4604 continue;

		rv = drbd_recv_short(mdev, mdev->tconn->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->tconn->last_received,
				jiffies - mdev->tconn->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (!decode_header(mdev, h, &cmd_nr, &pkt_size))
				goto reconnect;
			cmd = get_asender_cmd(cmd_nr);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command %d on meta (l: %d)\n",
					cmd_nr, pkt_size);
				goto disconnect;
			}
			expect = cmd->pkt_size;
			if (pkt_size != expect - sizeof(struct p_header)) {
				dev_err(DEV, "Wrong packet size on meta (c: %d, l: %d)\n",
					cmd_nr, pkt_size);
				goto reconnect;
			}
		}
		if (received == expect) {
			mdev->tconn->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, cmd_nr))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}