/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

enum mdev_or_conn {
	MDEV,
	CONN,
};

static int drbd_do_handshake(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(int vnr, void *p, void *data);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

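/*
 * Illustrative sketch (not part of the driver logic): the "next" pointer of
 * these chains lives in page->private, so a chain handed out by the helpers
 * above can be walked with page_chain_next() from drbd_int.h:
 *
 *	struct page *p;
 *	for (p = chain; p; p = page_chain_next(p))
 *		handle_one_page(p);	// handle_one_page() is hypothetical
 *
 * The chain is terminated by set_page_private(page, 0) in page_chain_del().
 */
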
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_ee_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

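/*
 * Illustrative sketch (assumptions, not driver code): drbd_pp_alloc() and
 * drbd_pp_free() are meant to be used as a pair; pp_in_use is charged on
 * allocation and released again on free, so a typical caller looks like:
 *
 *	page = drbd_pp_alloc(mdev, nr_pages, true);	// may sleep and retry
 *	if (page) {
 *		// ... fill the page chain from the socket ...
 *		drbd_pp_free(mdev, page, 0);		// 0: plain pp_in_use accounting
 *	}
 */
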
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_peer_request *
drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
	      unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_peer_request *peer_req;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		       int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_pp_free(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, peer_req, is_net);
		count++;
	}
	return count;
}


/* See also comments in _req_mod(,BARRIER_ACKED)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_write.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_ee(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);

	return err;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(const char **what, struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				conn_info(tconn, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			conn_info(tconn, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

	return rv;
}

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv(tconn, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);
	return err;
}

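/*
 * Note on the receive helpers above (summary of the code, no new behaviour):
 * drbd_recv() returns the number of bytes received or a negative errno,
 * drbd_recv_all() folds that into 0 / -EIO / -errno, and drbd_recv_all_warn()
 * additionally logs short reads unless a signal is pending.  Callers of the
 * *_all variants therefore only need an error check, e.g.:
 *
 *	if (drbd_recv_all_warn(tconn, buf, size))	// non-zero means error
 *		return -EIO;
 */
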
/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = tconn->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, tconn->net_conf->my_addr,
	       min_t(int, tconn->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)tconn->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)tconn->net_conf->peer_addr,
				 tconn->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			conn_err(tconn, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	put_net_conf(tconn);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = tconn->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) tconn->net_conf->my_addr,
			      tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(&what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}
	put_net_conf(tconn);

	return s_estab;
}

static int drbd_send_fp(struct drbd_tconn *tconn, struct socket *sock, enum drbd_packet cmd)
{
	struct p_header *h = tconn->data.sbuf;

	return !_conn_send_cmd(tconn, 0, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packet drbd_recv_fp(struct drbd_tconn *tconn, struct socket *sock)
{
	struct p_header80 *h = tconn->data.rbuf;
	int rr;

	rr = drbd_recv_short(sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(int vnr, void *p, void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)p;
	int err;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
	if (!err)
		err = drbd_send_sizes(mdev, 0, 0);
	if (!err)
		err = drbd_send_uuids(mdev);
	if (!err)
		err = drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	return err;
}

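/*
 * Illustrative note: drbd_connected() matches the idr_for_each() callback
 * signature (int (*)(int id, void *p, void *data)) and is invoked once per
 * volume from drbd_connect() below:
 *
 *	idr_for_each(&tconn->volumes, drbd_connected, tconn);
 *
 * idr_for_each() stops at the first non-zero return value, so a failure in
 * one volume aborts the per-volume setup for the whole connection.
 */
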
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_tconn *tconn)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &tconn->flags);

	/* Assume that the peer only understands protocol 80 until we know better. */
	tconn->agreed_pro_version = 80;

	sock = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(tconn);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(tconn, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(tconn, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				conn_err(tconn, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(tconn->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(&sock);
			ok = drbd_socket_okay(&msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(tconn);
		if (s) {
			try = drbd_recv_fp(tconn, s);
			drbd_socket_okay(&sock);
			drbd_socket_okay(&msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &tconn->flags);
				break;
			default:
				conn_warn(tconn, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(&sock);
			ok = drbd_socket_okay(&msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = tconn->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	tconn->data.socket = sock;
	tconn->meta.socket = msock;
	tconn->last_received = jiffies;

	h = drbd_do_handshake(tconn);
	if (h <= 0)
		return h;

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
		case -1:
			conn_err(tconn, "Authentication of peer failed\n");
			return -1;
		case 0:
			conn_err(tconn, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	drbd_thread_start(&tconn->asender);

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
		return -1;

	return !idr_for_each(&tconn->volumes, drbd_connected, tconn);

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

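/*
 * Connection setup summary (descriptive only): drbd_connect() above brings up
 * two TCP sockets per connection, "sock" for data and "msock" for meta
 * data/ACKs, racing an outgoing connect against an incoming accept until both
 * sides agree which socket is which; the very first packet sent on each
 * socket (P_HAND_SHAKE_S vs P_HAND_SHAKE_M) identifies its role.
 */
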
static int decode_header(struct drbd_tconn *tconn, struct p_header *h, struct packet_info *pi)
{
	if (h->h80.magic == cpu_to_be32(DRBD_MAGIC)) {
		pi->cmd = be16_to_cpu(h->h80.command);
		pi->size = be16_to_cpu(h->h80.length);
		pi->vnr = 0;
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		pi->cmd = be16_to_cpu(h->h95.command);
		pi->size = be32_to_cpu(h->h95.length) & 0x00ffffff;
		pi->vnr = 0;
	} else {
		conn_err(tconn, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return -EINVAL;
	}
	return 0;
}

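/*
 * Illustrative layout of the two on-the-wire header formats distinguished by
 * decode_header() above (field widths as implied by the h80/h95 accesses;
 * see the struct definitions in drbd_int.h for the authoritative source):
 *
 *	h80: be32 magic (DRBD_MAGIC),     be16 command, be16 length
 *	h95: be16 magic (DRBD_MAGIC_BIG), be16 command, be32 length
 *	     (only the low 24 bits of the h95 length are used)
 */
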
static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_header *h = tconn->data.rbuf;
	int err;

	err = drbd_recv_all_warn(tconn, h, sizeof(*h));
	if (err)
		return err;

	err = decode_header(tconn, h, pi);
	tconn->last_received = jiffies;

	return err;
}

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&peer_req->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

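/*
 * Usage note (descriptive only): drbd_submit_peer_request() walks the peer
 * request's page chain and starts a new bio whenever bio_add_page() refuses
 * a page, e.g. because queue limits differ on this side from the sender's.
 * pending_bios is set to the number of bios actually submitted; the
 * completion handler installed above (drbd_peer_request_endio) is expected
 * to drop that count once per completed bio.
 */
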
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001222static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001223 struct drbd_peer_request *peer_req)
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001224{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001225 struct drbd_interval *i = &peer_req->i;
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001226
1227 drbd_remove_interval(&mdev->write_requests, i);
1228 drbd_clear_interval(i);
1229
Andreas Gruenbacher6c852be2011-02-04 15:38:52 +01001230 /* Wake up any processes waiting for this peer request to complete. */
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001231 if (i->waiting)
1232 wake_up(&mdev->misc_wait);
1233}
1234
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001235static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001236{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001237 struct drbd_conf *mdev;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001238 int rv;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001239 struct p_barrier *p = tconn->data.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001240 struct drbd_epoch *epoch;
1241
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001242 mdev = vnr_to_mdev(tconn, pi->vnr);
1243 if (!mdev)
1244 return -EIO;
1245
Philipp Reisnerb411b362009-09-25 16:07:19 -07001246 inc_unacked(mdev);
1247
Philipp Reisnerb411b362009-09-25 16:07:19 -07001248 mdev->current_epoch->barrier_nr = p->barrier;
1249 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1250
1251 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1252 * the activity log, which means it would not be resynced in case the
1253 * R_PRIMARY crashes now.
1254 * Therefore we must send the barrier_ack after the barrier request was
1255 * completed. */
1256 switch (mdev->write_ordering) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001257 case WO_none:
1258 if (rv == FE_RECYCLED)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001259 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001260
1261 /* receiver context, in the writeout path of the other node.
1262 * avoid potential distributed deadlock */
1263 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1264 if (epoch)
1265 break;
1266 else
1267 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1268 /* Fall through */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001269
1270 case WO_bdev_flush:
1271 case WO_drain_io:
Philipp Reisnerb411b362009-09-25 16:07:19 -07001272 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
Philipp Reisner2451fc32010-08-24 13:43:11 +02001273 drbd_flush(mdev);
1274
1275 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1276 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1277 if (epoch)
1278 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001279 }
1280
Philipp Reisner2451fc32010-08-24 13:43:11 +02001281 epoch = mdev->current_epoch;
1282 wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
1283
1284 D_ASSERT(atomic_read(&epoch->active) == 0);
1285 D_ASSERT(epoch->flags == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001286
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001287 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001288 default:
1289 dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001290 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001291 }
1292
1293 epoch->flags = 0;
1294 atomic_set(&epoch->epoch_size, 0);
1295 atomic_set(&epoch->active, 0);
1296
1297 spin_lock(&mdev->epoch_lock);
1298 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1299 list_add(&epoch->list, &mdev->current_epoch->list);
1300 mdev->current_epoch = epoch;
1301 mdev->epochs++;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001302 } else {
1303 /* The current_epoch got recycled while we allocated this one... */
1304 kfree(epoch);
1305 }
1306 spin_unlock(&mdev->epoch_lock);
1307
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001308 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001309}
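/*
 * Recap of receive_Barrier() above: a P_BARRIER from the peer closes the
 * current write epoch.  With WO_none we only hand out a fresh epoch object
 * (or are already done if the old one was recycled); with WO_bdev_flush or
 * WO_drain_io we additionally wait for the epoch's writes (active_ee) to
 * complete and call drbd_flush(), so that, as the activity log comment
 * above demands, the P_BARRIER_ACK cannot overtake the barriered writes.
 * The freshly allocated epoch becomes current_epoch unless the old one
 * emptied out while we were allocating it.
 */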
1310
1311/* used from receive_RSDataReply (recv_resync_read)
1312 * and from receive_Data */
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01001313static struct drbd_peer_request *
1314read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1315 int data_size) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001316{
Lars Ellenberg66660322010-04-06 12:15:04 +02001317 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001318 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001319 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001320 int dgs, ds, err;
Philipp Reisnera0638452011-01-19 14:31:32 +01001321 void *dig_in = mdev->tconn->int_dig_in;
1322 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001323 unsigned long *data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001324
Philipp Reisnera0638452011-01-19 14:31:32 +01001325 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1326 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001327
1328 if (dgs) {
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001329 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1330 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001331 return NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001332 }
1333
1334 data_size -= dgs;
1335
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001336 if (!expect(data_size != 0))
1337 return NULL;
1338 if (!expect(IS_ALIGNED(data_size, 512)))
1339 return NULL;
1340 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1341 return NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001342
Lars Ellenberg66660322010-04-06 12:15:04 +02001343 /* even though we trust our peer,
1344 * we sometimes have to double check. */
1345 if (sector + (data_size>>9) > capacity) {
Lars Ellenbergfdda6542011-01-24 15:11:01 +01001346 dev_err(DEV, "request from peer beyond end of local disk: "
1347 "capacity: %llus < sector: %llus + size: %u\n",
Lars Ellenberg66660322010-04-06 12:15:04 +02001348 (unsigned long long)capacity,
1349 (unsigned long long)sector, data_size);
1350 return NULL;
1351 }
1352
Philipp Reisnerb411b362009-09-25 16:07:19 -07001353 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1354 * "criss-cross" setup, that might cause write-out on some other DRBD,
1355 * which in turn might block on the other node at this very place. */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001356 peer_req = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1357 if (!peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001358 return NULL;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001359
Philipp Reisnerb411b362009-09-25 16:07:19 -07001360 ds = data_size;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001361 page = peer_req->pages;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001362 page_chain_for_each(page) {
1363 unsigned len = min_t(int, ds, PAGE_SIZE);
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001364 data = kmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001365 err = drbd_recv_all_warn(mdev->tconn, data, len);
Andreas Gruenbacher0cf9d272010-12-07 10:43:29 +01001366 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001367 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1368 data[0] = data[0] ^ (unsigned long)-1;
1369 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001370 kunmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001371 if (err) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001372 drbd_free_ee(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001373 return NULL;
1374 }
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001375 ds -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001376 }
1377
1378 if (dgs) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001379 drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, peer_req, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001380 if (memcmp(dig_in, dig_vv, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001381 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1382 (unsigned long long)sector, data_size);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001383 drbd_free_ee(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001384 return NULL;
1385 }
1386 }
1387 mdev->recv_cnt += data_size>>9;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001388 return peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001389}
1390
1391/* drbd_drain_block() just takes a data block
1392 * out of the socket input buffer, and discards it.
1393 */
1394static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1395{
1396 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001397 int err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001398 void *data;
1399
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001400 if (!data_size)
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001401 return 0;
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001402
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001403 page = drbd_pp_alloc(mdev, 1, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001404
1405 data = kmap(page);
1406 while (data_size) {
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001407 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1408
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001409 err = drbd_recv_all_warn(mdev->tconn, data, len);
1410 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001411 break;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001412 data_size -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001413 }
1414 kunmap(page);
Lars Ellenberg435f0742010-09-06 12:30:25 +02001415 drbd_pp_free(mdev, page, 0);
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001416 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001417}
1418
1419static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1420 sector_t sector, int data_size)
1421{
1422 struct bio_vec *bvec;
1423 struct bio *bio;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001424 int dgs, err, i, expect;
Philipp Reisnera0638452011-01-19 14:31:32 +01001425 void *dig_in = mdev->tconn->int_dig_in;
1426 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001427
Philipp Reisnera0638452011-01-19 14:31:32 +01001428 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1429 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001430
1431 if (dgs) {
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001432 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1433 if (err)
1434 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001435 }
1436
1437 data_size -= dgs;
1438
1439 /* optimistically update recv_cnt. if receiving fails below,
1440 * we disconnect anyways, and counters will be reset. */
1441 mdev->recv_cnt += data_size>>9;
1442
1443 bio = req->master_bio;
1444 D_ASSERT(sector == bio->bi_sector);
1445
1446 bio_for_each_segment(bvec, bio, i) {
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001447 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001448 expect = min_t(int, data_size, bvec->bv_len);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001449 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001450 kunmap(bvec->bv_page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001451 if (err)
1452 return err;
1453 data_size -= expect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001454 }
1455
1456 if (dgs) {
Philipp Reisnera0638452011-01-19 14:31:32 +01001457 drbd_csum_bio(mdev, mdev->tconn->integrity_r_tfm, bio, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001458 if (memcmp(dig_in, dig_vv, dgs)) {
1459 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001460 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001461 }
1462 }
1463
1464 D_ASSERT(data_size == 0);
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001465 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001466}
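/*
 * recv_dless_read() above handles the reply to a read request that we had
 * shipped to the peer: the payload is copied straight into the bvecs of the
 * original request's master_bio (no local disk access involved), and the
 * optional digest is verified over the complete bio afterwards; a mismatch
 * is reported as -EINVAL.
 */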
1467
1468/* e_end_resync_block() is called via
1469 * drbd_process_done_ee() by asender only */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001470static int e_end_resync_block(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001471{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001472 struct drbd_peer_request *peer_req =
1473 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001474 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001475 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001476 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001477
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001478 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001479
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001480 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1481 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001482 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001483 } else {
1484 /* Record failure to sync */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001485 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001486
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001487 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001488 }
1489 dec_unacked(mdev);
1490
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001491 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001492}
1493
1494static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1495{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001496 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001497
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001498 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1499 if (!peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001500 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001501
1502 dec_rs_pending(mdev);
1503
Philipp Reisnerb411b362009-09-25 16:07:19 -07001504 inc_unacked(mdev);
1505 /* corresponding dec_unacked() in e_end_resync_block()
1506 * or in _drbd_clear_done_ee */
1507
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001508 peer_req->w.cb = e_end_resync_block;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001509
Philipp Reisner87eeee42011-01-19 14:16:30 +01001510 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001511 list_add(&peer_req->w.list, &mdev->sync_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001512 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001513
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001514 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001515 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001516 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001517
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001518 /* don't care for the reason here */
1519 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01001520 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001521 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001522 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001523
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001524 drbd_free_ee(mdev, peer_req);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001525fail:
1526 put_ldev(mdev);
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001527 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001528}
1529
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001530static struct drbd_request *
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001531find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1532 sector_t sector, bool missing_ok, const char *func)
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001533{
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001534 struct drbd_request *req;
1535
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001536 /* Request object according to our peer */
1537 req = (struct drbd_request *)(unsigned long)id;
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001538 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001539 return req;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001540 if (!missing_ok) {
1541 dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
1542 (unsigned long)id, (unsigned long long)sector);
1543 }
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001544 return NULL;
1545}
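/*
 * find_request(): the id the peer sends back is simply the kernel address
 * of our struct drbd_request (presumably stored into block_id when the
 * request packet went out).  The cast-back pointer is only trusted after
 * drbd_contains_interval() has confirmed that exactly this object is still
 * registered in the given interval tree for that sector.
 */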
1546
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001547static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001548{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001549 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001550 struct drbd_request *req;
1551 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001552 int err;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001553 struct p_data *p = tconn->data.rbuf;
1554
1555 mdev = vnr_to_mdev(tconn, pi->vnr);
1556 if (!mdev)
1557 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001558
1559 sector = be64_to_cpu(p->sector);
1560
Philipp Reisner87eeee42011-01-19 14:16:30 +01001561 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001562 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001563 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001564 if (unlikely(!req))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001565 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001566
Bart Van Assche24c48302011-05-21 18:32:29 +02001567 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
Philipp Reisnerb411b362009-09-25 16:07:19 -07001568 * special casing it there for the various failure cases.
1569 * still no race with drbd_fail_pending_reads */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001570 err = recv_dless_read(mdev, req, sector, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001571 if (!err)
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001572 req_mod(req, DATA_RECEIVED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001573 /* else: nothing. handled from drbd_disconnect...
1574 * I don't think we may complete this just yet
1575 * in case we are "on-disconnect: freeze" */
1576
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001577 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001578}
1579
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001580static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001581{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001582 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001583 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001584 int err;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001585 struct p_data *p = tconn->data.rbuf;
1586
1587 mdev = vnr_to_mdev(tconn, pi->vnr);
1588 if (!mdev)
1589 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001590
1591 sector = be64_to_cpu(p->sector);
1592 D_ASSERT(p->block_id == ID_SYNCER);
1593
1594 if (get_ldev(mdev)) {
1595 /* data is submitted to disk within recv_resync_read.
1596 * corresponding put_ldev done below on error,
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01001597 * or in drbd_peer_request_endio. */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001598 err = recv_resync_read(mdev, sector, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001599 } else {
1600 if (__ratelimit(&drbd_ratelimit_state))
1601 dev_err(DEV, "Can not write resync data to local disk.\n");
1602
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001603 err = drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001604
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001605 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001606 }
1607
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001608 atomic_add(pi->size >> 9, &mdev->rs_sect_in);
Philipp Reisner778f2712010-07-06 11:14:00 +02001609
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001610 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001611}
1612
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001613static int w_restart_write(struct drbd_work *w, int cancel)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001614{
1615 struct drbd_request *req = container_of(w, struct drbd_request, w);
1616 struct drbd_conf *mdev = w->mdev;
1617 struct bio *bio;
1618 unsigned long start_time;
1619 unsigned long flags;
1620
1621 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
1622 if (!expect(req->rq_state & RQ_POSTPONED)) {
1623 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001624 return -EIO;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001625 }
1626 bio = req->master_bio;
1627 start_time = req->start_time;
1628 /* Postponed requests will not have their master_bio completed! */
1629 __req_mod(req, DISCARD_WRITE, NULL);
1630 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
1631
1632 while (__drbd_make_request(mdev, bio, start_time))
1633 /* retry */ ;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001634 return 0;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001635}
1636
1637static void restart_conflicting_writes(struct drbd_conf *mdev,
1638 sector_t sector, int size)
1639{
1640 struct drbd_interval *i;
1641 struct drbd_request *req;
1642
1643 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1644 if (!i->local)
1645 continue;
1646 req = container_of(i, struct drbd_request, i);
1647 if (req->rq_state & RQ_LOCAL_PENDING ||
1648 !(req->rq_state & RQ_POSTPONED))
1649 continue;
1650 if (expect(list_empty(&req->w.list))) {
1651 req->w.mdev = mdev;
1652 req->w.cb = w_restart_write;
1653 drbd_queue_work(&mdev->tconn->data.work, &req->w);
1654 }
1655 }
1656}
1657
Philipp Reisnerb411b362009-09-25 16:07:19 -07001658/* e_end_block() is called via drbd_process_done_ee().
1659 * this means this function only runs in the asender thread
1660 */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001661static int e_end_block(struct drbd_work *w, int cancel)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001662{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001663 struct drbd_peer_request *peer_req =
1664 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001665 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001666 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001667 int err = 0, pcmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001668
Philipp Reisner89e58e72011-01-19 13:12:45 +01001669 if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001670 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001671 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1672 mdev->state.conn <= C_PAUSED_SYNC_T &&
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001673 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
Philipp Reisnerb411b362009-09-25 16:07:19 -07001674 P_RS_WRITE_ACK : P_WRITE_ACK;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001675 err = drbd_send_ack(mdev, pcmd, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001676 if (pcmd == P_RS_WRITE_ACK)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001677 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001678 } else {
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001679 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001680 /* we expect it to be marked out of sync anyways...
1681 * maybe assert this? */
1682 }
1683 dec_unacked(mdev);
1684 }
1685 /* we delete from the conflict detection hash _after_ we sent out the
1686 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
Philipp Reisner89e58e72011-01-19 13:12:45 +01001687 if (mdev->tconn->net_conf->two_primaries) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01001688 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001689 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1690 drbd_remove_epoch_entry_interval(mdev, peer_req);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001691 if (peer_req->flags & EE_RESTART_REQUESTS)
1692 restart_conflicting_writes(mdev, sector, peer_req->i.size);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001693 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbb3bfe92011-01-21 15:59:23 +01001694 } else
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001695 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001696
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001697 drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001698
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001699 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001700}
1701
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001702static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001703{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001704 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001705 struct drbd_peer_request *peer_req =
1706 container_of(w, struct drbd_peer_request, w);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001707 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001708
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001709 err = drbd_send_ack(mdev, ack, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001710 dec_unacked(mdev);
1711
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001712 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001713}
1714
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001715static int e_send_discard_write(struct drbd_work *w, int unused)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001716{
1717 return e_send_ack(w, P_DISCARD_WRITE);
1718}
1719
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001720static int e_send_retry_write(struct drbd_work *w, int unused)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001721{
1722 struct drbd_tconn *tconn = w->mdev->tconn;
1723
1724 return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
1725 P_RETRY_WRITE : P_DISCARD_WRITE);
1726}
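/*
 * e_send_retry_write() answers with P_RETRY_WRITE only if the agreed
 * protocol version is at least 100; older peers are sent P_DISCARD_WRITE
 * instead.
 */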
1727
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001728static bool seq_greater(u32 a, u32 b)
1729{
1730 /*
1731 * We assume 32-bit wrap-around here.
1732 * For 24-bit wrap-around, we would have to shift:
1733 * a <<= 8; b <<= 8;
1734 */
1735 return (s32)a - (s32)b > 0;
1736}
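/*
 * The signed subtraction above is the usual wrap-around safe "newer than"
 * test, valid as long as the two sequence numbers are less than 2^31 apart.
 * For example:
 *   seq_greater(5, 0xfffffffe) == true   ((s32)5 - (s32)0xfffffffe ==  7)
 *   seq_greater(0xfffffffe, 5) == false  ((s32)0xfffffffe - (s32)5 == -7)
 */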
1737
1738static u32 seq_max(u32 a, u32 b)
1739{
1740 return seq_greater(a, b) ? a : b;
1741}
1742
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001743static bool need_peer_seq(struct drbd_conf *mdev)
1744{
1745 struct drbd_tconn *tconn = mdev->tconn;
1746
1747 /*
1748 * We only need to keep track of the last packet_seq number of our peer
1749 * if we are in dual-primary mode and we have the discard flag set; see
1750 * handle_write_conflicts().
1751 */
1752 return tconn->net_conf->two_primaries &&
1753 test_bit(DISCARD_CONCURRENT, &tconn->flags);
1754}
1755
Andreas Gruenbacher43ae0772011-02-03 18:42:08 +01001756static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001757{
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001758 unsigned int newest_peer_seq;
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001759
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001760 if (need_peer_seq(mdev)) {
1761 spin_lock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001762 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1763 mdev->peer_seq = newest_peer_seq;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001764 spin_unlock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001765 /* wake up only if we actually changed mdev->peer_seq */
1766 if (peer_seq == newest_peer_seq)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001767 wake_up(&mdev->seq_wait);
1768 }
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001769}
1770
Philipp Reisnerb411b362009-09-25 16:07:19 -07001771/* Called from receive_Data.
1772 * Synchronize packets on sock with packets on msock.
1773 *
1774 * This is here so that even when a P_DATA packet traveling via sock overtakes an Ack
1775 * packet traveling on msock, they are still processed in the order they have
1776 * been sent.
1777 *
1778 * Note: we don't care for Ack packets overtaking P_DATA packets.
1779 *
1780 * In case packet_seq is larger than mdev->peer_seq number, there are
1781 * outstanding packets on the msock. We wait for them to arrive.
1782 * In case we are the logically next packet, we update mdev->peer_seq
1783 * ourselves. Correctly handles 32bit wrap around.
1784 *
1785 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1786 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1787 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1788 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1789 *
1790 * returns 0 if we may process the packet,
1791 * -ERESTARTSYS if we were interrupted (by disconnect signal), -ETIMEDOUT if the missing ack packets did not arrive in time. */
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001792static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001793{
1794 DEFINE_WAIT(wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001795 long timeout;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001796 int ret;
1797
1798 if (!need_peer_seq(mdev))
1799 return 0;
1800
Philipp Reisnerb411b362009-09-25 16:07:19 -07001801 spin_lock(&mdev->peer_seq_lock);
1802 for (;;) {
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001803 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1804 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1805 ret = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001806 break;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001807 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001808 if (signal_pending(current)) {
1809 ret = -ERESTARTSYS;
1810 break;
1811 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001812 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001813 spin_unlock(&mdev->peer_seq_lock);
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001814 timeout = mdev->tconn->net_conf->ping_timeo*HZ/10;
1815 timeout = schedule_timeout(timeout);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001816 spin_lock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001817 if (!timeout) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001818 ret = -ETIMEDOUT;
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001819 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001820 break;
1821 }
1822 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001823 spin_unlock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001824 finish_wait(&mdev->seq_wait, &wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001825 return ret;
1826}
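/*
 * Example of the wait above: a P_DATA with seq_num 5 arriving while
 * mdev->peer_seq is still 3 finds seq_greater(4, 3) true and sleeps.  Once
 * the packet carrying seq_num 4 has been processed on the meta socket and
 * update_peer_seq() raised mdev->peer_seq to 4, the waiter wakes up, finds
 * seq_greater(4, 4) false, bumps mdev->peer_seq to 5 and lets the write
 * proceed.  The timeout is derived from ping_timeo, which appears to be
 * configured in tenths of a second (hence the *HZ/10).
 */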
1827
Lars Ellenberg688593c2010-11-17 22:25:03 +01001828/* see also bio_flags_to_wire()
1829 * we map the DRBD_REQ_* flags to data packet (DP_*) flags and back
1830 * semantically, because we may replicate to other kernel versions. */
1831static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001832{
Lars Ellenberg688593c2010-11-17 22:25:03 +01001833 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1834 (dpf & DP_FUA ? REQ_FUA : 0) |
1835 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1836 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001837}
1838
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001839static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
1840 unsigned int size)
1841{
1842 struct drbd_interval *i;
1843
1844 repeat:
1845 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1846 struct drbd_request *req;
1847 struct bio_and_error m;
1848
1849 if (!i->local)
1850 continue;
1851 req = container_of(i, struct drbd_request, i);
1852 if (!(req->rq_state & RQ_POSTPONED))
1853 continue;
1854 req->rq_state &= ~RQ_POSTPONED;
1855 __req_mod(req, NEG_ACKED, &m);
1856 spin_unlock_irq(&mdev->tconn->req_lock);
1857 if (m.bio)
1858 complete_master_bio(mdev, &m);
1859 spin_lock_irq(&mdev->tconn->req_lock);
1860 goto repeat;
1861 }
1862}
1863
1864static int handle_write_conflicts(struct drbd_conf *mdev,
1865 struct drbd_peer_request *peer_req)
1866{
1867 struct drbd_tconn *tconn = mdev->tconn;
1868 bool resolve_conflicts = test_bit(DISCARD_CONCURRENT, &tconn->flags);
1869 sector_t sector = peer_req->i.sector;
1870 const unsigned int size = peer_req->i.size;
1871 struct drbd_interval *i;
1872 bool equal;
1873 int err;
1874
1875 /*
1876 * Inserting the peer request into the write_requests tree will prevent
1877 * new conflicting local requests from being added.
1878 */
1879 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
1880
1881 repeat:
1882 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1883 if (i == &peer_req->i)
1884 continue;
1885
1886 if (!i->local) {
1887 /*
1888 * Our peer has sent a conflicting remote request; this
1889 * should not happen in a two-node setup. Wait for the
1890 * earlier peer request to complete.
1891 */
1892 err = drbd_wait_misc(mdev, i);
1893 if (err)
1894 goto out;
1895 goto repeat;
1896 }
1897
1898 equal = i->sector == sector && i->size == size;
1899 if (resolve_conflicts) {
1900 /*
1901 * If the peer request is fully contained within the
1902 * overlapping request, it can be discarded; otherwise,
1903 * it will be retried once all overlapping requests
1904 * have completed.
1905 */
1906 bool discard = i->sector <= sector && i->sector +
1907 (i->size >> 9) >= sector + (size >> 9);
1908
1909 if (!equal)
1910 dev_alert(DEV, "Concurrent writes detected: "
1911 "local=%llus +%u, remote=%llus +%u, "
1912 "assuming %s came first\n",
1913 (unsigned long long)i->sector, i->size,
1914 (unsigned long long)sector, size,
1915 discard ? "local" : "remote");
1916
1917 inc_unacked(mdev);
1918 peer_req->w.cb = discard ? e_send_discard_write :
1919 e_send_retry_write;
1920 list_add_tail(&peer_req->w.list, &mdev->done_ee);
1921 wake_asender(mdev->tconn);
1922
1923 err = -ENOENT;
1924 goto out;
1925 } else {
1926 struct drbd_request *req =
1927 container_of(i, struct drbd_request, i);
1928
1929 if (!equal)
1930 dev_alert(DEV, "Concurrent writes detected: "
1931 "local=%llus +%u, remote=%llus +%u\n",
1932 (unsigned long long)i->sector, i->size,
1933 (unsigned long long)sector, size);
1934
1935 if (req->rq_state & RQ_LOCAL_PENDING ||
1936 !(req->rq_state & RQ_POSTPONED)) {
1937 /*
1938 * Wait for the node with the discard flag to
1939 * decide if this request will be discarded or
1940 * retried. Requests that are discarded will
1941 * disappear from the write_requests tree.
1942 *
1943 * In addition, wait for the conflicting
1944 * request to finish locally before submitting
1945 * the conflicting peer request.
1946 */
1947 err = drbd_wait_misc(mdev, &req->i);
1948 if (err) {
1949 _conn_request_state(mdev->tconn,
1950 NS(conn, C_TIMEOUT),
1951 CS_HARD);
1952 fail_postponed_requests(mdev, sector, size);
1953 goto out;
1954 }
1955 goto repeat;
1956 }
1957 /*
1958 * Remember to restart the conflicting requests after
1959 * the new peer request has completed.
1960 */
1961 peer_req->flags |= EE_RESTART_REQUESTS;
1962 }
1963 }
1964 err = 0;
1965
1966 out:
1967 if (err)
1968 drbd_remove_epoch_entry_interval(mdev, peer_req);
1969 return err;
1970}
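/*
 * handle_write_conflicts() return values, as consumed by receive_Data()
 * below: 0 means the peer request was inserted without remaining conflicts
 * and may be submitted; -ENOENT means this node holds the discard flag and
 * the write is answered with P_DISCARD_WRITE / P_RETRY_WRITE from the
 * asender instead of being submitted; any other error means waiting for a
 * conflicting request failed, and the error is propagated without
 * submitting.  A peer request only qualifies for discarding if it is fully
 * contained in the conflicting local request (the "discard" test above).
 */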
1971
Philipp Reisnerb411b362009-09-25 16:07:19 -07001972/* mirrored write */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001973static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001974{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001975 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001976 sector_t sector;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001977 struct drbd_peer_request *peer_req;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001978 struct p_data *p = tconn->data.rbuf;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001979 u32 peer_seq = be32_to_cpu(p->seq_num);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001980 int rw = WRITE;
1981 u32 dp_flags;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001982 int err;
1983
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001984 mdev = vnr_to_mdev(tconn, pi->vnr);
1985 if (!mdev)
1986 return -EIO;
1987
Philipp Reisnerb411b362009-09-25 16:07:19 -07001988 if (!get_ldev(mdev)) {
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001989 int err2;
1990
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001991 err = wait_for_and_update_peer_seq(mdev, peer_seq);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001992 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001993 atomic_inc(&mdev->current_epoch->epoch_size);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001994 err2 = drbd_drain_block(mdev, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001995 if (!err)
1996 err = err2;
1997 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001998 }
1999
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01002000 /*
2001 * Corresponding put_ldev done either below (on various errors), or in
2002 * drbd_peer_request_endio, if we successfully submit the data at the
2003 * end of this function.
2004 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002005
2006 sector = be64_to_cpu(p->sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002007 peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002008 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002009 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002010 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002011 }
2012
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002013 peer_req->w.cb = e_end_block;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002014
Lars Ellenberg688593c2010-11-17 22:25:03 +01002015 dp_flags = be32_to_cpu(p->dp_flags);
2016 rw |= wire_flags_to_bio(mdev, dp_flags);
2017
2018 if (dp_flags & DP_MAY_SET_IN_SYNC)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002019 peer_req->flags |= EE_MAY_SET_IN_SYNC;
Lars Ellenberg688593c2010-11-17 22:25:03 +01002020
Philipp Reisnerb411b362009-09-25 16:07:19 -07002021 spin_lock(&mdev->epoch_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002022 peer_req->epoch = mdev->current_epoch;
2023 atomic_inc(&peer_req->epoch->epoch_size);
2024 atomic_inc(&peer_req->epoch->active);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002025 spin_unlock(&mdev->epoch_lock);
2026
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002027 if (mdev->tconn->net_conf->two_primaries) {
2028 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2029 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002030 goto out_interrupted;
Philipp Reisner87eeee42011-01-19 14:16:30 +01002031 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002032 err = handle_write_conflicts(mdev, peer_req);
2033 if (err) {
2034 spin_unlock_irq(&mdev->tconn->req_lock);
2035 if (err == -ENOENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002036 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002037 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002038 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002039 goto out_interrupted;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002040 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002041 } else
2042 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002043 list_add(&peer_req->w.list, &mdev->active_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002044 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002045
Philipp Reisner89e58e72011-01-19 13:12:45 +01002046 switch (mdev->tconn->net_conf->wire_protocol) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002047 case DRBD_PROT_C:
2048 inc_unacked(mdev);
2049 /* corresponding dec_unacked() in e_end_block()
2050 * or in _drbd_clear_done_ee */
2051 break;
2052 case DRBD_PROT_B:
2053 /* I really don't like it that the receiver thread
2054 * sends on the msock, but anyways */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002055 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002056 break;
2057 case DRBD_PROT_A:
2058 /* nothing to do */
2059 break;
2060 }
2061
Lars Ellenberg6719fb02010-10-18 23:04:07 +02002062 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002063 /* In case we have the only disk of the cluster, */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002064 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2065 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2066 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2067 drbd_al_begin_io(mdev, peer_req->i.sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002068 }
2069
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002070 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2071 if (!err)
2072 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002073
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002074 /* don't care for the reason here */
2075 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002076 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002077 list_del(&peer_req->w.list);
2078 drbd_remove_epoch_entry_interval(mdev, peer_req);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002079 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002080 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
2081 drbd_al_complete_io(mdev, peer_req->i.sector);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002082
Philipp Reisnerb411b362009-09-25 16:07:19 -07002083out_interrupted:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002084 drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002085 put_ldev(mdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002086 drbd_free_ee(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002087 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002088}
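/*
 * Ack behaviour of receive_Data() per wire protocol, as implemented above:
 * protocol C defers the acknowledgement to e_end_block() (P_WRITE_ACK,
 * P_RS_WRITE_ACK or P_NEG_ACK once the local write has finished), protocol
 * B sends P_RECV_ACK as soon as the data has been received, and protocol A
 * sends no ack at all.
 */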
2089
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002090/* We may throttle resync, if the lower device seems to be busy,
2091 * and current sync rate is above c_min_rate.
2092 *
2093 * To decide whether or not the lower device is busy, we use a scheme similar
2094 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant" amount
2095 * (more than 64 sectors) of activity we cannot account for with our own resync
2096 * activity, it obviously is "busy".
2097 *
2098 * The current sync rate used here uses only the most recent two step marks,
2099 * to have a short time average so we can react faster.
2100 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002101int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002102{
2103 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2104 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01002105 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002106 int curr_events;
2107 int throttle = 0;
2108
2109 /* feature disabled? */
Lars Ellenbergf3990022011-03-23 14:31:09 +01002110 if (mdev->ldev->dc.c_min_rate == 0)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002111 return 0;
2112
Philipp Reisnere3555d82010-11-07 15:56:29 +01002113 spin_lock_irq(&mdev->al_lock);
2114 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2115 if (tmp) {
2116 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2117 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2118 spin_unlock_irq(&mdev->al_lock);
2119 return 0;
2120 }
2121 /* Do not slow down if app IO is already waiting for this extent */
2122 }
2123 spin_unlock_irq(&mdev->al_lock);
2124
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002125 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2126 (int)part_stat_read(&disk->part0, sectors[1]) -
2127 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01002128
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002129 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2130 unsigned long rs_left;
2131 int i;
2132
2133 mdev->rs_last_events = curr_events;
2134
2135 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2136 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01002137 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2138
2139 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2140 rs_left = mdev->ov_left;
2141 else
2142 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002143
2144 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2145 if (!dt)
2146 dt++;
2147 db = mdev->rs_mark_left[i] - rs_left;
2148 dbdt = Bit2KB(db/dt);
2149
Lars Ellenbergf3990022011-03-23 14:31:09 +01002150 if (dbdt > mdev->ldev->dc.c_min_rate)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002151 throttle = 1;
2152 }
2153 return throttle;
2154}
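/*
 * Rough example of the rate check above, assuming the usual 4 KiB per
 * bitmap bit: with db = 25600 bits resynced between the two most recent
 * sync marks and dt = 2 seconds, dbdt = Bit2KB(25600/2) = 51200 KB/s; with
 * c_min_rate set to, say, 40000 KB/s this node would ask for throttling,
 * provided the backing disk also showed more than 64 sectors of activity
 * we cannot attribute to our own resync since the last check.
 */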
2155
2156
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002157static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002158{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002159 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002160 sector_t sector;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002161 sector_t capacity;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002162 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002163 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002164 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002165 unsigned int fault_type;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002166 struct p_block_req *p = tconn->data.rbuf;
2167
2168 mdev = vnr_to_mdev(tconn, pi->vnr);
2169 if (!mdev)
2170 return -EIO;
2171 capacity = drbd_get_capacity(mdev->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002172
2173 sector = be64_to_cpu(p->sector);
2174 size = be32_to_cpu(p->blksize);
2175
Andreas Gruenbacherc670a392011-02-21 12:41:39 +01002176 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002177 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2178 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002179 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002180 }
2181 if (sector + (size>>9) > capacity) {
2182 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2183 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002184 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002185 }
2186
2187 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002188 verb = 1;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002189 switch (pi->cmd) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002190 case P_DATA_REQUEST:
2191 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2192 break;
2193 case P_RS_DATA_REQUEST:
2194 case P_CSUM_RS_REQUEST:
2195 case P_OV_REQUEST:
2196 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2197 break;
2198 case P_OV_REPLY:
2199 verb = 0;
2200 dec_rs_pending(mdev);
2201 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2202 break;
2203 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002204 BUG();
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002205 }
2206 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002207 dev_err(DEV, "Can not satisfy peer's read request, "
2208 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002209
Lars Ellenberga821cc42010-09-06 12:31:37 +02002210 /* drain the payload, if any */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002211 return drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002212 }
2213
2214 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2215 * "criss-cross" setup, that might cause write-out on some other DRBD,
2216 * which in turn might block on the other node at this very place. */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002217 peer_req = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2218 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002219 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002220 return -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002221 }
2222
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002223 switch (pi->cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002224 case P_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002225 peer_req->w.cb = w_e_end_data_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002226 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002227 /* application IO, don't drbd_rs_begin_io */
2228 goto submit;
2229
Philipp Reisnerb411b362009-09-25 16:07:19 -07002230 case P_RS_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002231 peer_req->w.cb = w_e_end_rsdata_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002232 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002233 /* used in the sector offset progress display */
2234 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002235 break;
2236
2237 case P_OV_REPLY:
2238 case P_CSUM_RS_REQUEST:
2239 fault_type = DRBD_FAULT_RS_RD;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002240 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002241 if (!di)
2242 goto out_free_e;
2243
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002244 di->digest_size = pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002245 di->digest = (((char *)di)+sizeof(struct digest_info));
2246
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002247 peer_req->digest = di;
2248 peer_req->flags |= EE_HAS_DIGEST;
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002249
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002250 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002251 goto out_free_e;
2252
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002253 if (pi->cmd == P_CSUM_RS_REQUEST) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002254 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002255 peer_req->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002256 /* used in the sector offset progress display */
2257 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002258 } else if (pi->cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002259 /* track progress, we may need to throttle */
2260 atomic_add(size >> 9, &mdev->rs_sect_in);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002261 peer_req->w.cb = w_e_end_ov_reply;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002262 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002263 /* drbd_rs_begin_io done when we sent this request,
2264 * but accounting still needs to be done. */
2265 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002266 }
2267 break;
2268
2269 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002270 if (mdev->ov_start_sector == ~(sector_t)0 &&
Philipp Reisner31890f42011-01-19 14:12:51 +01002271 mdev->tconn->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002272 unsigned long now = jiffies;
2273 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002274 mdev->ov_start_sector = sector;
2275 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002276 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2277 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002278 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2279 mdev->rs_mark_left[i] = mdev->ov_left;
2280 mdev->rs_mark_time[i] = now;
2281 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002282 dev_info(DEV, "Online Verify start sector: %llu\n",
2283 (unsigned long long)sector);
2284 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002285 peer_req->w.cb = w_e_end_ov_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002286 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002287 break;
2288
Philipp Reisnerb411b362009-09-25 16:07:19 -07002289 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002290 BUG();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002291 }
2292
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002293 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2294 * wrt the receiver, but it is not as straightforward as it may seem.
2295 * Various places in the resync start and stop logic assume resync
2296 * requests are processed in order, requeuing this on the worker thread
2297 * introduces a bunch of new code for synchronization between threads.
2298 *
2299 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2300 * "forever", throttling after drbd_rs_begin_io will lock that extent
2301 * for application writes for the same time. For now, just throttle
2302 * here, where the rest of the code expects the receiver to sleep for
2303 * a while, anyways.
2304 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002305
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002306 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2307 * this defers syncer requests for some time, before letting at least
2308 * one request through. The resync controller on the receiving side
2309 * will adapt to the incoming rate accordingly.
2310 *
2311 * We cannot throttle here if remote is Primary/SyncTarget:
2312 * we would also throttle its application reads.
2313 * In that case, throttling is done on the SyncTarget only.
2314 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002315 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2316 schedule_timeout_uninterruptible(HZ/10);
2317 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002318 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002319
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002320submit_for_resync:
2321 atomic_add(size >> 9, &mdev->rs_sect_ev);
2322
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002323submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002324 inc_unacked(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002325 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002326 list_add_tail(&peer_req->w.list, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002327 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002328
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01002329 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002330 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002331
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002332 /* don't care for the reason here */
2333 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002334 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002335 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002336 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002337 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2338
Philipp Reisnerb411b362009-09-25 16:07:19 -07002339out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002340 put_ldev(mdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002341 drbd_free_ee(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002342 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002343}
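/*
 * Accounting note on receive_DataRequest() above: each resync or online
 * verify read that gets submitted adds its size to rs_sect_ev.
 * drbd_rs_should_slow_down() subtracts exactly this counter from the disk's
 * part_stat sector counts, so only I/O we cannot attribute to our own
 * resync activity makes the lower device look "busy".
 */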
2344
2345static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2346{
2347 int self, peer, rv = -100;
2348 unsigned long ch_self, ch_peer;
2349
2350 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2351 peer = mdev->p_uuid[UI_BITMAP] & 1;
2352
2353 ch_peer = mdev->p_uuid[UI_SIZE];
2354 ch_self = mdev->comm_bm_set;
2355
Philipp Reisner89e58e72011-01-19 13:12:45 +01002356 switch (mdev->tconn->net_conf->after_sb_0p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002357 case ASB_CONSENSUS:
2358 case ASB_DISCARD_SECONDARY:
2359 case ASB_CALL_HELPER:
2360 dev_err(DEV, "Configuration error.\n");
2361 break;
2362 case ASB_DISCONNECT:
2363 break;
2364 case ASB_DISCARD_YOUNGER_PRI:
2365 if (self == 0 && peer == 1) {
2366 rv = -1;
2367 break;
2368 }
2369 if (self == 1 && peer == 0) {
2370 rv = 1;
2371 break;
2372 }
2373 /* Else fall through to one of the other strategies... */
2374 case ASB_DISCARD_OLDER_PRI:
2375 if (self == 0 && peer == 1) {
2376 rv = 1;
2377 break;
2378 }
2379 if (self == 1 && peer == 0) {
2380 rv = -1;
2381 break;
2382 }
2383 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002384 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002385 "Using discard-least-changes instead\n");
2386 case ASB_DISCARD_ZERO_CHG:
2387 if (ch_peer == 0 && ch_self == 0) {
Philipp Reisner25703f82011-02-07 14:35:25 +01002388 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002389 ? -1 : 1;
2390 break;
2391 } else {
2392 if (ch_peer == 0) { rv = 1; break; }
2393 if (ch_self == 0) { rv = -1; break; }
2394 }
Philipp Reisner89e58e72011-01-19 13:12:45 +01002395 if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002396 break;
2397 case ASB_DISCARD_LEAST_CHG:
2398 if (ch_self < ch_peer)
2399 rv = -1;
2400 else if (ch_self > ch_peer)
2401 rv = 1;
2402 else /* ( ch_self == ch_peer ) */
2403 /* Well, then use something else. */
Philipp Reisner25703f82011-02-07 14:35:25 +01002404 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002405 ? -1 : 1;
2406 break;
2407 case ASB_DISCARD_LOCAL:
2408 rv = -1;
2409 break;
2410 case ASB_DISCARD_REMOTE:
2411 rv = 1;
2412 }
2413
2414 return rv;
2415}
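/*
 * Return convention of drbd_asb_recover_0p() and the 1p/2p variants below,
 * as can be read off the ASB_DISCARD_LOCAL/ASB_DISCARD_REMOTE cases above:
 * -100 means no automatic after-split-brain decision was reached, -1 means
 * the local node's changes are to be discarded (it becomes sync target),
 * and 1 means the peer's changes are to be discarded.
 */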
2416
2417static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2418{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002419 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002420
Philipp Reisner89e58e72011-01-19 13:12:45 +01002421 switch (mdev->tconn->net_conf->after_sb_1p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002422 case ASB_DISCARD_YOUNGER_PRI:
2423 case ASB_DISCARD_OLDER_PRI:
2424 case ASB_DISCARD_LEAST_CHG:
2425 case ASB_DISCARD_LOCAL:
2426 case ASB_DISCARD_REMOTE:
2427 dev_err(DEV, "Configuration error.\n");
2428 break;
2429 case ASB_DISCONNECT:
2430 break;
2431 case ASB_CONSENSUS:
2432 hg = drbd_asb_recover_0p(mdev);
2433 if (hg == -1 && mdev->state.role == R_SECONDARY)
2434 rv = hg;
2435 if (hg == 1 && mdev->state.role == R_PRIMARY)
2436 rv = hg;
2437 break;
2438 case ASB_VIOLENTLY:
2439 rv = drbd_asb_recover_0p(mdev);
2440 break;
2441 case ASB_DISCARD_SECONDARY:
2442 return mdev->state.role == R_PRIMARY ? 1 : -1;
2443 case ASB_CALL_HELPER:
2444 hg = drbd_asb_recover_0p(mdev);
2445 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002446 enum drbd_state_rv rv2;
2447
2448 drbd_set_role(mdev, R_SECONDARY, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002449 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2450 * we might be here in C_WF_REPORT_PARAMS which is transient.
2451 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002452 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2453 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002454 drbd_khelper(mdev, "pri-lost-after-sb");
2455 } else {
2456 dev_warn(DEV, "Successfully gave up primary role.\n");
2457 rv = hg;
2458 }
2459 } else
2460 rv = hg;
2461 }
2462
2463 return rv;
2464}
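/*
 * Example for after-sb-1pri: with ASB_CONSENSUS the 0p verdict is only
 * honoured when it sacrifices the current Secondary -- hg == -1 while we
 * are Secondary, or hg == 1 while we are Primary.  A verdict that would
 * force the Primary to discard its data is ignored, rv stays -100, and
 * drbd_sync_handshake() will normally drop the connection instead.
 */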
2465
2466static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2467{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002468 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002469
Philipp Reisner89e58e72011-01-19 13:12:45 +01002470 switch (mdev->tconn->net_conf->after_sb_2p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002471 case ASB_DISCARD_YOUNGER_PRI:
2472 case ASB_DISCARD_OLDER_PRI:
2473 case ASB_DISCARD_LEAST_CHG:
2474 case ASB_DISCARD_LOCAL:
2475 case ASB_DISCARD_REMOTE:
2476 case ASB_CONSENSUS:
2477 case ASB_DISCARD_SECONDARY:
2478 dev_err(DEV, "Configuration error.\n");
2479 break;
2480 case ASB_VIOLENTLY:
2481 rv = drbd_asb_recover_0p(mdev);
2482 break;
2483 case ASB_DISCONNECT:
2484 break;
2485 case ASB_CALL_HELPER:
2486 hg = drbd_asb_recover_0p(mdev);
2487 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002488 enum drbd_state_rv rv2;
2489
Philipp Reisnerb411b362009-09-25 16:07:19 -07002490 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2491 * we might be here in C_WF_REPORT_PARAMS which is transient.
2492 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002493 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2494 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002495 drbd_khelper(mdev, "pri-lost-after-sb");
2496 } else {
2497 dev_warn(DEV, "Successfully gave up primary role.\n");
2498 rv = hg;
2499 }
2500 } else
2501 rv = hg;
2502 }
2503
2504 return rv;
2505}
2506
2507static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2508 u64 bits, u64 flags)
2509{
2510 if (!uuid) {
2511 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2512 return;
2513 }
2514 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2515 text,
2516 (unsigned long long)uuid[UI_CURRENT],
2517 (unsigned long long)uuid[UI_BITMAP],
2518 (unsigned long long)uuid[UI_HISTORY_START],
2519 (unsigned long long)uuid[UI_HISTORY_END],
2520 (unsigned long long)bits,
2521 (unsigned long long)flags);
2522}
2523
2524/*
2525 100 after split brain try auto recover
2526 2 C_SYNC_SOURCE set BitMap
2527 1 C_SYNC_SOURCE use BitMap
2528 0 no Sync
2529 -1 C_SYNC_TARGET use BitMap
2530 -2 C_SYNC_TARGET set BitMap
2531 -100 after split brain, disconnect
2532-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002533-1091 requires proto 91
2534-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002535 */
2536static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2537{
2538 u64 self, peer;
2539 int i, j;
2540
2541 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2542 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2543
2544 *rule_nr = 10;
2545 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2546 return 0;
2547
2548 *rule_nr = 20;
2549 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2550 peer != UUID_JUST_CREATED)
2551 return -2;
2552
2553 *rule_nr = 30;
2554 if (self != UUID_JUST_CREATED &&
2555 (peer == UUID_JUST_CREATED || peer == (u64)0))
2556 return 2;
2557
2558 if (self == peer) {
2559 int rct, dc; /* roles at crash time */
2560
2561 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2562
Philipp Reisner31890f42011-01-19 14:12:51 +01002563 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002564 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002565
2566 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2567 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2568 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2569 drbd_uuid_set_bm(mdev, 0UL);
2570
2571 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2572 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2573 *rule_nr = 34;
2574 } else {
2575 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2576 *rule_nr = 36;
2577 }
2578
2579 return 1;
2580 }
2581
2582 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2583
Philipp Reisner31890f42011-01-19 14:12:51 +01002584 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002585 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002586
2587 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2588 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2589 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2590
2591 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2592 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2593 mdev->p_uuid[UI_BITMAP] = 0UL;
2594
2595 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2596 *rule_nr = 35;
2597 } else {
2598 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2599 *rule_nr = 37;
2600 }
2601
2602 return -1;
2603 }
2604
2605 /* Common power [off|failure] */
2606 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2607 (mdev->p_uuid[UI_FLAGS] & 2);
2608 /* lowest bit is set when we were primary,
2609 * next bit (weight 2) is set when peer was primary */
2610 *rule_nr = 40;
2611
2612 switch (rct) {
2613 case 0: /* !self_pri && !peer_pri */ return 0;
2614 case 1: /* self_pri && !peer_pri */ return 1;
2615 case 2: /* !self_pri && peer_pri */ return -1;
2616 case 3: /* self_pri && peer_pri */
Philipp Reisner25703f82011-02-07 14:35:25 +01002617 dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002618 return dc ? -1 : 1;
2619 }
2620 }
2621
2622 *rule_nr = 50;
2623 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2624 if (self == peer)
2625 return -1;
2626
2627 *rule_nr = 51;
2628 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2629 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002630 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002631 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2632 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2633 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002634		/* The last P_SYNC_UUID did not get through. Undo the changes the
2635			   peer made to its UUIDs when it last started a resync as sync source. */
2636
Philipp Reisner31890f42011-01-19 14:12:51 +01002637 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002638 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002639
2640 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2641 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002642
2643			dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
2644 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2645
Philipp Reisnerb411b362009-09-25 16:07:19 -07002646 return -1;
2647 }
2648 }
2649
2650 *rule_nr = 60;
2651 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2652 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2653 peer = mdev->p_uuid[i] & ~((u64)1);
2654 if (self == peer)
2655 return -2;
2656 }
2657
2658 *rule_nr = 70;
2659 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2660 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2661 if (self == peer)
2662 return 1;
2663
2664 *rule_nr = 71;
2665 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2666 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002667 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002668 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2669 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2670 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002671		/* The last P_SYNC_UUID did not get through. Undo the changes we
2672			   made to our own UUIDs when we last started a resync as sync source. */
2673
Philipp Reisner31890f42011-01-19 14:12:51 +01002674 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002675 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002676
2677 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2678 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2679
Philipp Reisner4a23f262011-01-11 17:42:17 +01002680 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002681 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2682 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2683
2684 return 1;
2685 }
2686 }
2687
2688
2689 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002690 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002691 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2692 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2693 if (self == peer)
2694 return 2;
2695 }
2696
2697 *rule_nr = 90;
2698 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2699 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2700 if (self == peer && self != ((u64)0))
2701 return 100;
2702
2703 *rule_nr = 100;
2704 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2705 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2706 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2707 peer = mdev->p_uuid[j] & ~((u64)1);
2708 if (self == peer)
2709 return -100;
2710 }
2711 }
2712
2713 return -1000;
2714}
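/*
 * Note on the masking above: every comparison strips the lowest UUID bit
 * with "& ~((u64)1)", so two UUIDs that differ only in that bit still
 * count as the same data generation.  That bit is kept as a flag -- it is
 * what drbd_asb_recover_0p() reads back via "uuid[UI_BITMAP] & 1" for the
 * younger/older-primary policies -- and must not influence the rules above.
 */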
2715
2716/* drbd_sync_handshake() returns the new conn state on success, or
2717   C_MASK (-1) on failure.
2718 */
2719static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2720 enum drbd_disk_state peer_disk) __must_hold(local)
2721{
2722 int hg, rule_nr;
2723 enum drbd_conns rv = C_MASK;
2724 enum drbd_disk_state mydisk;
2725
2726 mydisk = mdev->state.disk;
2727 if (mydisk == D_NEGOTIATING)
2728 mydisk = mdev->new_state_tmp.disk;
2729
2730 dev_info(DEV, "drbd_sync_handshake:\n");
2731 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2732 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2733 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2734
2735 hg = drbd_uuid_compare(mdev, &rule_nr);
2736
2737 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2738
2739 if (hg == -1000) {
2740 dev_alert(DEV, "Unrelated data, aborting!\n");
2741 return C_MASK;
2742 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002743 if (hg < -1000) {
2744 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002745 return C_MASK;
2746 }
2747
2748 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2749 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2750 int f = (hg == -100) || abs(hg) == 2;
2751 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2752 if (f)
2753 hg = hg*2;
2754 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2755 hg > 0 ? "source" : "target");
2756 }
2757
Adam Gandelman3a11a482010-04-08 16:48:23 -07002758 if (abs(hg) == 100)
2759 drbd_khelper(mdev, "initial-split-brain");
2760
Philipp Reisner89e58e72011-01-19 13:12:45 +01002761 if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002762 int pcount = (mdev->state.role == R_PRIMARY)
2763 + (peer_role == R_PRIMARY);
2764 int forced = (hg == -100);
2765
2766 switch (pcount) {
2767 case 0:
2768 hg = drbd_asb_recover_0p(mdev);
2769 break;
2770 case 1:
2771 hg = drbd_asb_recover_1p(mdev);
2772 break;
2773 case 2:
2774 hg = drbd_asb_recover_2p(mdev);
2775 break;
2776 }
2777 if (abs(hg) < 100) {
2778 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2779 "automatically solved. Sync from %s node\n",
2780 pcount, (hg < 0) ? "peer" : "this");
2781 if (forced) {
2782 dev_warn(DEV, "Doing a full sync, since"
2783 " UUIDs where ambiguous.\n");
2784 hg = hg*2;
2785 }
2786 }
2787 }
2788
2789 if (hg == -100) {
Philipp Reisner89e58e72011-01-19 13:12:45 +01002790 if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002791 hg = -1;
Philipp Reisner89e58e72011-01-19 13:12:45 +01002792 if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002793 hg = 1;
2794
2795 if (abs(hg) < 100)
2796 dev_warn(DEV, "Split-Brain detected, manually solved. "
2797 "Sync from %s node\n",
2798 (hg < 0) ? "peer" : "this");
2799 }
2800
2801 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01002802 /* FIXME this log message is not correct if we end up here
2803 * after an attempted attach on a diskless node.
2804 * We just refuse to attach -- well, we drop the "connection"
2805 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07002806 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002807 drbd_khelper(mdev, "split-brain");
2808 return C_MASK;
2809 }
2810
2811 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2812 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2813 return C_MASK;
2814 }
2815
2816 if (hg < 0 && /* by intention we do not use mydisk here. */
2817 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
Philipp Reisner89e58e72011-01-19 13:12:45 +01002818 switch (mdev->tconn->net_conf->rr_conflict) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002819 case ASB_CALL_HELPER:
2820 drbd_khelper(mdev, "pri-lost");
2821 /* fall through */
2822 case ASB_DISCONNECT:
2823 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2824 return C_MASK;
2825 case ASB_VIOLENTLY:
2826 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2827 "assumption\n");
2828 }
2829 }
2830
Philipp Reisner8169e412011-03-15 18:40:27 +01002831 if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002832 if (hg == 0)
2833 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2834 else
2835 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2836 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2837 abs(hg) >= 2 ? "full" : "bit-map based");
2838 return C_MASK;
2839 }
2840
Philipp Reisnerb411b362009-09-25 16:07:19 -07002841 if (abs(hg) >= 2) {
2842 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01002843 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2844 BM_LOCKED_SET_ALLOWED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002845 return C_MASK;
2846 }
2847
2848 if (hg > 0) { /* become sync source. */
2849 rv = C_WF_BITMAP_S;
2850 } else if (hg < 0) { /* become sync target */
2851 rv = C_WF_BITMAP_T;
2852 } else {
2853 rv = C_CONNECTED;
2854 if (drbd_bm_total_weight(mdev)) {
2855 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2856 drbd_bm_total_weight(mdev));
2857 }
2858 }
2859
2860 return rv;
2861}
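/*
 * Summary of the handshake result: hg > 0 maps to C_WF_BITMAP_S (we send
 * our bitmap and become sync source), hg < 0 to C_WF_BITMAP_T, hg == 0 to
 * plain C_CONNECTED.  abs(hg) >= 2 additionally marks the whole bitmap as
 * out-of-sync first, i.e. a full resync: drbd_uuid_compare() returning 2
 * (rule 30, the peer still has just-created UUIDs) means "sync source,
 * full sync".
 */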
2862
2863/* returns 1 if invalid */
2864static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2865{
2866 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2867 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2868 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2869 return 0;
2870
2871 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2872 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2873 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2874 return 1;
2875
2876 /* everything else is valid if they are equal on both sides. */
2877 if (peer == self)
2878 return 0;
2879
2880	/* everything else is invalid. */
2881 return 1;
2882}
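/*
 * In table form: the only accepted asymmetric pair is one side using
 * discard-local while the other uses discard-remote.  Any other
 * combination that involves discard-local/discard-remote is rejected,
 * and every remaining policy (consensus, disconnect, ...) is only valid
 * when both sides configured exactly the same value.
 */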
2883
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002884static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002885{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002886 struct p_protocol *p = tconn->data.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002887 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002888 int p_want_lose, p_two_primaries, cf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002889 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2890
Philipp Reisnerb411b362009-09-25 16:07:19 -07002891 p_proto = be32_to_cpu(p->protocol);
2892 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2893 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2894 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002895 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002896 cf = be32_to_cpu(p->conn_flags);
2897 p_want_lose = cf & CF_WANT_LOSE;
2898
Philipp Reisner72046242011-03-15 18:51:47 +01002899 clear_bit(CONN_DRY_RUN, &tconn->flags);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002900
2901 if (cf & CF_DRY_RUN)
Philipp Reisner72046242011-03-15 18:51:47 +01002902 set_bit(CONN_DRY_RUN, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002903
Philipp Reisner72046242011-03-15 18:51:47 +01002904 if (p_proto != tconn->net_conf->wire_protocol) {
2905 conn_err(tconn, "incompatible communication protocols\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002906 goto disconnect;
2907 }
2908
Philipp Reisner72046242011-03-15 18:51:47 +01002909 if (cmp_after_sb(p_after_sb_0p, tconn->net_conf->after_sb_0p)) {
2910 conn_err(tconn, "incompatible after-sb-0pri settings\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002911 goto disconnect;
2912 }
2913
Philipp Reisner72046242011-03-15 18:51:47 +01002914 if (cmp_after_sb(p_after_sb_1p, tconn->net_conf->after_sb_1p)) {
2915 conn_err(tconn, "incompatible after-sb-1pri settings\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002916 goto disconnect;
2917 }
2918
Philipp Reisner72046242011-03-15 18:51:47 +01002919 if (cmp_after_sb(p_after_sb_2p, tconn->net_conf->after_sb_2p)) {
2920 conn_err(tconn, "incompatible after-sb-2pri settings\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002921 goto disconnect;
2922 }
2923
Philipp Reisner72046242011-03-15 18:51:47 +01002924 if (p_want_lose && tconn->net_conf->want_lose) {
2925 conn_err(tconn, "both sides have the 'want_lose' flag set\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002926 goto disconnect;
2927 }
2928
Philipp Reisner72046242011-03-15 18:51:47 +01002929 if (p_two_primaries != tconn->net_conf->two_primaries) {
2930 conn_err(tconn, "incompatible setting of the two-primaries options\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002931 goto disconnect;
2932 }
2933
Philipp Reisner72046242011-03-15 18:51:47 +01002934 if (tconn->agreed_pro_version >= 87) {
2935 unsigned char *my_alg = tconn->net_conf->integrity_alg;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002936 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002937
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002938 err = drbd_recv_all(tconn, p_integrity_alg, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002939 if (err)
2940 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002941
2942 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2943 if (strcmp(p_integrity_alg, my_alg)) {
Philipp Reisner72046242011-03-15 18:51:47 +01002944 conn_err(tconn, "incompatible setting of the data-integrity-alg\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002945 goto disconnect;
2946 }
Philipp Reisner72046242011-03-15 18:51:47 +01002947 conn_info(tconn, "data-integrity-alg: %s\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07002948 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2949 }
2950
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002951 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002952
2953disconnect:
Philipp Reisner72046242011-03-15 18:51:47 +01002954 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002955 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002956}
2957
2958/* helper function
2959 * input: alg name, feature name
2960 * return: NULL (alg name was "")
2961 * ERR_PTR(error) if something goes wrong
2962 * or the crypto hash ptr, if it worked out ok. */
2963struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2964 const char *alg, const char *name)
2965{
2966 struct crypto_hash *tfm;
2967
2968 if (!alg[0])
2969 return NULL;
2970
2971 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2972 if (IS_ERR(tfm)) {
2973 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2974 alg, name, PTR_ERR(tfm));
2975 return tfm;
2976 }
2977 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2978 crypto_free_hash(tfm);
2979 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2980 return ERR_PTR(-EINVAL);
2981 }
2982 return tfm;
2983}
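/*
 * Typical use, as in receive_SyncParam() below: pass the peer-supplied
 * algorithm name plus a label for the error message, treat NULL as "no
 * algorithm requested", bail out on IS_ERR(), and otherwise (there under
 * mdev->peer_seq_lock) free the previously installed hash and install the
 * new tfm in its place.
 */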
2984
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002985static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002986{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002987 void *buffer = tconn->data.rbuf;
2988 int size = pi->size;
2989
2990 while (size) {
2991 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
2992 s = drbd_recv(tconn, buffer, s);
2993 if (s <= 0) {
2994 if (s < 0)
2995 return s;
2996 break;
2997 }
2998 size -= s;
2999 }
3000 if (size)
3001 return -EIO;
3002 return 0;
3003}
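/*
 * This drains an unwanted payload through the regular receive buffer in
 * chunks of at most DRBD_SOCKET_BUFFER_SIZE bytes, so packets for unknown
 * volumes (see config_unknown_volume() below) can be skipped without any
 * extra allocation; an error or short read is reported to the caller.
 */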
3004
3005/*
3006 * config_unknown_volume - device configuration command for unknown volume
3007 *
3008 * When a device is added to an existing connection, the node on which the
3009 * device is added first will send configuration commands to its peer but the
3010 * peer will not know about the device yet. It will warn and ignore these
3011 * commands. Once the device is added on the second node, the second node will
3012 * send the same device configuration commands, but in the other direction.
3013 *
3014 * (We can also end up here if drbd is misconfigured.)
3015 */
3016static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3017{
3018 conn_warn(tconn, "Volume %u unknown; ignoring %s packet\n",
3019 pi->vnr, cmdname(pi->cmd));
3020 return ignore_remaining_packet(tconn, pi);
3021}
3022
3023static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3024{
3025 struct drbd_conf *mdev;
3026 struct p_rs_param_95 *p = tconn->data.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003027 unsigned int header_size, data_size, exp_max_sz;
3028 struct crypto_hash *verify_tfm = NULL;
3029 struct crypto_hash *csums_tfm = NULL;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003030 const int apv = tconn->agreed_pro_version;
Philipp Reisner778f2712010-07-06 11:14:00 +02003031 int *rs_plan_s = NULL;
3032 int fifo_size = 0;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003033 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003034
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003035 mdev = vnr_to_mdev(tconn, pi->vnr);
3036 if (!mdev)
3037 return config_unknown_volume(tconn, pi);
3038
Philipp Reisnerb411b362009-09-25 16:07:19 -07003039 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3040 : apv == 88 ? sizeof(struct p_rs_param)
3041 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003042 : apv <= 94 ? sizeof(struct p_rs_param_89)
3043 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003044
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003045 if (pi->size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003046 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003047 pi->size, exp_max_sz);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003048 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003049 }
3050
3051 if (apv <= 88) {
Philipp Reisner257d0af2011-01-26 12:15:29 +01003052 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003053 data_size = pi->size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003054 } else if (apv <= 94) {
Philipp Reisner257d0af2011-01-26 12:15:29 +01003055 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003056 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003057 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003058 } else {
Philipp Reisner257d0af2011-01-26 12:15:29 +01003059 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003060 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003061 D_ASSERT(data_size == 0);
3062 }
3063
3064 /* initialize verify_alg and csums_alg */
3065 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3066
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003067 err = drbd_recv_all(mdev->tconn, &p->head.payload, header_size);
3068 if (err)
3069 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003070
Lars Ellenbergf3990022011-03-23 14:31:09 +01003071 if (get_ldev(mdev)) {
3072 mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
3073 put_ldev(mdev);
3074 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003075
3076 if (apv >= 88) {
3077 if (apv == 88) {
3078 if (data_size > SHARED_SECRET_MAX) {
3079 dev_err(DEV, "verify-alg too long, "
3080 "peer wants %u, accepting only %u byte\n",
3081 data_size, SHARED_SECRET_MAX);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003082 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003083 }
3084
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003085 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3086 if (err)
3087 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003088
3089 /* we expect NUL terminated string */
3090 /* but just in case someone tries to be evil */
3091 D_ASSERT(p->verify_alg[data_size-1] == 0);
3092 p->verify_alg[data_size-1] = 0;
3093
3094 } else /* apv >= 89 */ {
3095 /* we still expect NUL terminated strings */
3096 /* but just in case someone tries to be evil */
3097 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3098 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3099 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3100 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3101 }
3102
Lars Ellenbergf3990022011-03-23 14:31:09 +01003103 if (strcmp(mdev->tconn->net_conf->verify_alg, p->verify_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003104 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3105 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
Lars Ellenbergf3990022011-03-23 14:31:09 +01003106 mdev->tconn->net_conf->verify_alg, p->verify_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003107 goto disconnect;
3108 }
3109 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3110 p->verify_alg, "verify-alg");
3111 if (IS_ERR(verify_tfm)) {
3112 verify_tfm = NULL;
3113 goto disconnect;
3114 }
3115 }
3116
Lars Ellenbergf3990022011-03-23 14:31:09 +01003117 if (apv >= 89 && strcmp(mdev->tconn->net_conf->csums_alg, p->csums_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003118 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3119 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
Lars Ellenbergf3990022011-03-23 14:31:09 +01003120 mdev->tconn->net_conf->csums_alg, p->csums_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003121 goto disconnect;
3122 }
3123 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3124 p->csums_alg, "csums-alg");
3125 if (IS_ERR(csums_tfm)) {
3126 csums_tfm = NULL;
3127 goto disconnect;
3128 }
3129 }
3130
Lars Ellenbergf3990022011-03-23 14:31:09 +01003131 if (apv > 94 && get_ldev(mdev)) {
3132 mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
3133 mdev->ldev->dc.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3134 mdev->ldev->dc.c_delay_target = be32_to_cpu(p->c_delay_target);
3135 mdev->ldev->dc.c_fill_target = be32_to_cpu(p->c_fill_target);
3136 mdev->ldev->dc.c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02003137
Lars Ellenbergf3990022011-03-23 14:31:09 +01003138 fifo_size = (mdev->ldev->dc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
Philipp Reisner778f2712010-07-06 11:14:00 +02003139 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
3140 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
3141 if (!rs_plan_s) {
3142 dev_err(DEV, "kmalloc of fifo_buffer failed");
Lars Ellenbergf3990022011-03-23 14:31:09 +01003143 put_ldev(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02003144 goto disconnect;
3145 }
3146 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01003147 put_ldev(mdev);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003148 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003149
3150 spin_lock(&mdev->peer_seq_lock);
3151 /* lock against drbd_nl_syncer_conf() */
3152 if (verify_tfm) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01003153 strcpy(mdev->tconn->net_conf->verify_alg, p->verify_alg);
3154 mdev->tconn->net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3155 crypto_free_hash(mdev->tconn->verify_tfm);
3156 mdev->tconn->verify_tfm = verify_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003157 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3158 }
3159 if (csums_tfm) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01003160 strcpy(mdev->tconn->net_conf->csums_alg, p->csums_alg);
3161 mdev->tconn->net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3162 crypto_free_hash(mdev->tconn->csums_tfm);
3163 mdev->tconn->csums_tfm = csums_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003164 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3165 }
Philipp Reisner778f2712010-07-06 11:14:00 +02003166 if (fifo_size != mdev->rs_plan_s.size) {
3167 kfree(mdev->rs_plan_s.values);
3168 mdev->rs_plan_s.values = rs_plan_s;
3169 mdev->rs_plan_s.size = fifo_size;
3170 mdev->rs_planed = 0;
3171 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003172 spin_unlock(&mdev->peer_seq_lock);
3173 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003174 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003175
Philipp Reisnerb411b362009-09-25 16:07:19 -07003176disconnect:
3177 /* just for completeness: actually not needed,
3178 * as this is not reached if csums_tfm was ok. */
3179 crypto_free_hash(csums_tfm);
3180 /* but free the verify_tfm again, if csums_tfm did not work out */
3181 crypto_free_hash(verify_tfm);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003182 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003183 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003184}
3185
Philipp Reisnerb411b362009-09-25 16:07:19 -07003186/* warn if the arguments differ by more than 12.5% */
3187static void warn_if_differ_considerably(struct drbd_conf *mdev,
3188 const char *s, sector_t a, sector_t b)
3189{
3190 sector_t d;
3191 if (a == 0 || b == 0)
3192 return;
3193 d = (a > b) ? (a - b) : (b - a);
3194 if (d > (a>>3) || d > (b>>3))
3195 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3196 (unsigned long long)a, (unsigned long long)b);
3197}
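/*
 * "More than 12.5%" here means the difference exceeds one eighth (x >> 3)
 * of either value.  For example, local 1000 sectors vs. peer 800 sectors
 * differ by 200, which is larger than both 1000/8 and 800/8, so the
 * warning is printed; 1000 vs. 900 stays quiet.
 */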
3198
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003199static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003200{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003201 struct drbd_conf *mdev;
3202 struct p_sizes *p = tconn->data.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003203 enum determine_dev_size dd = unchanged;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003204 sector_t p_size, p_usize, my_usize;
3205 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003206 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003207
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003208 mdev = vnr_to_mdev(tconn, pi->vnr);
3209 if (!mdev)
3210 return config_unknown_volume(tconn, pi);
3211
Philipp Reisnerb411b362009-09-25 16:07:19 -07003212 p_size = be64_to_cpu(p->d_size);
3213 p_usize = be64_to_cpu(p->u_size);
3214
Philipp Reisnerb411b362009-09-25 16:07:19 -07003215 /* just store the peer's disk size for now.
3216 * we still need to figure out whether we accept that. */
3217 mdev->p_size = p_size;
3218
Philipp Reisnerb411b362009-09-25 16:07:19 -07003219 if (get_ldev(mdev)) {
3220 warn_if_differ_considerably(mdev, "lower level device sizes",
3221 p_size, drbd_get_max_capacity(mdev->ldev));
3222 warn_if_differ_considerably(mdev, "user requested size",
3223 p_usize, mdev->ldev->dc.disk_size);
3224
3225 /* if this is the first connect, or an otherwise expected
3226 * param exchange, choose the minimum */
3227 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3228 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
3229 p_usize);
3230
3231 my_usize = mdev->ldev->dc.disk_size;
3232
3233 if (mdev->ldev->dc.disk_size != p_usize) {
3234 mdev->ldev->dc.disk_size = p_usize;
3235 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3236 (unsigned long)mdev->ldev->dc.disk_size);
3237 }
3238
3239 /* Never shrink a device with usable data during connect.
3240 But allow online shrinking if we are connected. */
Philipp Reisnera393db62009-12-22 13:35:52 +01003241 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
Philipp Reisnerb411b362009-09-25 16:07:19 -07003242 drbd_get_capacity(mdev->this_bdev) &&
3243 mdev->state.disk >= D_OUTDATED &&
3244 mdev->state.conn < C_CONNECTED) {
3245 dev_err(DEV, "The peer's disk size is too small!\n");
Philipp Reisner38fa9982011-03-15 18:24:49 +01003246 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003247 mdev->ldev->dc.disk_size = my_usize;
3248 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003249 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003250 }
3251 put_ldev(mdev);
3252 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003253
Philipp Reisnere89b5912010-03-24 17:11:33 +01003254 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003255 if (get_ldev(mdev)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02003256 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003257 put_ldev(mdev);
3258 if (dd == dev_size_error)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003259 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003260 drbd_md_sync(mdev);
3261 } else {
3262 /* I am diskless, need to accept the peer's size. */
3263 drbd_set_my_capacity(mdev, p_size);
3264 }
3265
Philipp Reisner99432fc2011-05-20 16:39:13 +02003266 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3267 drbd_reconsider_max_bio_size(mdev);
3268
Philipp Reisnerb411b362009-09-25 16:07:19 -07003269 if (get_ldev(mdev)) {
3270 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3271 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3272 ldsc = 1;
3273 }
3274
Philipp Reisnerb411b362009-09-25 16:07:19 -07003275 put_ldev(mdev);
3276 }
3277
3278 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3279 if (be64_to_cpu(p->c_size) !=
3280 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3281 /* we have different sizes, probably peer
3282 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003283 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003284 }
3285 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3286 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3287 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003288 mdev->state.disk >= D_INCONSISTENT) {
3289 if (ddsf & DDSF_NO_RESYNC)
3290 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3291 else
3292 resync_after_online_grow(mdev);
3293 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003294 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3295 }
3296 }
3297
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003298 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003299}
3300
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003301static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003302{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003303 struct drbd_conf *mdev;
3304 struct p_uuids *p = tconn->data.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003305 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003306 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003307
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003308 mdev = vnr_to_mdev(tconn, pi->vnr);
3309 if (!mdev)
3310 return config_unknown_volume(tconn, pi);
3311
Philipp Reisnerb411b362009-09-25 16:07:19 -07003312 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3313
3314 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3315 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3316
3317 kfree(mdev->p_uuid);
3318 mdev->p_uuid = p_uuid;
3319
3320 if (mdev->state.conn < C_CONNECTED &&
3321 mdev->state.disk < D_INCONSISTENT &&
3322 mdev->state.role == R_PRIMARY &&
3323 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3324 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3325 (unsigned long long)mdev->ed_uuid);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003326 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003327 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003328 }
3329
3330 if (get_ldev(mdev)) {
3331 int skip_initial_sync =
3332 mdev->state.conn == C_CONNECTED &&
Philipp Reisner31890f42011-01-19 14:12:51 +01003333 mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003334 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3335 (p_uuid[UI_FLAGS] & 8);
3336 if (skip_initial_sync) {
3337 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3338 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003339 "clear_n_write from receive_uuids",
3340 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003341 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3342 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3343 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3344 CS_VERBOSE, NULL);
3345 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003346 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003347 }
3348 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003349 } else if (mdev->state.disk < D_INCONSISTENT &&
3350 mdev->state.role == R_PRIMARY) {
3351 /* I am a diskless primary, the peer just created a new current UUID
3352 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003353 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003354 }
3355
3356	/* Before we test for the disk state, we should wait until a possibly
3357	   ongoing cluster-wide state change is finished. That is important if
3358 we are primary and are detaching from our disk. We need to see the
3359 new disk state... */
Philipp Reisner8410da82011-02-11 20:11:10 +01003360 mutex_lock(mdev->state_mutex);
3361 mutex_unlock(mdev->state_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003362 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003363 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3364
3365 if (updated_uuids)
3366 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003367
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003368 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003369}
3370
3371/**
3372 * convert_state() - Converts the peer's view of the cluster state to our point of view
3373 * @ps: The state as seen by the peer.
3374 */
3375static union drbd_state convert_state(union drbd_state ps)
3376{
3377 union drbd_state ms;
3378
3379 static enum drbd_conns c_tab[] = {
3380 [C_CONNECTED] = C_CONNECTED,
3381
3382 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3383 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3384 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3385 [C_VERIFY_S] = C_VERIFY_T,
3386 [C_MASK] = C_MASK,
3387 };
3388
3389 ms.i = ps.i;
3390
3391 ms.conn = c_tab[ps.conn];
3392 ms.peer = ps.role;
3393 ms.role = ps.peer;
3394 ms.pdsk = ps.disk;
3395 ms.disk = ps.pdsk;
3396 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3397
3398 return ms;
3399}
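/*
 * Example: if the peer reports Primary/Secondary, UpToDate/Inconsistent
 * in C_STARTING_SYNC_S, convert_state() hands back Secondary/Primary,
 * Inconsistent/UpToDate in C_STARTING_SYNC_T -- role and peer as well as
 * disk and pdsk swap places, and directional connection states are
 * mirrored through c_tab.
 */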
3400
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003401static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003402{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003403 struct drbd_conf *mdev;
3404 struct p_req_state *p = tconn->data.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003405 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003406 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003407
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003408 mdev = vnr_to_mdev(tconn, pi->vnr);
3409 if (!mdev)
3410 return -EIO;
3411
Philipp Reisnerb411b362009-09-25 16:07:19 -07003412 mask.i = be32_to_cpu(p->mask);
3413 val.i = be32_to_cpu(p->val);
3414
Philipp Reisner25703f82011-02-07 14:35:25 +01003415 if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
Philipp Reisner8410da82011-02-11 20:11:10 +01003416 mutex_is_locked(mdev->state_mutex)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003417 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003418 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003419 }
3420
3421 mask = convert_state(mask);
3422 val = convert_state(val);
3423
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003424 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3425 drbd_send_sr_reply(mdev, rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003426
Philipp Reisnerb411b362009-09-25 16:07:19 -07003427 drbd_md_sync(mdev);
3428
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003429 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003430}
3431
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003432static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003433{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01003434 struct p_req_state *p = tconn->data.rbuf;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003435 union drbd_state mask, val;
3436 enum drbd_state_rv rv;
3437
3438 mask.i = be32_to_cpu(p->mask);
3439 val.i = be32_to_cpu(p->val);
3440
3441 if (test_bit(DISCARD_CONCURRENT, &tconn->flags) &&
3442 mutex_is_locked(&tconn->cstate_mutex)) {
3443 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003444 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003445 }
3446
3447 mask = convert_state(mask);
3448 val = convert_state(val);
3449
3450 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY);
3451 conn_send_sr_reply(tconn, rv);
3452
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003453 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003454}
3455
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003456static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003457{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003458 struct drbd_conf *mdev;
3459 struct p_state *p = tconn->data.rbuf;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003460 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003461 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003462 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003463 int rv;
3464
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003465 mdev = vnr_to_mdev(tconn, pi->vnr);
3466 if (!mdev)
3467 return config_unknown_volume(tconn, pi);
3468
Philipp Reisnerb411b362009-09-25 16:07:19 -07003469 peer_state.i = be32_to_cpu(p->state);
3470
3471 real_peer_disk = peer_state.disk;
3472 if (peer_state.disk == D_NEGOTIATING) {
3473 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3474 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3475 }
3476
Philipp Reisner87eeee42011-01-19 14:16:30 +01003477 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003478 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003479 os = ns = mdev->state;
Philipp Reisner87eeee42011-01-19 14:16:30 +01003480 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003481
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003482 /* peer says his disk is uptodate, while we think it is inconsistent,
3483 * and this happens while we think we have a sync going on. */
3484 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3485 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3486 /* If we are (becoming) SyncSource, but peer is still in sync
3487 * preparation, ignore its uptodate-ness to avoid flapping, it
3488 * will change to inconsistent once the peer reaches active
3489 * syncing states.
3490 * It may have changed syncer-paused flags, however, so we
3491 * cannot ignore this completely. */
3492 if (peer_state.conn > C_CONNECTED &&
3493 peer_state.conn < C_SYNC_SOURCE)
3494 real_peer_disk = D_INCONSISTENT;
3495
3496 /* if peer_state changes to connected at the same time,
3497 * it explicitly notifies us that it finished resync.
3498 * Maybe we should finish it up, too? */
3499 else if (os.conn >= C_SYNC_SOURCE &&
3500 peer_state.conn == C_CONNECTED) {
3501 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3502 drbd_resync_finished(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003503 return 0;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003504 }
3505 }
3506
3507 /* peer says his disk is inconsistent, while we think it is uptodate,
3508 * and this happens while the peer still thinks we have a sync going on,
3509 * but we think we are already done with the sync.
3510 * We ignore this to avoid flapping pdsk.
3511 * This should not happen, if the peer is a recent version of drbd. */
3512 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3513 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3514 real_peer_disk = D_UP_TO_DATE;
3515
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003516 if (ns.conn == C_WF_REPORT_PARAMS)
3517 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003518
Philipp Reisner67531712010-10-27 12:21:30 +02003519 if (peer_state.conn == C_AHEAD)
3520 ns.conn = C_BEHIND;
3521
Philipp Reisnerb411b362009-09-25 16:07:19 -07003522 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3523 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3524 int cr; /* consider resync */
3525
3526 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003527 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003528 /* if we had an established connection
3529 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003530 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003531 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003532 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003533 /* if we have both been inconsistent, and the peer has been
3534 * forced to be UpToDate with --overwrite-data */
3535 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3536 /* if we had been plain connected, and the admin requested to
3537 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003538 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003539 (peer_state.conn >= C_STARTING_SYNC_S &&
3540 peer_state.conn <= C_WF_BITMAP_T));
3541
3542 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003543 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003544
3545 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003546 if (ns.conn == C_MASK) {
3547 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003548 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003549 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003550 } else if (peer_state.disk == D_NEGOTIATING) {
3551 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3552 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003553 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003554 } else {
Philipp Reisner8169e412011-03-15 18:40:27 +01003555 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003556 return -EIO;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003557 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003558 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003559 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003560 }
3561 }
3562 }
3563
Philipp Reisner87eeee42011-01-19 14:16:30 +01003564 spin_lock_irq(&mdev->tconn->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003565 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003566 goto retry;
3567 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003568 ns.peer = peer_state.role;
3569 ns.pdsk = real_peer_disk;
3570 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003571 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003572 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003573 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3574 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003575 test_bit(NEW_CUR_UUID, &mdev->flags)) {
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01003576 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
Philipp Reisner481c6f52010-06-22 14:03:27 +02003577		   for temporary network outages! */
Philipp Reisner87eeee42011-01-19 14:16:30 +01003578 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003579 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01003580 tl_clear(mdev->tconn);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003581 drbd_uuid_new_current(mdev);
3582 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003583 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003584 return -EIO;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003585 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003586 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003587 ns = mdev->state;
Philipp Reisner87eeee42011-01-19 14:16:30 +01003588 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003589
3590 if (rv < SS_SUCCESS) {
Philipp Reisner38fa9982011-03-15 18:24:49 +01003591 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003592 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003593 }
3594
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003595 if (os.conn > C_WF_REPORT_PARAMS) {
3596 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003597 peer_state.disk != D_NEGOTIATING ) {
3598 /* we want resync, peer has not yet decided to sync... */
3599 /* Nowadays only used when forcing a node into primary role and
3600 setting its disk to UpToDate with that */
3601 drbd_send_uuids(mdev);
3602 drbd_send_state(mdev);
3603 }
3604 }
3605
Philipp Reisner89e58e72011-01-19 13:12:45 +01003606 mdev->tconn->net_conf->want_lose = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003607
3608 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3609
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003610 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003611}
3612
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003613static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003614{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003615 struct drbd_conf *mdev;
3616 struct p_rs_uuid *p = tconn->data.rbuf;
3617
3618 mdev = vnr_to_mdev(tconn, pi->vnr);
3619 if (!mdev)
3620 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003621
3622 wait_event(mdev->misc_wait,
3623 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003624 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003625 mdev->state.conn < C_CONNECTED ||
3626 mdev->state.disk < D_NEGOTIATING);
3627
3628 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3629
Philipp Reisnerb411b362009-09-25 16:07:19 -07003630 /* Here the _drbd_uuid_ functions are right, current should
3631 _not_ be rotated into the history */
3632 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3633 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3634 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3635
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003636 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003637 drbd_start_resync(mdev, C_SYNC_TARGET);
3638
3639 put_ldev(mdev);
3640 } else
3641 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3642
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003643 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003644}
3645
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003646/**
3647 * receive_bitmap_plain
3648 *
3649 * Return 0 when done, 1 when another iteration is needed, and a negative error
3650 * code upon failure.
3651 */
3652static int
Philipp Reisner02918be2010-08-20 14:35:10 +02003653receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
Andreas Gruenbacherfc568152011-03-24 21:23:50 +01003654 struct p_header *h, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003655{
Andreas Gruenbacherfc568152011-03-24 21:23:50 +01003656 unsigned long *buffer = (unsigned long *)h->payload;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003657 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3658 unsigned want = num_words * sizeof(long);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003659 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003660
Philipp Reisner02918be2010-08-20 14:35:10 +02003661 if (want != data_size) {
3662 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003663 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003664 }
3665 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003666 return 0;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003667 err = drbd_recv_all(mdev->tconn, buffer, want);
3668 if (err)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003669 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003670
3671 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3672
3673 c->word_offset += num_words;
3674 c->bit_offset = c->word_offset * BITS_PER_LONG;
3675 if (c->bit_offset > c->bm_bits)
3676 c->bit_offset = c->bm_bits;
3677
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003678 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003679}
3680
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01003681static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
3682{
3683 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
3684}
3685
3686static int dcbp_get_start(struct p_compressed_bm *p)
3687{
3688 return (p->encoding & 0x80) != 0;
3689}
3690
3691static int dcbp_get_pad_bits(struct p_compressed_bm *p)
3692{
3693 return (p->encoding >> 4) & 0x7;
3694}
3695
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003696/**
3697 * recv_bm_rle_bits
3698 *
3699 * Return 0 when done, 1 when another iteration is needed, and a negative error
3700 * code upon failure.
3701 */
3702static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003703recv_bm_rle_bits(struct drbd_conf *mdev,
3704 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01003705 struct bm_xfer_ctx *c,
3706 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003707{
3708 struct bitstream bs;
3709 u64 look_ahead;
3710 u64 rl;
3711 u64 tmp;
3712 unsigned long s = c->bit_offset;
3713 unsigned long e;
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01003714 int toggle = dcbp_get_start(p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003715 int have;
3716 int bits;
3717
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01003718 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003719
3720 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3721 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003722 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003723
3724 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3725 bits = vli_decode_bits(&rl, look_ahead);
3726 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003727 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003728
3729 if (toggle) {
3730			e = s + rl - 1;
3731 if (e >= c->bm_bits) {
3732 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003733 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003734 }
3735 _drbd_bm_set_bits(mdev, s, e);
3736 }
3737
3738 if (have < bits) {
3739 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3740 have, bits, look_ahead,
3741 (unsigned int)(bs.cur.b - p->code),
3742 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003743 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003744 }
3745 look_ahead >>= bits;
3746 have -= bits;
3747
3748 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3749 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003750 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003751 look_ahead |= tmp << have;
3752 have += bits;
3753 }
3754
3755 c->bit_offset = s;
3756 bm_xfer_ctx_bit_to_word_offset(c);
3757
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003758 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003759}
3760
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003761/**
3762 * decode_bitmap_c
3763 *
3764 * Return 0 when done, 1 when another iteration is needed, and a negative error
3765 * code upon failure.
3766 */
3767static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003768decode_bitmap_c(struct drbd_conf *mdev,
3769 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01003770 struct bm_xfer_ctx *c,
3771 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003772{
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01003773 if (dcbp_get_code(p) == RLE_VLI_Bits)
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01003774 return recv_bm_rle_bits(mdev, p, c, len);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003775
3776 /* other variants had been implemented for evaluation,
3777 * but have been dropped as this one turned out to be "best"
3778 * during all our tests. */
3779
3780 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003781 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003782 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003783}
3784
3785void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3786 const char *direction, struct bm_xfer_ctx *c)
3787{
3788 /* what would it take to transfer it "plaintext" */
Philipp Reisnerc0129492011-01-19 16:58:16 +01003789 unsigned plain = sizeof(struct p_header) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003790 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3791 + c->bm_words * sizeof(long);
3792 unsigned total = c->bytes[0] + c->bytes[1];
3793 unsigned r;
3794
3795	/* total cannot be zero, but just in case: */
3796 if (total == 0)
3797 return;
3798
3799 /* don't report if not compressed */
3800 if (total >= plain)
3801 return;
3802
3803 /* total < plain. check for overflow, still */
3804 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3805 : (1000 * total / plain);
3806
3807 if (r > 1000)
3808 r = 1000;
3809
3810 r = 1000 - r;
3811 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3812 "total %u; compression: %u.%u%%\n",
3813 direction,
3814 c->bytes[1], c->packets[1],
3815 c->bytes[0], c->packets[0],
3816 total, r/10, r % 10);
3817}
3818
3819/* Since we are processing the bitfield from lower addresses to higher,
3820   it does not matter if we process it in 32 bit chunks or 64 bit
3821   chunks as long as it is little endian. (Understand it as byte stream,
3822   beginning with the lowest byte...) If we used big endian
3823   we would need to process it from the highest address to the lowest,
3824   in order to be agnostic to the 32 vs 64 bit issue.
3825
3826   Returns 0 on success, a negative error code otherwise. */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003827static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003828{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003829 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003830 struct bm_xfer_ctx c;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003831 int err;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003832 struct p_header *h = tconn->data.rbuf;
3833
3834 mdev = vnr_to_mdev(tconn, pi->vnr);
3835 if (!mdev)
3836 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003837
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003838 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3839 /* you are supposed to send additional out-of-sync information
3840 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003841
Philipp Reisnerb411b362009-09-25 16:07:19 -07003842 c = (struct bm_xfer_ctx) {
3843 .bm_bits = drbd_bm_bits(mdev),
3844 .bm_words = drbd_bm_words(mdev),
3845 };
3846
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003847 for(;;) {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003848 if (pi->cmd == P_BITMAP) {
3849 err = receive_bitmap_plain(mdev, pi->size, h, &c);
3850 } else if (pi->cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003851 /* MAYBE: sanity check that we speak proto >= 90,
3852 * and the feature is enabled! */
3853 struct p_compressed_bm *p;
3854
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003855 if (pi->size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003856 dev_err(DEV, "ReportCBitmap packet too large\n");
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003857 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003858 goto out;
3859 }
Andreas Gruenbacherfc568152011-03-24 21:23:50 +01003860
3861 p = mdev->tconn->data.rbuf;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003862 err = drbd_recv_all(mdev->tconn, p->head.payload, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003863 if (err)
3864 goto out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003865 if (pi->size <= (sizeof(*p) - sizeof(p->head))) {
3866 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003867 err = -EIO;
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003868 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003869 }
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003870 err = decode_bitmap_c(mdev, p, &c, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003871 } else {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003872			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003873 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003874 goto out;
3875 }
3876
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003877 c.packets[pi->cmd == P_BITMAP]++;
3878 c.bytes[pi->cmd == P_BITMAP] += sizeof(struct p_header) + pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003879
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003880 if (err <= 0) {
3881 if (err < 0)
3882 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003883 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003884 }
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003885 err = drbd_recv_header(mdev->tconn, pi);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003886 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003887 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003888 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003889
3890 INFO_bm_xfer_stats(mdev, "receive", &c);
3891
3892 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003893 enum drbd_state_rv rv;
3894
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003895 err = drbd_send_bitmap(mdev);
3896 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003897 goto out;
3898 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003899 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3900 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003901 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3902 /* admin may have requested C_DISCONNECTING,
3903 * other threads may have noticed network errors */
3904 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3905 drbd_conn_str(mdev->state.conn));
3906 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003907 err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003908
Philipp Reisnerb411b362009-09-25 16:07:19 -07003909 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003910 drbd_bm_unlock(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003911 if (!err && mdev->state.conn == C_WF_BITMAP_S)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003912 drbd_start_resync(mdev, C_SYNC_SOURCE);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003913 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003914}
3915
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003916static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003917{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003918 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003919 pi->cmd, pi->size);
Philipp Reisner2de876e2011-03-15 14:38:01 +01003920
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003921 return ignore_remaining_packet(tconn, pi);
Philipp Reisner2de876e2011-03-15 14:38:01 +01003922}
3923
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003924static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003925{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003926 /* Make sure we've acked all the TCP data associated
3927 * with the data requests being unplugged */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003928 drbd_tcp_quickack(tconn->data.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003929
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003930 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003931}
3932
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003933static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner73a01a12010-10-27 14:33:00 +02003934{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003935 struct drbd_conf *mdev;
3936 struct p_block_desc *p = tconn->data.rbuf;
3937
3938 mdev = vnr_to_mdev(tconn, pi->vnr);
3939 if (!mdev)
3940 return -EIO;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003941
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003942 switch (mdev->state.conn) {
3943 case C_WF_SYNC_UUID:
3944 case C_WF_BITMAP_T:
3945 case C_BEHIND:
3946 break;
3947 default:
3948 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3949 drbd_conn_str(mdev->state.conn));
3950 }
3951
Philipp Reisner73a01a12010-10-27 14:33:00 +02003952 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3953
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003954 return 0;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003955}
3956
Philipp Reisner02918be2010-08-20 14:35:10 +02003957struct data_cmd {
3958 int expect_payload;
3959 size_t pkt_size;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003960 int (*fn)(struct drbd_tconn *, struct packet_info *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003961};
3962
Philipp Reisner02918be2010-08-20 14:35:10 +02003963static struct data_cmd drbd_cmd_handler[] = {
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003964 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3965 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3966 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3967 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3968 [P_BITMAP] = { 1, sizeof(struct p_header), receive_bitmap } ,
3969 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header), receive_bitmap } ,
3970 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header), receive_UnplugRemote },
3971 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3972 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3973 [P_SYNC_PARAM] = { 1, sizeof(struct p_header), receive_SyncParam },
3974 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header), receive_SyncParam },
3975 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3976 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3977 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3978 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3979 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3980 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3981 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3982 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3983 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3984 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3985 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
3986 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
Philipp Reisner02918be2010-08-20 14:35:10 +02003987};
3988
Philipp Reisnereefc2f72011-02-08 12:55:24 +01003989static void drbdd(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003990{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01003991 struct p_header *header = tconn->data.rbuf;
Philipp Reisner77351055b2011-02-07 17:24:26 +01003992 struct packet_info pi;
Philipp Reisner02918be2010-08-20 14:35:10 +02003993 size_t shs; /* sub header size */
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003994 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003995
Philipp Reisnereefc2f72011-02-08 12:55:24 +01003996 while (get_t_state(&tconn->receiver) == RUNNING) {
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01003997 struct data_cmd *cmd;
3998
Philipp Reisnereefc2f72011-02-08 12:55:24 +01003999 drbd_thread_current_set_cpu(&tconn->receiver);
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004000 if (drbd_recv_header(tconn, &pi))
Philipp Reisner02918be2010-08-20 14:35:10 +02004001 goto err_out;
4002
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004003 cmd = &drbd_cmd_handler[pi.cmd];
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004004 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004005 conn_err(tconn, "unknown packet type %d, l: %d!\n", pi.cmd, pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004006 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01004007 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004008
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004009 shs = cmd->pkt_size - sizeof(struct p_header);
4010 if (pi.size - shs > 0 && !cmd->expect_payload) {
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004011 conn_err(tconn, "No payload expected %s l:%d\n", cmdname(pi.cmd), pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004012 goto err_out;
4013 }
4014
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004015 if (shs) {
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004016 err = drbd_recv_all_warn(tconn, &header->payload, shs);
4017 if (err)
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004018 goto err_out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004019 pi.size -= shs;
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004020 }
4021
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004022 err = cmd->fn(tconn, &pi);
4023 if (err) {
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004024 conn_err(tconn, "error receiving %s, l: %d!\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004025 cmdname(pi.cmd), pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004026 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004027 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004028 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004029 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004030
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004031 err_out:
4032 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004033}
4034
Philipp Reisner0e29d162011-02-18 14:23:11 +01004035void conn_flush_workqueue(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004036{
4037 struct drbd_wq_barrier barr;
4038
4039 barr.w.cb = w_prev_work_done;
Philipp Reisner0e29d162011-02-18 14:23:11 +01004040 barr.w.tconn = tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004041 init_completion(&barr.done);
Philipp Reisner0e29d162011-02-18 14:23:11 +01004042 drbd_queue_work(&tconn->data.work, &barr.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004043 wait_for_completion(&barr.done);
4044}
4045
Philipp Reisner360cc742011-02-08 14:29:53 +01004046static void drbd_disconnect(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004047{
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004048 enum drbd_conns oc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004049 int rv = SS_UNKNOWN_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004050
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004051 if (tconn->cstate == C_STANDALONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004052 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004053
4054 /* asender does not clean up anything. it must not interfere, either */
Philipp Reisner360cc742011-02-08 14:29:53 +01004055 drbd_thread_stop(&tconn->asender);
4056 drbd_free_sock(tconn);
4057
4058 idr_for_each(&tconn->volumes, drbd_disconnected, tconn);
Philipp Reisner360cc742011-02-08 14:29:53 +01004059 conn_info(tconn, "Connection closed\n");
4060
Philipp Reisnercb703452011-03-24 11:03:07 +01004061 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4062 conn_try_outdate_peer_async(tconn);
4063
Philipp Reisner360cc742011-02-08 14:29:53 +01004064 spin_lock_irq(&tconn->req_lock);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004065 oc = tconn->cstate;
4066 if (oc >= C_UNCONNECTED)
4067 rv = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4068
Philipp Reisner360cc742011-02-08 14:29:53 +01004069 spin_unlock_irq(&tconn->req_lock);
4070
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004071 if (oc == C_DISCONNECTING) {
Philipp Reisner360cc742011-02-08 14:29:53 +01004072 wait_event(tconn->net_cnt_wait, atomic_read(&tconn->net_cnt) == 0);
4073
4074 crypto_free_hash(tconn->cram_hmac_tfm);
4075 tconn->cram_hmac_tfm = NULL;
4076
4077 kfree(tconn->net_conf);
4078 tconn->net_conf = NULL;
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004079 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE);
Philipp Reisner360cc742011-02-08 14:29:53 +01004080 }
4081}
4082
4083static int drbd_disconnected(int vnr, void *p, void *data)
4084{
4085 struct drbd_conf *mdev = (struct drbd_conf *)p;
4086 enum drbd_fencing_p fp;
4087 unsigned int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004088
Philipp Reisner85719572010-07-21 10:20:17 +02004089 /* wait for current activity to cease. */
Philipp Reisner87eeee42011-01-19 14:16:30 +01004090 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004091 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4092 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4093 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004094 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004095
4096 /* We do not have data structures that would allow us to
4097 * get the rs_pending_cnt down to 0 again.
4098 * * On C_SYNC_TARGET we do not have any data structures describing
4099 * the pending RSDataRequest's we have sent.
4100 * * On C_SYNC_SOURCE there is no data structure that tracks
4101 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4102 * And no, it is not the sum of the reference counts in the
4103 * resync_LRU. The resync_LRU tracks the whole operation including
4104 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4105 * on the fly. */
4106 drbd_rs_cancel_all(mdev);
4107 mdev->rs_total = 0;
4108 mdev->rs_failed = 0;
4109 atomic_set(&mdev->rs_pending_cnt, 0);
4110 wake_up(&mdev->misc_wait);
4111
Philipp Reisner7fde2be2011-03-01 11:08:28 +01004112 del_timer(&mdev->request_timer);
4113
Philipp Reisnerb411b362009-09-25 16:07:19 -07004114 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004115 resync_timer_fn((unsigned long)mdev);
4116
Philipp Reisnerb411b362009-09-25 16:07:19 -07004117 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4118 * w_make_resync_request etc. which may still be on the worker queue
4119 * to be "canceled" */
Philipp Reisnera21e9292011-02-08 15:08:49 +01004120 drbd_flush_workqueue(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004121
4122 /* This also does reclaim_net_ee(). If we do this too early, we might
4123 * miss some resync ee and pages.*/
4124 drbd_process_done_ee(mdev);
4125
4126 kfree(mdev->p_uuid);
4127 mdev->p_uuid = NULL;
4128
Philipp Reisnerfb22c402010-09-08 23:20:21 +02004129 if (!is_susp(mdev->state))
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01004130 tl_clear(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004131
Philipp Reisnerb411b362009-09-25 16:07:19 -07004132 drbd_md_sync(mdev);
4133
4134 fp = FP_DONT_CARE;
4135 if (get_ldev(mdev)) {
4136 fp = mdev->ldev->dc.fencing;
4137 put_ldev(mdev);
4138 }
4139
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004140 /* serialize with bitmap writeout triggered by the state change,
4141 * if any. */
4142 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4143
Philipp Reisnerb411b362009-09-25 16:07:19 -07004144 /* tcp_close and release of sendpage pages can be deferred. I don't
4145 * want to use SO_LINGER, because apparently it can be deferred for
4146 * more than 20 seconds (longest time I checked).
4147 *
4148 * Actually we don't care for exactly when the network stack does its
4149 * put_page(), but release our reference on these pages right here.
4150 */
4151 i = drbd_release_ee(mdev, &mdev->net_ee);
4152 if (i)
4153 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02004154 i = atomic_read(&mdev->pp_in_use_by_net);
4155 if (i)
4156 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004157 i = atomic_read(&mdev->pp_in_use);
4158 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02004159 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004160
4161 D_ASSERT(list_empty(&mdev->read_ee));
4162 D_ASSERT(list_empty(&mdev->active_ee));
4163 D_ASSERT(list_empty(&mdev->sync_ee));
4164 D_ASSERT(list_empty(&mdev->done_ee));
4165
4166 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4167 atomic_set(&mdev->current_epoch->epoch_size, 0);
4168 D_ASSERT(list_empty(&mdev->current_epoch->list));
Philipp Reisner360cc742011-02-08 14:29:53 +01004169
4170 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004171}
4172
4173/*
4174 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4175 * we can agree on is stored in agreed_pro_version.
4176 *
4177 * feature flags and the reserved array should be enough room for future
4178 * enhancements of the handshake protocol, and possible plugins...
4179 *
4180 * for now, they are expected to be zero, but ignored.
4181 */
Philipp Reisner8a22ccc2011-02-07 16:47:12 +01004182static int drbd_send_handshake(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004183{
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01004184	/* ASSERT current == tconn->receiver ... */
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01004185 struct p_handshake *p = tconn->data.sbuf;
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004186 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004187
Philipp Reisner8a22ccc2011-02-07 16:47:12 +01004188 if (mutex_lock_interruptible(&tconn->data.mutex)) {
4189 conn_err(tconn, "interrupted during initial handshake\n");
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004190 return -EINTR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004191 }
4192
Philipp Reisner8a22ccc2011-02-07 16:47:12 +01004193 if (tconn->data.socket == NULL) {
4194 mutex_unlock(&tconn->data.mutex);
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004195 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004196 }
4197
4198 memset(p, 0, sizeof(*p));
4199 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4200 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004201 err = _conn_send_cmd(tconn, 0, tconn->data.socket, P_HAND_SHAKE,
Andreas Gruenbacherecf23632011-03-15 23:48:25 +01004202 &p->head, sizeof(*p), 0);
Philipp Reisner8a22ccc2011-02-07 16:47:12 +01004203 mutex_unlock(&tconn->data.mutex);
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004204 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004205}
4206
4207/*
4208 * return values:
4209 * 1 yes, we have a valid connection
4210 * 0 oops, did not work out, please try again
4211 * -1 peer talks different language,
4212 * no point in trying again, please go standalone.
4213 */
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004214static int drbd_do_handshake(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004215{
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004216 /* ASSERT current == tconn->receiver ... */
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004217 struct p_handshake *p = tconn->data.rbuf;
Philipp Reisner02918be2010-08-20 14:35:10 +02004218 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004219 struct packet_info pi;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004220 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004221
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004222 err = drbd_send_handshake(tconn);
4223 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004224 return 0;
4225
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004226 err = drbd_recv_header(tconn, &pi);
4227 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004228 return 0;
4229
Philipp Reisner77351055b2011-02-07 17:24:26 +01004230 if (pi.cmd != P_HAND_SHAKE) {
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004231 conn_err(tconn, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004232 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004233 return -1;
4234 }
4235
Philipp Reisner77351055b2011-02-07 17:24:26 +01004236 if (pi.size != expect) {
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004237 conn_err(tconn, "expected HandShake length: %u, received: %u\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004238 expect, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004239 return -1;
4240 }
4241
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004242 err = drbd_recv_all_warn(tconn, &p->head.payload, expect);
4243 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004244 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004245
Philipp Reisnerb411b362009-09-25 16:07:19 -07004246 p->protocol_min = be32_to_cpu(p->protocol_min);
4247 p->protocol_max = be32_to_cpu(p->protocol_max);
4248 if (p->protocol_max == 0)
4249 p->protocol_max = p->protocol_min;
4250
4251 if (PRO_VERSION_MAX < p->protocol_min ||
4252 PRO_VERSION_MIN > p->protocol_max)
4253 goto incompat;
4254
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004255 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004256
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004257 conn_info(tconn, "Handshake successful: "
4258 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004259
4260 return 1;
4261
4262 incompat:
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004263 conn_err(tconn, "incompatible DRBD dialects: "
Philipp Reisnerb411b362009-09-25 16:07:19 -07004264 "I support %d-%d, peer supports %d-%d\n",
4265 PRO_VERSION_MIN, PRO_VERSION_MAX,
4266 p->protocol_min, p->protocol_max);
4267 return -1;
4268}
4269
4270#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
Philipp Reisner13e60372011-02-08 09:54:40 +01004271static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004272{
4273	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4274 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004275 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004276}
4277#else
4278#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004279
4280/* Return value:
4281 1 - auth succeeded,
4282 0 - failed, try again (network error),
4283 -1 - auth failed, don't try again.
4284*/
4285
Philipp Reisner13e60372011-02-08 09:54:40 +01004286static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004287{
4288 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4289 struct scatterlist sg;
4290 char *response = NULL;
4291 char *right_response = NULL;
4292 char *peers_ch = NULL;
Philipp Reisner13e60372011-02-08 09:54:40 +01004293 unsigned int key_len = strlen(tconn->net_conf->shared_secret);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004294 unsigned int resp_size;
4295 struct hash_desc desc;
Philipp Reisner77351055b2011-02-07 17:24:26 +01004296 struct packet_info pi;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004297 int err, rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004298
Philipp Reisner13e60372011-02-08 09:54:40 +01004299 desc.tfm = tconn->cram_hmac_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004300 desc.flags = 0;
4301
Philipp Reisner13e60372011-02-08 09:54:40 +01004302 rv = crypto_hash_setkey(tconn->cram_hmac_tfm,
4303 (u8 *)tconn->net_conf->shared_secret, key_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004304 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004305 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004306 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004307 goto fail;
4308 }
4309
4310 get_random_bytes(my_challenge, CHALLENGE_LEN);
4311
Andreas Gruenbacherce9879c2011-03-15 23:34:29 +01004312 rv = !conn_send_cmd2(tconn, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004313 if (!rv)
4314 goto fail;
4315
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004316 err = drbd_recv_header(tconn, &pi);
4317 if (err) {
4318 rv = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004319 goto fail;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004320 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004321
Philipp Reisner77351055b2011-02-07 17:24:26 +01004322 if (pi.cmd != P_AUTH_CHALLENGE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004323 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004324 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004325 rv = 0;
4326 goto fail;
4327 }
4328
Philipp Reisner77351055b2011-02-07 17:24:26 +01004329 if (pi.size > CHALLENGE_LEN * 2) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004330 conn_err(tconn, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004331 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004332 goto fail;
4333 }
4334
Philipp Reisner77351055b2011-02-07 17:24:26 +01004335 peers_ch = kmalloc(pi.size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004336 if (peers_ch == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004337 conn_err(tconn, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004338 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004339 goto fail;
4340 }
4341
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004342 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4343 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004344 rv = 0;
4345 goto fail;
4346 }
4347
Philipp Reisner13e60372011-02-08 09:54:40 +01004348 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004349 response = kmalloc(resp_size, GFP_NOIO);
4350 if (response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004351 conn_err(tconn, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004352 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004353 goto fail;
4354 }
4355
4356 sg_init_table(&sg, 1);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004357 sg_set_buf(&sg, peers_ch, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004358
4359 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4360 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004361 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004362 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004363 goto fail;
4364 }
4365
Andreas Gruenbacherce9879c2011-03-15 23:34:29 +01004366 rv = !conn_send_cmd2(tconn, P_AUTH_RESPONSE, response, resp_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004367 if (!rv)
4368 goto fail;
4369
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004370 err = drbd_recv_header(tconn, &pi);
4371 if (err) {
4372 rv = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004373 goto fail;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004374 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004375
Philipp Reisner77351055b2011-02-07 17:24:26 +01004376 if (pi.cmd != P_AUTH_RESPONSE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004377 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004378 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004379 rv = 0;
4380 goto fail;
4381 }
4382
Philipp Reisner77351055b2011-02-07 17:24:26 +01004383 if (pi.size != resp_size) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004384 conn_err(tconn, "expected AuthResponse payload of wrong size\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004385 rv = 0;
4386 goto fail;
4387 }
4388
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004389	err = drbd_recv_all_warn(tconn, response, resp_size);
4390 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004391 rv = 0;
4392 goto fail;
4393 }
4394
4395 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004396 if (right_response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004397 conn_err(tconn, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004398 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004399 goto fail;
4400 }
4401
4402 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4403
4404 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4405 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004406 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004407 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004408 goto fail;
4409 }
4410
4411 rv = !memcmp(response, right_response, resp_size);
4412
4413 if (rv)
Philipp Reisner13e60372011-02-08 09:54:40 +01004414 conn_info(tconn, "Peer authenticated using %d bytes of '%s' HMAC\n",
4415 resp_size, tconn->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004416 else
4417 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004418
4419 fail:
4420 kfree(peers_ch);
4421 kfree(response);
4422 kfree(right_response);
4423
4424 return rv;
4425}
4426#endif
4427
4428int drbdd_init(struct drbd_thread *thi)
4429{
Philipp Reisner392c8802011-02-09 10:33:31 +01004430 struct drbd_tconn *tconn = thi->tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004431 int h;
4432
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004433 conn_info(tconn, "receiver (re)started\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004434
4435 do {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004436 h = drbd_connect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004437 if (h == 0) {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004438 drbd_disconnect(tconn);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004439 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004440 }
4441 if (h == -1) {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004442 conn_warn(tconn, "Discarding network configuration.\n");
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004443 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004444 }
4445 } while (h == 0);
4446
4447 if (h > 0) {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004448 if (get_net_conf(tconn)) {
4449 drbdd(tconn);
4450 put_net_conf(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004451 }
4452 }
4453
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004454 drbd_disconnect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004455
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004456 conn_info(tconn, "receiver terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004457 return 0;
4458}
4459
4460/* ********* acknowledge sender ******** */
4461
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004462static int got_conn_RqSReply(struct drbd_tconn *tconn, enum drbd_packet cmd)
4463{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004464 struct p_req_state_reply *p = tconn->meta.rbuf;
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004465 int retcode = be32_to_cpu(p->retcode);
4466
4467 if (retcode >= SS_SUCCESS) {
4468 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4469 } else {
4470 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4471 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4472 drbd_set_st_err_str(retcode), retcode);
4473 }
4474 wake_up(&tconn->ping_wait);
4475
4476 return true;
4477}
4478
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004479static int got_RqSReply(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004480{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004481 struct p_req_state_reply *p = mdev->tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004482 int retcode = be32_to_cpu(p->retcode);
4483
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004484 if (retcode >= SS_SUCCESS) {
4485 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4486 } else {
4487 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4488 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4489 drbd_set_st_err_str(retcode), retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004490 }
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004491 wake_up(&mdev->state_wait);
4492
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004493 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004494}
4495
Philipp Reisnerf19e4f82011-03-16 11:21:50 +01004496static int got_Ping(struct drbd_tconn *tconn, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004497{
Philipp Reisnerf19e4f82011-03-16 11:21:50 +01004498 return drbd_send_ping_ack(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004499
4500}
4501
Philipp Reisnerf19e4f82011-03-16 11:21:50 +01004502static int got_PingAck(struct drbd_tconn *tconn, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004503{
4504 /* restore idle timeout */
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01004505 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4506 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4507 wake_up(&tconn->ping_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004508
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004509 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004510}
4511
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004512static int got_IsInSync(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004513{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004514 struct p_block_ack *p = mdev->tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004515 sector_t sector = be64_to_cpu(p->sector);
4516 int blksize = be32_to_cpu(p->blksize);
4517
Philipp Reisner31890f42011-01-19 14:12:51 +01004518 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004519
4520 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4521
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004522 if (get_ldev(mdev)) {
4523 drbd_rs_complete_io(mdev, sector);
4524 drbd_set_in_sync(mdev, sector, blksize);
4525 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4526 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4527 put_ldev(mdev);
4528 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004529 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004530 atomic_add(blksize >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004531
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004532 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004533}
4534
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004535static int
4536validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4537 struct rb_root *root, const char *func,
4538 enum drbd_req_event what, bool missing_ok)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004539{
4540 struct drbd_request *req;
4541 struct bio_and_error m;
4542
Philipp Reisner87eeee42011-01-19 14:16:30 +01004543 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004544 req = find_request(mdev, root, id, sector, missing_ok, func);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004545 if (unlikely(!req)) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01004546 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004547 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004548 }
4549 __req_mod(req, what, &m);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004550 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004551
4552 if (m.bio)
4553 complete_master_bio(mdev, &m);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004554 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004555}
4556
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004557static int got_BlockAck(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004558{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004559 struct p_block_ack *p = mdev->tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004560 sector_t sector = be64_to_cpu(p->sector);
4561 int blksize = be32_to_cpu(p->blksize);
4562 enum drbd_req_event what;
4563
4564 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4565
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01004566 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004567 drbd_set_in_sync(mdev, sector, blksize);
4568 dec_rs_pending(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004569 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004570 }
Philipp Reisner257d0af2011-01-26 12:15:29 +01004571 switch (cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004572 case P_RS_WRITE_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004573 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004574 what = WRITE_ACKED_BY_PEER_AND_SIS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004575 break;
4576 case P_WRITE_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004577 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004578 what = WRITE_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004579 break;
4580 case P_RECV_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004581 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004582 what = RECV_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004583 break;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01004584 case P_DISCARD_WRITE:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004585 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01004586 what = DISCARD_WRITE;
4587 break;
4588 case P_RETRY_WRITE:
4589 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
4590 what = POSTPONE_WRITE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004591 break;
4592 default:
4593 D_ASSERT(0);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004594 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004595 }
4596
4597 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004598 &mdev->write_requests, __func__,
4599 what, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004600}
4601
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004602static int got_NegAck(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004603{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004604 struct p_block_ack *p = mdev->tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004605 sector_t sector = be64_to_cpu(p->sector);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004606 int size = be32_to_cpu(p->blksize);
Philipp Reisner89e58e72011-01-19 13:12:45 +01004607 bool missing_ok = mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A ||
4608 mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004609 bool found;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004610
4611 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4612
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01004613 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004614 dec_rs_pending(mdev);
4615 drbd_rs_failed_io(mdev, sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004616 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004617 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01004618
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004619 found = validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004620 &mdev->write_requests, __func__,
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004621 NEG_ACKED, missing_ok);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004622 if (!found) {
4623 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4624 The master bio might already be completed, therefore the
4625 request is no longer in the collision hash. */
4626 /* In Protocol B we might already have got a P_RECV_ACK
4627 but then get a P_NEG_ACK afterwards. */
4628 if (!missing_ok)
Philipp Reisner2deb8332011-01-17 18:39:18 +01004629 return false;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004630 drbd_set_out_of_sync(mdev, sector, size);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004631 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01004632 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004633}
4634
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004635static int got_NegDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004636{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004637 struct p_block_ack *p = mdev->tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004638 sector_t sector = be64_to_cpu(p->sector);
4639
4640 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01004641
Philipp Reisnerb411b362009-09-25 16:07:19 -07004642 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4643 (unsigned long long)sector, be32_to_cpu(p->blksize));
4644
4645 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004646 &mdev->read_requests, __func__,
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004647 NEG_ACKED, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004648}
4649
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004650static int got_NegRSDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004651{
4652 sector_t sector;
4653 int size;
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004654 struct p_block_ack *p = mdev->tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004655
4656 sector = be64_to_cpu(p->sector);
4657 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004658
4659 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4660
4661 dec_rs_pending(mdev);
4662
4663 if (get_ldev_if_state(mdev, D_FAILED)) {
4664 drbd_rs_complete_io(mdev, sector);
Philipp Reisner257d0af2011-01-26 12:15:29 +01004665 switch (cmd) {
Philipp Reisnerd612d302010-12-27 10:53:28 +01004666 case P_NEG_RS_DREPLY:
4667			drbd_rs_failed_io(mdev, sector, size); /* fall through */
4668 case P_RS_CANCEL:
4669 break;
4670 default:
4671 D_ASSERT(0);
4672 put_ldev(mdev);
4673 return false;
4674 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004675 put_ldev(mdev);
4676 }
4677
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004678 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004679}
4680
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004681static int got_BarrierAck(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004682{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004683 struct p_barrier_ack *p = mdev->tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004684
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01004685 tl_release(mdev->tconn, p->barrier, be32_to_cpu(p->set_size));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004686
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004687 if (mdev->state.conn == C_AHEAD &&
4688 atomic_read(&mdev->ap_in_flight) == 0 &&
Philipp Reisner370a43e2011-01-14 16:03:11 +01004689 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4690 mdev->start_resync_timer.expires = jiffies + HZ;
4691 add_timer(&mdev->start_resync_timer);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004692 }
4693
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004694 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004695}
4696
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004697static int got_OVResult(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004698{
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004699 struct p_block_ack *p = mdev->tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004700 struct drbd_work *w;
4701 sector_t sector;
4702 int size;
4703
4704 sector = be64_to_cpu(p->sector);
4705 size = be32_to_cpu(p->blksize);
4706
4707 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4708
4709 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01004710 drbd_ov_out_of_sync_found(mdev, sector, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004711 else
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01004712 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004713
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004714 if (!get_ldev(mdev))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004715 return true;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004716
Philipp Reisnerb411b362009-09-25 16:07:19 -07004717 drbd_rs_complete_io(mdev, sector);
4718 dec_rs_pending(mdev);
4719
Lars Ellenbergea5442a2010-11-05 09:48:01 +01004720 --mdev->ov_left;
4721
4722 /* let's advance progress step marks only for every other megabyte */
4723 if ((mdev->ov_left & 0x200) == 0x200)
4724 drbd_advance_rs_marks(mdev, mdev->ov_left);
4725
4726 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004727 w = kmalloc(sizeof(*w), GFP_NOIO);
4728 if (w) {
4729 w->cb = w_ov_finished;
Philipp Reisnera21e9292011-02-08 15:08:49 +01004730 w->mdev = mdev;
Philipp Reisnere42325a2011-01-19 13:55:45 +01004731 drbd_queue_work_front(&mdev->tconn->data.work, w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004732 } else {
4733 dev_err(DEV, "kmalloc(w) failed.");
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01004734 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004735 drbd_resync_finished(mdev);
4736 }
4737 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004738 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004739 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004740}
4741
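/* Accept and ignore the packet; used for P_DELAY_PROBE on the meta socket. */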
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01004742static int got_skip(struct drbd_conf *mdev, enum drbd_packet cmd)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004743{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004744 return true;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004745}
4746
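/*
 * Process the completed epoch entries (typically sending the corresponding
 * acks) of every volume on this connection.  Loop until all done_ee lists are
 * seen empty, since new entries may complete while we are busy sending.
 * Returns non-zero on error.
 */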
Philipp Reisner32862ec2011-02-08 16:41:01 +01004747static int tconn_process_done_ee(struct drbd_tconn *tconn)
4748{
Philipp Reisner082a3432011-03-15 16:05:42 +01004749 struct drbd_conf *mdev;
4750 int i, not_empty = 0;
Philipp Reisner32862ec2011-02-08 16:41:01 +01004751
4752 do {
4753 clear_bit(SIGNAL_ASENDER, &tconn->flags);
4754 flush_signals(current);
Philipp Reisner082a3432011-03-15 16:05:42 +01004755 idr_for_each_entry(&tconn->volumes, mdev, i) {
Andreas Gruenbachere2b30322011-03-16 17:16:12 +01004756 if (drbd_process_done_ee(mdev))
Philipp Reisner082a3432011-03-15 16:05:42 +01004757 return 1; /* error */
4758 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01004759 set_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisner082a3432011-03-15 16:05:42 +01004760
4761 spin_lock_irq(&tconn->req_lock);
4762 idr_for_each_entry(&tconn->volumes, mdev, i) {
4763 not_empty = !list_empty(&mdev->done_ee);
4764 if (not_empty)
4765 break;
4766 }
4767 spin_unlock_irq(&tconn->req_lock);
Philipp Reisner32862ec2011-02-08 16:41:01 +01004768 } while (not_empty);
4769
4770 return 0;
4771}
4772
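/*
 * Dispatch table for packets arriving on the meta socket: the expected packet
 * size plus a handler that takes either the whole connection (CONN) or the
 * volume addressed by the packet (MDEV) as its first argument.
 */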
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004773struct asender_cmd {
4774 size_t pkt_size;
Philipp Reisnera4fbda82011-03-16 11:13:17 +01004775 enum mdev_or_conn fa_type; /* first argument's type */
4776 union {
4777 int (*mdev_fn)(struct drbd_conf *mdev, enum drbd_packet cmd);
4778 int (*conn_fn)(struct drbd_tconn *tconn, enum drbd_packet cmd);
4779 };
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004780};
4781
4782static struct asender_cmd asender_tbl[] = {
Philipp Reisnerf19e4f82011-03-16 11:21:50 +01004783 [P_PING] = { sizeof(struct p_header), CONN, { .conn_fn = got_Ping } },
4784 [P_PING_ACK] = { sizeof(struct p_header), CONN, { .conn_fn = got_PingAck } },
Philipp Reisnera4fbda82011-03-16 11:13:17 +01004785 [P_RECV_ACK] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
4786 [P_WRITE_ACK] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
4787 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
4788 [P_DISCARD_WRITE] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
4789 [P_NEG_ACK] = { sizeof(struct p_block_ack), MDEV, { got_NegAck } },
4790 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), MDEV, { got_NegDReply } },
4791 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), MDEV, { got_NegRSDReply } },
4792 [P_OV_RESULT] = { sizeof(struct p_block_ack), MDEV, { got_OVResult } },
4793 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), MDEV, { got_BarrierAck } },
4794 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), MDEV, { got_RqSReply } },
4795 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), MDEV, { got_IsInSync } },
4796 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), MDEV, { got_skip } },
4797 [P_RS_CANCEL] = { sizeof(struct p_block_ack), MDEV, { got_NegRSDReply } },
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004798 [P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), CONN, { .conn_fn = got_conn_RqSReply } },
Philipp Reisnera4fbda82011-03-16 11:13:17 +01004799 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004800};
4801
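/*
 * The asender thread drives the meta socket of one connection: it sends pings
 * on request, acks completed epoch entries of all volumes, and receives the
 * small ack/reply packets, dispatching them through asender_tbl[].
 */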
Philipp Reisnerb411b362009-09-25 16:07:19 -07004802int drbd_asender(struct drbd_thread *thi)
4803{
Philipp Reisner392c8802011-02-09 10:33:31 +01004804 struct drbd_tconn *tconn = thi->tconn;
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01004805 struct p_header *h = tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004806 struct asender_cmd *cmd = NULL;
Philipp Reisner77351055b2011-02-07 17:24:26 +01004807 struct packet_info pi;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004808 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004809 void *buf = h;
4810 int received = 0;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004811 int expect = sizeof(struct p_header);
Lars Ellenbergf36af182011-03-09 22:44:55 +01004812 int ping_timeout_active = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004813
Philipp Reisnerb411b362009-09-25 16:07:19 -07004814 current->policy = SCHED_RR; /* Make this a realtime task! */
4815 current->rt_priority = 2; /* more important than all other tasks */
4816
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01004817 while (get_t_state(thi) == RUNNING) {
Philipp Reisner80822282011-02-08 12:46:30 +01004818 drbd_thread_current_set_cpu(thi);
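		/* A ping was requested, either because the idle timeout below
		 * expired or because another context asked for one: send it
		 * and start the ping-timeout clock on the receive side. */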
Philipp Reisner32862ec2011-02-08 16:41:01 +01004819 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01004820 if (!drbd_send_ping(tconn)) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004821 conn_err(tconn, "drbd_send_ping has failed\n");
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01004822 goto reconnect;
4823 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01004824 tconn->meta.socket->sk->sk_rcvtimeo =
4825 tconn->net_conf->ping_timeo*HZ/10;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004826 ping_timeout_active = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004827 }
4828
Philipp Reisner32862ec2011-02-08 16:41:01 +01004829 /* TODO: conditionally cork; it may hurt latency if we cork without
4830 much to send */
4831 if (!tconn->net_conf->no_cork)
4832 drbd_tcp_cork(tconn->meta.socket);
Philipp Reisner082a3432011-03-15 16:05:42 +01004833 if (tconn_process_done_ee(tconn)) {
4834 conn_err(tconn, "tconn_process_done_ee() failed\n");
Philipp Reisner32862ec2011-02-08 16:41:01 +01004835 goto reconnect;
Philipp Reisner082a3432011-03-15 16:05:42 +01004836 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004837 /* but unconditionally uncork unless disabled */
Philipp Reisner32862ec2011-02-08 16:41:01 +01004838 if (!tconn->net_conf->no_cork)
4839 drbd_tcp_uncork(tconn->meta.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004840
4841 /* short circuit, recv_msg would return EINTR anyways. */
4842 if (signal_pending(current))
4843 continue;
4844
Philipp Reisner32862ec2011-02-08 16:41:01 +01004845 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
4846 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004847
4848 flush_signals(current);
4849
4850 /* Note:
4851 * -EINTR (on meta) we got a signal
4852 * -EAGAIN (on meta) rcvtimeo expired
4853 * -ECONNRESET other side closed the connection
4854 * -ERESTARTSYS (on data) we got a signal
4855 * rv < 0 other than above: unexpected error!
4856 * rv == expected: full header or command
4857 * rv < expected: "woken" by signal during receive
4858 * rv == 0 : "connection shut down by peer"
4859 */
4860 if (likely(rv > 0)) {
4861 received += rv;
4862 buf += rv;
4863 } else if (rv == 0) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004864 conn_err(tconn, "meta connection shut down by peer.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004865 goto reconnect;
4866 } else if (rv == -EAGAIN) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004867 /* If the data socket received something meanwhile,
4868 * that is good enough: peer is still alive. */
Philipp Reisner32862ec2011-02-08 16:41:01 +01004869 if (time_after(tconn->last_received,
4870 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004871 continue;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004872 if (ping_timeout_active) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004873 conn_err(tconn, "PingAck did not arrive in time.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004874 goto reconnect;
4875 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01004876 set_bit(SEND_PING, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004877 continue;
4878 } else if (rv == -EINTR) {
4879 continue;
4880 } else {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004881 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004882 goto reconnect;
4883 }
4884
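		/* A complete header has arrived: decode it, look up the
		 * handler, and learn the total size expected for this packet. */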
4885 if (received == expect && cmd == NULL) {
Andreas Gruenbacher8172f3e2011-03-16 17:22:39 +01004886 if (decode_header(tconn, h, &pi))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004887 goto reconnect;
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004888 cmd = &asender_tbl[pi.cmd];
4889 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->pkt_size) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004890 conn_err(tconn, "unknown command %d on meta (l: %d)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004891 pi.cmd, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004892 goto disconnect;
4893 }
4894 expect = cmd->pkt_size;
Philipp Reisner77351055b2011-02-07 17:24:26 +01004895 if (pi.size != expect - sizeof(struct p_header)) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004896 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004897 pi.cmd, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004898 goto reconnect;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004899 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004900 }
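		/* The packet is complete: run the handler, per connection or
		 * per volume depending on the table entry. */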
4901 if (received == expect) {
Philipp Reisnera4fbda82011-03-16 11:13:17 +01004902 bool rv;
4903
4904 if (cmd->fa_type == CONN) {
4905 rv = cmd->conn_fn(tconn, pi.cmd);
4906 } else {
4907 struct drbd_conf *mdev = vnr_to_mdev(tconn, pi.vnr);
4908 rv = cmd->mdev_fn(mdev, pi.cmd);
4909 }
4910
4911 if (!rv)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004912 goto reconnect;
4913
Philipp Reisnera4fbda82011-03-16 11:13:17 +01004914 tconn->last_received = jiffies;
4915
Lars Ellenbergf36af182011-03-09 22:44:55 +01004916 /* the idle_timeout (ping-int)
4917 * has been restored in got_PingAck() */
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004918 if (cmd == &asender_tbl[P_PING_ACK])
Lars Ellenbergf36af182011-03-09 22:44:55 +01004919 ping_timeout_active = 0;
4920
Philipp Reisnerb411b362009-09-25 16:07:19 -07004921 buf = h;
4922 received = 0;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004923 expect = sizeof(struct p_header);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004924 cmd = NULL;
4925 }
4926 }
4927
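	/* Error exits: the "if (0)" blocks keep these labels out of the normal
	 * flow; force the connection into the corresponding network state. */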
4928 if (0) {
4929reconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004930 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004931 }
4932 if (0) {
4933disconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004934 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004935 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01004936 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004937
Philipp Reisner32862ec2011-02-08 16:41:01 +01004938 conn_info(tconn, "asender terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004939
4940 return 0;
4941}