/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(int vnr, void *p, void *data);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

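/* Gather @number pages into a single page chain: first try to take them
 * from the pre-allocated drbd_pp_pool, otherwise allocate them one by one
 * with GFP_TRY.  Returns NULL if fewer than @number pages are available;
 * any partially allocated chain is handed back to the pool. */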
static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
		page = __drbd_alloc_pages(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
			page = __drbd_alloc_pages(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_peer_request *peer_req;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "%s: allocation failed\n", __func__);
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		       int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_pp_free(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(mdev, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_write.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				    struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(const char **what, struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				conn_info(tconn, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			conn_info(tconn, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

	return rv;
}

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv(tconn, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

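/* Set up one outgoing TCP connection to the peer address from net_conf,
 * explicitly bound to the configured local address.  Returns the connected
 * socket, or NULL; "harmless" connect failures (peer not yet reachable)
 * do not force the connection state to C_DISCONNECTING. */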
static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = tconn->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, tconn->net_conf->my_addr,
	       min_t(int, tconn->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)tconn->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)tconn->net_conf->peer_addr,
				 tconn->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			conn_err(tconn, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	put_net_conf(tconn);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = tconn->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
				  (struct sockaddr *) tconn->net_conf->my_addr,
				  tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(&what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}
	put_net_conf(tconn);

	return s_estab;
}

static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

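/* The very first packet sent on a freshly connected or accepted socket
 * (P_INITIAL_DATA or P_INITIAL_META) only tells the peer whether that
 * socket is meant to become the data or the meta (ack) socket. */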
static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(tconn);
	struct packet_info pi;
	int err;

	err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(tconn, tconn->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}
/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(int vnr, void *p, void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)p;
	int err;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
	if (!err)
		err = drbd_send_sizes(mdev, 0, 0);
	if (!err)
		err = drbd_send_uuids(mdev);
	if (!err)
		err = drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_tconn *tconn)
{
	struct socket *sock, *msock;
	int try, h, ok;

	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &tconn->flags);

	/* Assume that the peer only understands protocol 80 until we know better.  */
	tconn->agreed_pro_version = 80;

	do {
		struct socket *s;

		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(tconn);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!tconn->data.socket) {
				tconn->data.socket = s;
				send_first_packet(tconn, &tconn->data, P_INITIAL_DATA);
			} else if (!tconn->meta.socket) {
				tconn->meta.socket = s;
				send_first_packet(tconn, &tconn->meta, P_INITIAL_META);
			} else {
				conn_err(tconn, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (tconn->data.socket && tconn->meta.socket) {
			schedule_timeout_interruptible(tconn->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(&tconn->data.socket);
			ok = drbd_socket_okay(&tconn->meta.socket) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(tconn);
		if (s) {
			try = receive_first_packet(tconn, s);
			drbd_socket_okay(&tconn->data.socket);
			drbd_socket_okay(&tconn->meta.socket);
			switch (try) {
			case P_INITIAL_DATA:
				if (tconn->data.socket) {
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(tconn->data.socket);
				}
				tconn->data.socket = s;
				break;
			case P_INITIAL_META:
				if (tconn->meta.socket) {
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(tconn->meta.socket);
				}
				tconn->meta.socket = s;
				set_bit(DISCARD_CONCURRENT, &tconn->flags);
				break;
			default:
				conn_warn(tconn, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		if (tconn->data.socket && tconn->meta.socket) {
			ok = drbd_socket_okay(&tconn->data.socket);
			ok = drbd_socket_okay(&tconn->meta.socket) && ok;
			if (ok)
				break;
		}
	} while (1);

	sock  = tconn->data.socket;
	msock = tconn->meta.socket;

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = tconn->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	tconn->last_received = jiffies;

	h = drbd_do_features(tconn);
	if (h <= 0)
		return h;

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
		case -1:
			conn_err(tconn, "Authentication of peer failed\n");
			return -1;
		case 0:
			conn_err(tconn, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	drbd_thread_start(&tconn->asender);

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
		return -1;

	return !idr_for_each(&tconn->volumes, drbd_connected, tconn);

out_release_sockets:
	if (tconn->data.socket) {
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
	}
	if (tconn->meta.socket) {
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
	}
	return -1;
}

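/* Decode a received packet header into @pi.  Three on-the-wire header
 * formats are recognized, selected by the negotiated header size and the
 * magic value: struct p_header100 (carries a volume number), struct
 * p_header95 (32 bit length), and the original struct p_header80. */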
static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(tconn);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			conn_err(tconn, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 tconn->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}

static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
	void *buffer = tconn->data.rbuf;
	int err;

	err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
	if (err)
		return err;

	err = decode_header(tconn, buffer, pi);
	tconn->last_received = jiffies;

	return err;
}

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to an other write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&peer_req->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
{
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete.  */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}

Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001256static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001257{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001258 struct drbd_conf *mdev;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001259 int rv;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001260 struct p_barrier *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001261 struct drbd_epoch *epoch;
1262
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001263 mdev = vnr_to_mdev(tconn, pi->vnr);
1264 if (!mdev)
1265 return -EIO;
1266
Philipp Reisnerb411b362009-09-25 16:07:19 -07001267 inc_unacked(mdev);
1268
Philipp Reisnerb411b362009-09-25 16:07:19 -07001269 mdev->current_epoch->barrier_nr = p->barrier;
1270 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1271
1272 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1273 * the activity log, which means it would not be resynced in case the
1274 * R_PRIMARY crashes now.
1275 * Therefore we must send the barrier_ack after the barrier request was
1276 * completed. */
1277 switch (mdev->write_ordering) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001278 case WO_none:
1279 if (rv == FE_RECYCLED)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001280 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001281
1282 /* receiver context, in the writeout path of the other node.
1283 * avoid potential distributed deadlock */
1284 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1285 if (epoch)
1286 break;
1287 else
1288 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1289 /* Fall through */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001290
1291 case WO_bdev_flush:
1292 case WO_drain_io:
Philipp Reisnerb411b362009-09-25 16:07:19 -07001293 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
Philipp Reisner2451fc32010-08-24 13:43:11 +02001294 drbd_flush(mdev);
1295
1296 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1297 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1298 if (epoch)
1299 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001300 }
1301
Philipp Reisner2451fc32010-08-24 13:43:11 +02001302 epoch = mdev->current_epoch;
1303 wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
1304
1305 D_ASSERT(atomic_read(&epoch->active) == 0);
1306 D_ASSERT(epoch->flags == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001307
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001308 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001309 default:
1310 dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001311 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001312 }
1313
1314 epoch->flags = 0;
1315 atomic_set(&epoch->epoch_size, 0);
1316 atomic_set(&epoch->active, 0);
1317
1318 spin_lock(&mdev->epoch_lock);
1319 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1320 list_add(&epoch->list, &mdev->current_epoch->list);
1321 mdev->current_epoch = epoch;
1322 mdev->epochs++;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001323 } else {
1324 /* The current_epoch got recycled while we allocated this one... */
1325 kfree(epoch);
1326 }
1327 spin_unlock(&mdev->epoch_lock);
1328
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001329 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001330}
1331
1332/* used from receive_RSDataReply (recv_resync_read)
1333 * and from receive_Data */
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01001334static struct drbd_peer_request *
1335read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1336 int data_size) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001337{
Lars Ellenberg66660322010-04-06 12:15:04 +02001338 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001339 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001340 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001341 int dgs, ds, err;
Philipp Reisnera0638452011-01-19 14:31:32 +01001342 void *dig_in = mdev->tconn->int_dig_in;
1343 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001344 unsigned long *data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001345
Philipp Reisnera0638452011-01-19 14:31:32 +01001346 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1347 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001348
1349 if (dgs) {
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001350 /*
1351 * FIXME: Receive the incoming digest into the receive buffer
1352 * here, together with its struct p_data?
1353 */
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001354 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1355 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001356 return NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001357 }
1358
1359 data_size -= dgs;
1360
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001361 if (!expect(data_size != 0))
1362 return NULL;
1363 if (!expect(IS_ALIGNED(data_size, 512)))
1364 return NULL;
1365 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1366 return NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001367
Lars Ellenberg66660322010-04-06 12:15:04 +02001368 /* even though we trust our peer,
1369 * we sometimes have to double check. */
1370 if (sector + (data_size>>9) > capacity) {
Lars Ellenbergfdda6542011-01-24 15:11:01 +01001371 dev_err(DEV, "request from peer beyond end of local disk: "
1372 "capacity: %llus < sector: %llus + size: %u\n",
Lars Ellenberg66660322010-04-06 12:15:04 +02001373 (unsigned long long)capacity,
1374 (unsigned long long)sector, data_size);
1375 return NULL;
1376 }
1377
Philipp Reisnerb411b362009-09-25 16:07:19 -07001378 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1379 * "criss-cross" setup, that might cause write-out on some other DRBD,
1380 * which in turn might block on the other node at this very place. */
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02001381 peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001382 if (!peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001383 return NULL;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001384
Philipp Reisnerb411b362009-09-25 16:07:19 -07001385 ds = data_size;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001386 page = peer_req->pages;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001387 page_chain_for_each(page) {
1388 unsigned len = min_t(int, ds, PAGE_SIZE);
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001389 data = kmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001390 err = drbd_recv_all_warn(mdev->tconn, data, len);
Andreas Gruenbacher0cf9d272010-12-07 10:43:29 +01001391 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001392 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1393 data[0] = data[0] ^ (unsigned long)-1;
1394 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001395 kunmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001396 if (err) {
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001397 drbd_free_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001398 return NULL;
1399 }
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001400 ds -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001401 }
1402
1403 if (dgs) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001404 drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, peer_req, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001405 if (memcmp(dig_in, dig_vv, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001406 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1407 (unsigned long long)sector, data_size);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001408 drbd_free_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001409 return NULL;
1410 }
1411 }
1412 mdev->recv_cnt += data_size>>9;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001413 return peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001414}
1415
1416/* drbd_drain_block() just takes a data block
1417 * out of the socket input buffer, and discards it.
1418 */
1419static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1420{
1421 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001422 int err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001423 void *data;
1424
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001425 if (!data_size)
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001426 return 0;
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001427
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001428 page = drbd_pp_alloc(mdev, 1, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001429
1430 data = kmap(page);
1431 while (data_size) {
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001432 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1433
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001434 err = drbd_recv_all_warn(mdev->tconn, data, len);
1435 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001436 break;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001437 data_size -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001438 }
1439 kunmap(page);
Lars Ellenberg435f0742010-09-06 12:30:25 +02001440 drbd_pp_free(mdev, page, 0);
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001441 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001442}
1443
1444static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1445 sector_t sector, int data_size)
1446{
1447 struct bio_vec *bvec;
1448 struct bio *bio;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001449 int dgs, err, i, expect;
Philipp Reisnera0638452011-01-19 14:31:32 +01001450 void *dig_in = mdev->tconn->int_dig_in;
1451 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001452
Philipp Reisnera0638452011-01-19 14:31:32 +01001453 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1454 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001455
1456 if (dgs) {
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001457 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1458 if (err)
1459 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001460 }
1461
1462 data_size -= dgs;
1463
1464 /* optimistically update recv_cnt. if receiving fails below,
1465 * we disconnect anyway, and counters will be reset. */
1466 mdev->recv_cnt += data_size>>9;
1467
1468 bio = req->master_bio;
1469 D_ASSERT(sector == bio->bi_sector);
1470
1471 bio_for_each_segment(bvec, bio, i) {
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001472 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001473 expect = min_t(int, data_size, bvec->bv_len);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001474 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001475 kunmap(bvec->bv_page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001476 if (err)
1477 return err;
1478 data_size -= expect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001479 }
1480
1481 if (dgs) {
Philipp Reisnera0638452011-01-19 14:31:32 +01001482 drbd_csum_bio(mdev, mdev->tconn->integrity_r_tfm, bio, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001483 if (memcmp(dig_in, dig_vv, dgs)) {
1484 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001485 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001486 }
1487 }
1488
1489 D_ASSERT(data_size == 0);
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001490 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001491}
1492
Andreas Gruenbachera990be42011-04-06 17:56:48 +02001493/*
1494 * e_end_resync_block() is called in asender context via
1495 * drbd_finish_peer_reqs().
1496 */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001497static int e_end_resync_block(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001498{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001499 struct drbd_peer_request *peer_req =
1500 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001501 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001502 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001503 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001504
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001505 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001506
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001507 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1508 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001509 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001510 } else {
1511 /* Record failure to sync */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001512 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001513
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001514 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001515 }
1516 dec_unacked(mdev);
1517
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001518 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001519}
1520
1521static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1522{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001523 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001524
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001525 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1526 if (!peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001527 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001528
1529 dec_rs_pending(mdev);
1530
Philipp Reisnerb411b362009-09-25 16:07:19 -07001531 inc_unacked(mdev);
1532 /* corresponding dec_unacked() in e_end_resync_block()
1533 * or in _drbd_clear_done_ee, respectively */
1534
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001535 peer_req->w.cb = e_end_resync_block;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001536
Philipp Reisner87eeee42011-01-19 14:16:30 +01001537 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001538 list_add(&peer_req->w.list, &mdev->sync_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001539 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001540
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001541 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001542 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001543 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001544
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001545 /* don't care for the reason here */
1546 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01001547 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001548 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001549 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001550
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001551 drbd_free_peer_req(mdev, peer_req);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001552fail:
1553 put_ldev(mdev);
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001554 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001555}
1556
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001557static struct drbd_request *
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001558find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1559 sector_t sector, bool missing_ok, const char *func)
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001560{
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001561 struct drbd_request *req;
1562
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001563 /* Request object according to our peer */
1564 req = (struct drbd_request *)(unsigned long)id;
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001565 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001566 return req;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001567 if (!missing_ok) {
1568 dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
1569 (unsigned long)id, (unsigned long long)sector);
1570 }
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001571 return NULL;
1572}
1573
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001574static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001575{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001576 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001577 struct drbd_request *req;
1578 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001579 int err;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001580 struct p_data *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001581
1582 mdev = vnr_to_mdev(tconn, pi->vnr);
1583 if (!mdev)
1584 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001585
1586 sector = be64_to_cpu(p->sector);
1587
Philipp Reisner87eeee42011-01-19 14:16:30 +01001588 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001589 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001590 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001591 if (unlikely(!req))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001592 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593
Bart Van Assche24c48302011-05-21 18:32:29 +02001594 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
Philipp Reisnerb411b362009-09-25 16:07:19 -07001595 * special casing it there for the various failure cases.
1596 * still no race with drbd_fail_pending_reads */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001597 err = recv_dless_read(mdev, req, sector, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001598 if (!err)
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001599 req_mod(req, DATA_RECEIVED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001600 /* else: nothing. handled from drbd_disconnect...
1601 * I don't think we may complete this just yet
1602 * in case we are "on-disconnect: freeze" */
1603
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001604 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001605}
1606
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001607static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001608{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001609 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001610 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001611 int err;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001612 struct p_data *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001613
1614 mdev = vnr_to_mdev(tconn, pi->vnr);
1615 if (!mdev)
1616 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001617
1618 sector = be64_to_cpu(p->sector);
1619 D_ASSERT(p->block_id == ID_SYNCER);
1620
1621 if (get_ldev(mdev)) {
1622 /* data is submitted to disk within recv_resync_read.
1623 * corresponding put_ldev done below on error,
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01001624 * or in drbd_peer_request_endio. */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001625 err = recv_resync_read(mdev, sector, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001626 } else {
1627 if (__ratelimit(&drbd_ratelimit_state))
1628 dev_err(DEV, "Can not write resync data to local disk.\n");
1629
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001630 err = drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001631
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001632 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001633 }
1634
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001635 atomic_add(pi->size >> 9, &mdev->rs_sect_in);
Philipp Reisner778f2712010-07-06 11:14:00 +02001636
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001637 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001638}
1639
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001640static int w_restart_write(struct drbd_work *w, int cancel)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001641{
1642 struct drbd_request *req = container_of(w, struct drbd_request, w);
1643 struct drbd_conf *mdev = w->mdev;
1644 struct bio *bio;
1645 unsigned long start_time;
1646 unsigned long flags;
1647
1648 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
1649 if (!expect(req->rq_state & RQ_POSTPONED)) {
1650 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001651 return -EIO;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001652 }
1653 bio = req->master_bio;
1654 start_time = req->start_time;
1655 /* Postponed requests will not have their master_bio completed! */
1656 __req_mod(req, DISCARD_WRITE, NULL);
1657 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
1658
1659 while (__drbd_make_request(mdev, bio, start_time))
1660 /* retry */ ;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001661 return 0;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001662}
1663
1664static void restart_conflicting_writes(struct drbd_conf *mdev,
1665 sector_t sector, int size)
1666{
1667 struct drbd_interval *i;
1668 struct drbd_request *req;
1669
1670 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1671 if (!i->local)
1672 continue;
1673 req = container_of(i, struct drbd_request, i);
1674 if (req->rq_state & RQ_LOCAL_PENDING ||
1675 !(req->rq_state & RQ_POSTPONED))
1676 continue;
1677 if (expect(list_empty(&req->w.list))) {
1678 req->w.mdev = mdev;
1679 req->w.cb = w_restart_write;
1680 drbd_queue_work(&mdev->tconn->data.work, &req->w);
1681 }
1682 }
1683}
1684
Andreas Gruenbachera990be42011-04-06 17:56:48 +02001685/*
1686 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
Philipp Reisnerb411b362009-09-25 16:07:19 -07001687 */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001688static int e_end_block(struct drbd_work *w, int cancel)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001689{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001690 struct drbd_peer_request *peer_req =
1691 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001692 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001693 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001694 int err = 0, pcmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001695
Philipp Reisner89e58e72011-01-19 13:12:45 +01001696 if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001697 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001698 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1699 mdev->state.conn <= C_PAUSED_SYNC_T &&
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001700 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
Philipp Reisnerb411b362009-09-25 16:07:19 -07001701 P_RS_WRITE_ACK : P_WRITE_ACK;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001702 err = drbd_send_ack(mdev, pcmd, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001703 if (pcmd == P_RS_WRITE_ACK)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001704 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001705 } else {
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001706 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001707 /* we expect it to be marked out of sync anyways...
1708 * maybe assert this? */
1709 }
1710 dec_unacked(mdev);
1711 }
1712 /* we delete from the conflict detection hash _after_ we sent out the
1713 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
Philipp Reisner89e58e72011-01-19 13:12:45 +01001714 if (mdev->tconn->net_conf->two_primaries) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01001715 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001716 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1717 drbd_remove_epoch_entry_interval(mdev, peer_req);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001718 if (peer_req->flags & EE_RESTART_REQUESTS)
1719 restart_conflicting_writes(mdev, sector, peer_req->i.size);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001720 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbb3bfe92011-01-21 15:59:23 +01001721 } else
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001722 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001723
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001724 drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001725
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001726 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001727}
1728
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001729static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001730{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001731 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001732 struct drbd_peer_request *peer_req =
1733 container_of(w, struct drbd_peer_request, w);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001734 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001735
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001736 err = drbd_send_ack(mdev, ack, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001737 dec_unacked(mdev);
1738
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001739 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001740}
1741
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001742static int e_send_discard_write(struct drbd_work *w, int unused)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001743{
1744 return e_send_ack(w, P_DISCARD_WRITE);
1745}
1746
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001747static int e_send_retry_write(struct drbd_work *w, int unused)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001748{
1749 struct drbd_tconn *tconn = w->mdev->tconn;
1750
1751 return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
1752 P_RETRY_WRITE : P_DISCARD_WRITE);
1753}
1754
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001755static bool seq_greater(u32 a, u32 b)
1756{
1757 /*
1758 * We assume 32-bit wrap-around here.
1759 * For 24-bit wrap-around, we would have to shift:
1760 * a <<= 8; b <<= 8;
1761 */
1762 return (s32)a - (s32)b > 0;
1763}
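/* Example of the wrap-around behaviour (hypothetical values):
 * seq_greater(5, 0xfffffffd) is true, because (s32)(5 - 0xfffffffd) == 8 > 0,
 * so a sequence number that has just wrapped still counts as newer;
 * seq_greater(0xfffffffd, 5) is false, since the signed difference is -8. */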
1764
1765static u32 seq_max(u32 a, u32 b)
1766{
1767 return seq_greater(a, b) ? a : b;
1768}
1769
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001770static bool need_peer_seq(struct drbd_conf *mdev)
1771{
1772 struct drbd_tconn *tconn = mdev->tconn;
1773
1774 /*
1775 * We only need to keep track of the last packet_seq number of our peer
1776 * if we are in dual-primary mode and we have the discard flag set; see
1777 * handle_write_conflicts().
1778 */
1779 return tconn->net_conf->two_primaries &&
1780 test_bit(DISCARD_CONCURRENT, &tconn->flags);
1781}
1782
Andreas Gruenbacher43ae0772011-02-03 18:42:08 +01001783static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001784{
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001785 unsigned int newest_peer_seq;
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001786
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001787 if (need_peer_seq(mdev)) {
1788 spin_lock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001789 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1790 mdev->peer_seq = newest_peer_seq;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001791 spin_unlock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001792 /* wake up only if we actually changed mdev->peer_seq */
1793 if (peer_seq == newest_peer_seq)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001794 wake_up(&mdev->seq_wait);
1795 }
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001796}
1797
Philipp Reisnerb411b362009-09-25 16:07:19 -07001798/* Called from receive_Data.
1799 * Synchronize packets on sock with packets on msock.
1800 *
1801 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1802 * packet traveling on msock, they are still processed in the order they have
1803 * been sent.
1804 *
1805 * Note: we don't care for Ack packets overtaking P_DATA packets.
1806 *
1807 * In case packet_seq is larger than mdev->peer_seq number, there are
1808 * outstanding packets on the msock. We wait for them to arrive.
1809 * In case we are the logically next packet, we update mdev->peer_seq
1810 * ourselves. Correctly handles 32bit wrap around.
1811 *
1812 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1813 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1814 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1815 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1816 *
1817 * returns 0 if we may process the packet,
1818 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001819static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001820{
1821 DEFINE_WAIT(wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001822 long timeout;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001823 int ret;
1824
1825 if (!need_peer_seq(mdev))
1826 return 0;
1827
Philipp Reisnerb411b362009-09-25 16:07:19 -07001828 spin_lock(&mdev->peer_seq_lock);
1829 for (;;) {
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001830 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1831 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1832 ret = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001833 break;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001834 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001835 if (signal_pending(current)) {
1836 ret = -ERESTARTSYS;
1837 break;
1838 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001839 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001840 spin_unlock(&mdev->peer_seq_lock);
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001841 timeout = mdev->tconn->net_conf->ping_timeo*HZ/10;
1842 timeout = schedule_timeout(timeout);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001843 spin_lock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001844 if (!timeout) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001845 ret = -ETIMEDOUT;
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001846 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001847 break;
1848 }
1849 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001850 spin_unlock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001851 finish_wait(&mdev->seq_wait, &wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001852 return ret;
1853}
1854
Lars Ellenberg688593c2010-11-17 22:25:03 +01001855/* see also bio_flags_to_wire()
1856 * DRBD_REQ_*, because we need to semantically map the flags to data packet
1857 * flags and back. We may replicate to other kernel versions. */
1858static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001859{
Lars Ellenberg688593c2010-11-17 22:25:03 +01001860 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1861 (dpf & DP_FUA ? REQ_FUA : 0) |
1862 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1863 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001864}
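/* Illustration (using the flag names above): a peer that sent
 * DP_FUA | DP_FLUSH in p_data->dp_flags ends up with REQ_FUA | REQ_FLUSH set
 * on the locally submitted bio; DP_RW_SYNC and DP_DISCARD map to REQ_SYNC and
 * REQ_DISCARD in the same way.  The sending side builds dp_flags with the
 * complementary bio_flags_to_wire() mentioned above. */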
1865
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001866static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
1867 unsigned int size)
1868{
1869 struct drbd_interval *i;
1870
1871 repeat:
1872 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1873 struct drbd_request *req;
1874 struct bio_and_error m;
1875
1876 if (!i->local)
1877 continue;
1878 req = container_of(i, struct drbd_request, i);
1879 if (!(req->rq_state & RQ_POSTPONED))
1880 continue;
1881 req->rq_state &= ~RQ_POSTPONED;
1882 __req_mod(req, NEG_ACKED, &m);
1883 spin_unlock_irq(&mdev->tconn->req_lock);
1884 if (m.bio)
1885 complete_master_bio(mdev, &m);
1886 spin_lock_irq(&mdev->tconn->req_lock);
1887 goto repeat;
1888 }
1889}
1890
1891static int handle_write_conflicts(struct drbd_conf *mdev,
1892 struct drbd_peer_request *peer_req)
1893{
1894 struct drbd_tconn *tconn = mdev->tconn;
1895 bool resolve_conflicts = test_bit(DISCARD_CONCURRENT, &tconn->flags);
1896 sector_t sector = peer_req->i.sector;
1897 const unsigned int size = peer_req->i.size;
1898 struct drbd_interval *i;
1899 bool equal;
1900 int err;
1901
1902 /*
1903 * Inserting the peer request into the write_requests tree will prevent
1904 * new conflicting local requests from being added.
1905 */
1906 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
1907
1908 repeat:
1909 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1910 if (i == &peer_req->i)
1911 continue;
1912
1913 if (!i->local) {
1914 /*
1915 * Our peer has sent a conflicting remote request; this
1916 * should not happen in a two-node setup. Wait for the
1917 * earlier peer request to complete.
1918 */
1919 err = drbd_wait_misc(mdev, i);
1920 if (err)
1921 goto out;
1922 goto repeat;
1923 }
1924
1925 equal = i->sector == sector && i->size == size;
1926 if (resolve_conflicts) {
1927 /*
1928 * If the peer request is fully contained within the
1929 * overlapping request, it can be discarded; otherwise,
1930 * it will be retried once all overlapping requests
1931 * have completed.
1932 */
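			/* Example with hypothetical values: a local write
			 * covering sectors [1000, 1016) fully contains a peer
			 * write covering [1004, 1012), so discard below is
			 * true and the peer write is answered via
			 * e_send_discard_write instead of being applied. */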
1933 bool discard = i->sector <= sector && i->sector +
1934 (i->size >> 9) >= sector + (size >> 9);
1935
1936 if (!equal)
1937 dev_alert(DEV, "Concurrent writes detected: "
1938 "local=%llus +%u, remote=%llus +%u, "
1939 "assuming %s came first\n",
1940 (unsigned long long)i->sector, i->size,
1941 (unsigned long long)sector, size,
1942 discard ? "local" : "remote");
1943
1944 inc_unacked(mdev);
1945 peer_req->w.cb = discard ? e_send_discard_write :
1946 e_send_retry_write;
1947 list_add_tail(&peer_req->w.list, &mdev->done_ee);
1948 wake_asender(mdev->tconn);
1949
1950 err = -ENOENT;
1951 goto out;
1952 } else {
1953 struct drbd_request *req =
1954 container_of(i, struct drbd_request, i);
1955
1956 if (!equal)
1957 dev_alert(DEV, "Concurrent writes detected: "
1958 "local=%llus +%u, remote=%llus +%u\n",
1959 (unsigned long long)i->sector, i->size,
1960 (unsigned long long)sector, size);
1961
1962 if (req->rq_state & RQ_LOCAL_PENDING ||
1963 !(req->rq_state & RQ_POSTPONED)) {
1964 /*
1965 * Wait for the node with the discard flag to
1966 * decide if this request will be discarded or
1967 * retried. Requests that are discarded will
1968 * disappear from the write_requests tree.
1969 *
1970 * In addition, wait for the conflicting
1971 * request to finish locally before submitting
1972 * the conflicting peer request.
1973 */
1974 err = drbd_wait_misc(mdev, &req->i);
1975 if (err) {
1976 _conn_request_state(mdev->tconn,
1977 NS(conn, C_TIMEOUT),
1978 CS_HARD);
1979 fail_postponed_requests(mdev, sector, size);
1980 goto out;
1981 }
1982 goto repeat;
1983 }
1984 /*
1985 * Remember to restart the conflicting requests after
1986 * the new peer request has completed.
1987 */
1988 peer_req->flags |= EE_RESTART_REQUESTS;
1989 }
1990 }
1991 err = 0;
1992
1993 out:
1994 if (err)
1995 drbd_remove_epoch_entry_interval(mdev, peer_req);
1996 return err;
1997}
1998
Philipp Reisnerb411b362009-09-25 16:07:19 -07001999/* mirrored write */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002000static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002001{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002002 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002003 sector_t sector;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002004 struct drbd_peer_request *peer_req;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02002005 struct p_data *p = pi->data;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002006 u32 peer_seq = be32_to_cpu(p->seq_num);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002007 int rw = WRITE;
2008 u32 dp_flags;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002009 int err;
2010
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002011 mdev = vnr_to_mdev(tconn, pi->vnr);
2012 if (!mdev)
2013 return -EIO;
2014
Philipp Reisnerb411b362009-09-25 16:07:19 -07002015 if (!get_ldev(mdev)) {
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002016 int err2;
2017
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002018 err = wait_for_and_update_peer_seq(mdev, peer_seq);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002019 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002020 atomic_inc(&mdev->current_epoch->epoch_size);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002021 err2 = drbd_drain_block(mdev, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002022 if (!err)
2023 err = err2;
2024 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002025 }
2026
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01002027 /*
2028 * Corresponding put_ldev done either below (on various errors), or in
2029 * drbd_peer_request_endio, if we successfully submit the data at the
2030 * end of this function.
2031 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002032
2033 sector = be64_to_cpu(p->sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002034 peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002035 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002036 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002037 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002038 }
2039
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002040 peer_req->w.cb = e_end_block;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002041
Lars Ellenberg688593c2010-11-17 22:25:03 +01002042 dp_flags = be32_to_cpu(p->dp_flags);
2043 rw |= wire_flags_to_bio(mdev, dp_flags);
2044
2045 if (dp_flags & DP_MAY_SET_IN_SYNC)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002046 peer_req->flags |= EE_MAY_SET_IN_SYNC;
Lars Ellenberg688593c2010-11-17 22:25:03 +01002047
Philipp Reisnerb411b362009-09-25 16:07:19 -07002048 spin_lock(&mdev->epoch_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002049 peer_req->epoch = mdev->current_epoch;
2050 atomic_inc(&peer_req->epoch->epoch_size);
2051 atomic_inc(&peer_req->epoch->active);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002052 spin_unlock(&mdev->epoch_lock);
2053
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002054 if (mdev->tconn->net_conf->two_primaries) {
2055 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2056 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002057 goto out_interrupted;
Philipp Reisner87eeee42011-01-19 14:16:30 +01002058 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002059 err = handle_write_conflicts(mdev, peer_req);
2060 if (err) {
2061 spin_unlock_irq(&mdev->tconn->req_lock);
2062 if (err == -ENOENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002063 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002064 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002065 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002066 goto out_interrupted;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002067 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002068 } else
2069 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002070 list_add(&peer_req->w.list, &mdev->active_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002071 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002072
Philipp Reisner89e58e72011-01-19 13:12:45 +01002073 switch (mdev->tconn->net_conf->wire_protocol) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002074 case DRBD_PROT_C:
2075 inc_unacked(mdev);
2076 /* corresponding dec_unacked() in e_end_block()
2077 * or in _drbd_clear_done_ee, respectively */
2078 break;
2079 case DRBD_PROT_B:
2080 /* I really don't like it that the receiver thread
2081 * sends on the msock, but anyway */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002082 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002083 break;
2084 case DRBD_PROT_A:
2085 /* nothing to do */
2086 break;
2087 }
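	/* In other words, summarizing the switch above: protocol C waits for
	 * the local write to complete before e_end_block() sends P_WRITE_ACK,
	 * protocol B acknowledges receipt immediately with P_RECV_ACK, and
	 * protocol A sends no per-request ack at all. */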
2088
Lars Ellenberg6719fb02010-10-18 23:04:07 +02002089 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002090 /* In case we have the only disk of the cluster, */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002091 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2092 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2093 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
Lars Ellenberg181286a2011-03-31 15:18:56 +02002094 drbd_al_begin_io(mdev, &peer_req->i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002095 }
2096
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002097 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2098 if (!err)
2099 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002100
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002101 /* don't care for the reason here */
2102 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002103 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002104 list_del(&peer_req->w.list);
2105 drbd_remove_epoch_entry_interval(mdev, peer_req);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002106 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002107 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
Lars Ellenberg181286a2011-03-31 15:18:56 +02002108 drbd_al_complete_io(mdev, &peer_req->i);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002109
Philipp Reisnerb411b362009-09-25 16:07:19 -07002110out_interrupted:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002111 drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002112 put_ldev(mdev);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02002113 drbd_free_peer_req(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002114 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002115}
2116
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002117/* We may throttle resync, if the lower device seems to be busy,
2118 * and current sync rate is above c_min_rate.
2119 *
2120 * To decide whether or not the lower device is busy, we use a scheme similar
2121 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2122 * (more than 64 sectors) of activity we cannot account for with our own resync
2123 * activity, it obviously is "busy".
2124 *
2125 * The current sync rate used here uses only the most recent two step marks,
2126 * to have a short time average so we can react faster.
2127 */
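/* Worked example with hypothetical numbers, assuming the usual 4 KiB of data
 * per bitmap bit: if the sync mark consulted below was taken about 6 seconds
 * ago (dt == 6) and 7500 bitmap bits were cleared since then (db == 7500),
 * then dbdt = Bit2KB(7500 / 6) = 1250 * 4 = 5000 KiB/s.  With c_min_rate
 * configured to, say, 4000 KiB/s, dbdt exceeds it and we throttle, provided
 * the backing device also showed more than 64 sectors of activity that our
 * own resync cannot account for. */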
Philipp Reisnere3555d82010-11-07 15:56:29 +01002128int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002129{
2130 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2131 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01002132 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002133 int curr_events;
2134 int throttle = 0;
2135
2136 /* feature disabled? */
Lars Ellenbergf3990022011-03-23 14:31:09 +01002137 if (mdev->ldev->dc.c_min_rate == 0)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002138 return 0;
2139
Philipp Reisnere3555d82010-11-07 15:56:29 +01002140 spin_lock_irq(&mdev->al_lock);
2141 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2142 if (tmp) {
2143 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2144 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2145 spin_unlock_irq(&mdev->al_lock);
2146 return 0;
2147 }
2148 /* Do not slow down if app IO is already waiting for this extent */
2149 }
2150 spin_unlock_irq(&mdev->al_lock);
2151
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002152 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2153 (int)part_stat_read(&disk->part0, sectors[1]) -
2154 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01002155
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002156 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2157 unsigned long rs_left;
2158 int i;
2159
2160 mdev->rs_last_events = curr_events;
2161
2162 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2163 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01002164 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2165
2166 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2167 rs_left = mdev->ov_left;
2168 else
2169 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002170
2171 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2172 if (!dt)
2173 dt++;
2174 db = mdev->rs_mark_left[i] - rs_left;
2175 dbdt = Bit2KB(db/dt);
2176
Lars Ellenbergf3990022011-03-23 14:31:09 +01002177 if (dbdt > mdev->ldev->dc.c_min_rate)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002178 throttle = 1;
2179 }
2180 return throttle;
2181}
2182
2183
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002184static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002185{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002186 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002187 sector_t sector;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002188 sector_t capacity;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002189 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002190 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002191 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002192 unsigned int fault_type;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02002193 struct p_block_req *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002194
2195 mdev = vnr_to_mdev(tconn, pi->vnr);
2196 if (!mdev)
2197 return -EIO;
2198 capacity = drbd_get_capacity(mdev->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002199
2200 sector = be64_to_cpu(p->sector);
2201 size = be32_to_cpu(p->blksize);
2202
Andreas Gruenbacherc670a392011-02-21 12:41:39 +01002203 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002204 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2205 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002206 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002207 }
2208 if (sector + (size>>9) > capacity) {
2209 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2210 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002211 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002212 }
2213
2214 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002215 verb = 1;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002216 switch (pi->cmd) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002217 case P_DATA_REQUEST:
2218 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2219 break;
2220 case P_RS_DATA_REQUEST:
2221 case P_CSUM_RS_REQUEST:
2222 case P_OV_REQUEST:
2223 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2224 break;
2225 case P_OV_REPLY:
2226 verb = 0;
2227 dec_rs_pending(mdev);
2228 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2229 break;
2230 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002231 BUG();
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002232 }
2233 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002234 dev_err(DEV, "Can not satisfy peer's read request, "
2235 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002236
Lars Ellenberga821cc42010-09-06 12:31:37 +02002237 /* drain possibly payload */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002238 return drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002239 }
2240
2241 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2242 * "criss-cross" setup, that might cause write-out on some other DRBD,
2243 * which in turn might block on the other node at this very place. */
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02002244 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002245 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002246 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002247 return -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002248 }
2249
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002250 switch (pi->cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002251 case P_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002252 peer_req->w.cb = w_e_end_data_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002253 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002254 /* application IO, don't drbd_rs_begin_io */
2255 goto submit;
2256
Philipp Reisnerb411b362009-09-25 16:07:19 -07002257 case P_RS_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002258 peer_req->w.cb = w_e_end_rsdata_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002259 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002260 /* used in the sector offset progress display */
2261 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002262 break;
2263
2264 case P_OV_REPLY:
2265 case P_CSUM_RS_REQUEST:
2266 fault_type = DRBD_FAULT_RS_RD;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002267 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002268 if (!di)
2269 goto out_free_e;
2270
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002271 di->digest_size = pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002272 di->digest = (((char *)di)+sizeof(struct digest_info));
2273
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002274 peer_req->digest = di;
2275 peer_req->flags |= EE_HAS_DIGEST;
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002276
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002277 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002278 goto out_free_e;
2279
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002280 if (pi->cmd == P_CSUM_RS_REQUEST) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002281 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002282 peer_req->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002283 /* used in the sector offset progress display */
2284 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002285 } else if (pi->cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002286 /* track progress, we may need to throttle */
2287 atomic_add(size >> 9, &mdev->rs_sect_in);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002288 peer_req->w.cb = w_e_end_ov_reply;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002289 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002290 /* drbd_rs_begin_io done when we sent this request,
2291 * but accounting still needs to be done. */
2292 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002293 }
2294 break;
2295
2296 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002297 if (mdev->ov_start_sector == ~(sector_t)0 &&
Philipp Reisner31890f42011-01-19 14:12:51 +01002298 mdev->tconn->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002299 unsigned long now = jiffies;
2300 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002301 mdev->ov_start_sector = sector;
2302 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002303 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2304 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002305 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2306 mdev->rs_mark_left[i] = mdev->ov_left;
2307 mdev->rs_mark_time[i] = now;
2308 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002309 dev_info(DEV, "Online Verify start sector: %llu\n",
2310 (unsigned long long)sector);
2311 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002312 peer_req->w.cb = w_e_end_ov_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002313 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002314 break;
2315
Philipp Reisnerb411b362009-09-25 16:07:19 -07002316 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002317 BUG();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002318 }
2319
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002320 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2321 * wrt the receiver, but it is not as straightforward as it may seem.
2322 * Various places in the resync start and stop logic assume resync
2323 * requests are processed in order, requeuing this on the worker thread
2324 * introduces a bunch of new code for synchronization between threads.
2325 *
2326 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2327 * "forever", throttling after drbd_rs_begin_io will lock that extent
2328 * for application writes for the same time. For now, just throttle
2329 * here, where the rest of the code expects the receiver to sleep for
2330 * a while, anyways.
2331 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002332
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002333 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2334 * this defers syncer requests for some time, before letting at least
2335 * on request through. The resync controller on the receiving side
2336 * will adapt to the incoming rate accordingly.
2337 *
2338 * We cannot throttle here if remote is Primary/SyncTarget:
2339 * we would also throttle its application reads.
2340 * In that case, throttling is done on the SyncTarget only.
2341 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002342 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2343 schedule_timeout_uninterruptible(HZ/10);
2344 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002345 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002346
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002347submit_for_resync:
2348 atomic_add(size >> 9, &mdev->rs_sect_ev);
2349
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002350submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002351 inc_unacked(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002352 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002353 list_add_tail(&peer_req->w.list, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002354 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002355
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01002356 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002357 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002358
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002359 /* don't care for the reason here */
2360 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002361 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002362 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002363 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002364 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2365
Philipp Reisnerb411b362009-09-25 16:07:19 -07002366out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002367 put_ldev(mdev);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02002368 drbd_free_peer_req(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002369 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002370}
2371
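/* After-split-brain auto-recovery for the "after-sb-0pri" case (neither
 * node was primary).  Return convention, as consumed by
 * drbd_sync_handshake():
 *    1  discard the peer's data  -> this node becomes sync source
 *   -1  discard the local data   -> this node becomes sync target
 * -100  the configured policy could not reach a decision
 */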
2372static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2373{
2374 int self, peer, rv = -100;
2375 unsigned long ch_self, ch_peer;
2376
2377 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2378 peer = mdev->p_uuid[UI_BITMAP] & 1;
2379
2380 ch_peer = mdev->p_uuid[UI_SIZE];
2381 ch_self = mdev->comm_bm_set;
2382
Philipp Reisner89e58e72011-01-19 13:12:45 +01002383 switch (mdev->tconn->net_conf->after_sb_0p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002384 case ASB_CONSENSUS:
2385 case ASB_DISCARD_SECONDARY:
2386 case ASB_CALL_HELPER:
2387 dev_err(DEV, "Configuration error.\n");
2388 break;
2389 case ASB_DISCONNECT:
2390 break;
2391 case ASB_DISCARD_YOUNGER_PRI:
2392 if (self == 0 && peer == 1) {
2393 rv = -1;
2394 break;
2395 }
2396 if (self == 1 && peer == 0) {
2397 rv = 1;
2398 break;
2399 }
2400 /* Else fall through to one of the other strategies... */
2401 case ASB_DISCARD_OLDER_PRI:
2402 if (self == 0 && peer == 1) {
2403 rv = 1;
2404 break;
2405 }
2406 if (self == 1 && peer == 0) {
2407 rv = -1;
2408 break;
2409 }
2410 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002411 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002412			     "Using discard-least-changes instead\n");
2413 case ASB_DISCARD_ZERO_CHG:
2414 if (ch_peer == 0 && ch_self == 0) {
Philipp Reisner25703f82011-02-07 14:35:25 +01002415 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002416 ? -1 : 1;
2417 break;
2418 } else {
2419 if (ch_peer == 0) { rv = 1; break; }
2420 if (ch_self == 0) { rv = -1; break; }
2421 }
Philipp Reisner89e58e72011-01-19 13:12:45 +01002422 if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002423 break;
2424 case ASB_DISCARD_LEAST_CHG:
2425 if (ch_self < ch_peer)
2426 rv = -1;
2427 else if (ch_self > ch_peer)
2428 rv = 1;
2429 else /* ( ch_self == ch_peer ) */
2430 /* Well, then use something else. */
Philipp Reisner25703f82011-02-07 14:35:25 +01002431 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002432 ? -1 : 1;
2433 break;
2434 case ASB_DISCARD_LOCAL:
2435 rv = -1;
2436 break;
2437 case ASB_DISCARD_REMOTE:
2438 rv = 1;
2439 }
2440
2441 return rv;
2442}
2443
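/* Same return convention as drbd_asb_recover_0p(), but for the
 * "after-sb-1pri" policy (exactly one node was primary).  Most strategies
 * delegate to drbd_asb_recover_0p() and then decide based on the local
 * role whether to accept that verdict. */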
2444static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2445{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002446 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002447
Philipp Reisner89e58e72011-01-19 13:12:45 +01002448 switch (mdev->tconn->net_conf->after_sb_1p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002449 case ASB_DISCARD_YOUNGER_PRI:
2450 case ASB_DISCARD_OLDER_PRI:
2451 case ASB_DISCARD_LEAST_CHG:
2452 case ASB_DISCARD_LOCAL:
2453 case ASB_DISCARD_REMOTE:
2454 dev_err(DEV, "Configuration error.\n");
2455 break;
2456 case ASB_DISCONNECT:
2457 break;
2458 case ASB_CONSENSUS:
2459 hg = drbd_asb_recover_0p(mdev);
2460 if (hg == -1 && mdev->state.role == R_SECONDARY)
2461 rv = hg;
2462 if (hg == 1 && mdev->state.role == R_PRIMARY)
2463 rv = hg;
2464 break;
2465 case ASB_VIOLENTLY:
2466 rv = drbd_asb_recover_0p(mdev);
2467 break;
2468 case ASB_DISCARD_SECONDARY:
2469 return mdev->state.role == R_PRIMARY ? 1 : -1;
2470 case ASB_CALL_HELPER:
2471 hg = drbd_asb_recover_0p(mdev);
2472 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002473 enum drbd_state_rv rv2;
2474
2475 drbd_set_role(mdev, R_SECONDARY, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002476 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2477 * we might be here in C_WF_REPORT_PARAMS which is transient.
2478 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002479 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2480 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002481 drbd_khelper(mdev, "pri-lost-after-sb");
2482 } else {
2483 dev_warn(DEV, "Successfully gave up primary role.\n");
2484 rv = hg;
2485 }
2486 } else
2487 rv = hg;
2488 }
2489
2490 return rv;
2491}
2492
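/* Again the same return convention, for the "after-sb-2pri" policy (both
 * nodes were primary).  Only disconnect, violently and call-helper make
 * sense here; the discard-* and consensus strategies are rejected as
 * configuration errors. */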
2493static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2494{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002495 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002496
Philipp Reisner89e58e72011-01-19 13:12:45 +01002497 switch (mdev->tconn->net_conf->after_sb_2p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002498 case ASB_DISCARD_YOUNGER_PRI:
2499 case ASB_DISCARD_OLDER_PRI:
2500 case ASB_DISCARD_LEAST_CHG:
2501 case ASB_DISCARD_LOCAL:
2502 case ASB_DISCARD_REMOTE:
2503 case ASB_CONSENSUS:
2504 case ASB_DISCARD_SECONDARY:
2505 dev_err(DEV, "Configuration error.\n");
2506 break;
2507 case ASB_VIOLENTLY:
2508 rv = drbd_asb_recover_0p(mdev);
2509 break;
2510 case ASB_DISCONNECT:
2511 break;
2512 case ASB_CALL_HELPER:
2513 hg = drbd_asb_recover_0p(mdev);
2514 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002515 enum drbd_state_rv rv2;
2516
Philipp Reisnerb411b362009-09-25 16:07:19 -07002517 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2518 * we might be here in C_WF_REPORT_PARAMS which is transient.
2519 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002520 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2521 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002522 drbd_khelper(mdev, "pri-lost-after-sb");
2523 } else {
2524 dev_warn(DEV, "Successfully gave up primary role.\n");
2525 rv = hg;
2526 }
2527 } else
2528 rv = hg;
2529 }
2530
2531 return rv;
2532}
2533
2534static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2535 u64 bits, u64 flags)
2536{
2537 if (!uuid) {
2538 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2539 return;
2540 }
2541 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2542 text,
2543 (unsigned long long)uuid[UI_CURRENT],
2544 (unsigned long long)uuid[UI_BITMAP],
2545 (unsigned long long)uuid[UI_HISTORY_START],
2546 (unsigned long long)uuid[UI_HISTORY_END],
2547 (unsigned long long)bits,
2548 (unsigned long long)flags);
2549}
2550
2551/*
2552 100 after split brain try auto recover
2553 2 C_SYNC_SOURCE set BitMap
2554 1 C_SYNC_SOURCE use BitMap
2555 0 no Sync
2556 -1 C_SYNC_TARGET use BitMap
2557 -2 C_SYNC_TARGET set BitMap
2558 -100 after split brain, disconnect
2559-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002560-1091 requires proto 91
2561-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002562 */
2563static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2564{
2565 u64 self, peer;
2566 int i, j;
2567
2568 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2569 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2570
2571 *rule_nr = 10;
2572 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2573 return 0;
2574
2575 *rule_nr = 20;
2576 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2577 peer != UUID_JUST_CREATED)
2578 return -2;
2579
2580 *rule_nr = 30;
2581 if (self != UUID_JUST_CREATED &&
2582 (peer == UUID_JUST_CREATED || peer == (u64)0))
2583 return 2;
2584
2585 if (self == peer) {
2586 int rct, dc; /* roles at crash time */
2587
2588 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2589
Philipp Reisner31890f42011-01-19 14:12:51 +01002590 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002591 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002592
2593 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2594 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2595 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2596 drbd_uuid_set_bm(mdev, 0UL);
2597
2598 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2599 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2600 *rule_nr = 34;
2601 } else {
2602 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2603 *rule_nr = 36;
2604 }
2605
2606 return 1;
2607 }
2608
2609 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2610
Philipp Reisner31890f42011-01-19 14:12:51 +01002611 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002612 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002613
2614 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2615 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2616 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2617
2618 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2619 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2620 mdev->p_uuid[UI_BITMAP] = 0UL;
2621
2622 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2623 *rule_nr = 35;
2624 } else {
2625 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2626 *rule_nr = 37;
2627 }
2628
2629 return -1;
2630 }
2631
2632 /* Common power [off|failure] */
2633 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2634 (mdev->p_uuid[UI_FLAGS] & 2);
2635 /* lowest bit is set when we were primary,
2636 * next bit (weight 2) is set when peer was primary */
2637 *rule_nr = 40;
2638
2639 switch (rct) {
2640 case 0: /* !self_pri && !peer_pri */ return 0;
2641 case 1: /* self_pri && !peer_pri */ return 1;
2642 case 2: /* !self_pri && peer_pri */ return -1;
2643 case 3: /* self_pri && peer_pri */
Philipp Reisner25703f82011-02-07 14:35:25 +01002644 dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002645 return dc ? -1 : 1;
2646 }
2647 }
2648
2649 *rule_nr = 50;
2650 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2651 if (self == peer)
2652 return -1;
2653
2654 *rule_nr = 51;
2655 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2656 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002657 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002658 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2659 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2660 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002661		/* The last P_SYNC_UUID did not get through. Undo the last
 2662		   start-of-resync (as sync source) modifications of the peer's UUIDs. */
2663
Philipp Reisner31890f42011-01-19 14:12:51 +01002664 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002665 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002666
2667 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2668 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002669
 2670			dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
2671 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2672
Philipp Reisnerb411b362009-09-25 16:07:19 -07002673 return -1;
2674 }
2675 }
2676
2677 *rule_nr = 60;
2678 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2679 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2680 peer = mdev->p_uuid[i] & ~((u64)1);
2681 if (self == peer)
2682 return -2;
2683 }
2684
2685 *rule_nr = 70;
2686 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2687 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2688 if (self == peer)
2689 return 1;
2690
2691 *rule_nr = 71;
2692 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2693 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002694 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002695 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2696 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2697 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002698		/* The last P_SYNC_UUID did not get through. Undo the last
 2699		   start-of-resync (as sync source) modifications of our UUIDs. */
2700
Philipp Reisner31890f42011-01-19 14:12:51 +01002701 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002702 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002703
2704 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2705 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2706
Philipp Reisner4a23f262011-01-11 17:42:17 +01002707 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002708 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2709 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2710
2711 return 1;
2712 }
2713 }
2714
2715
2716 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002717 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002718 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2719 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2720 if (self == peer)
2721 return 2;
2722 }
2723
2724 *rule_nr = 90;
2725 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2726 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2727 if (self == peer && self != ((u64)0))
2728 return 100;
2729
2730 *rule_nr = 100;
2731 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2732 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2733 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2734 peer = mdev->p_uuid[j] & ~((u64)1);
2735 if (self == peer)
2736 return -100;
2737 }
2738 }
2739
2740 return -1000;
2741}
2742
2743/* drbd_sync_handshake() returns the new conn state on success, or
2744 CONN_MASK (-1) on failure.
2745 */
2746static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2747 enum drbd_disk_state peer_disk) __must_hold(local)
2748{
2749 int hg, rule_nr;
2750 enum drbd_conns rv = C_MASK;
2751 enum drbd_disk_state mydisk;
2752
2753 mydisk = mdev->state.disk;
2754 if (mydisk == D_NEGOTIATING)
2755 mydisk = mdev->new_state_tmp.disk;
2756
2757 dev_info(DEV, "drbd_sync_handshake:\n");
2758 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2759 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2760 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2761
2762 hg = drbd_uuid_compare(mdev, &rule_nr);
2763
2764 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2765
2766 if (hg == -1000) {
2767 dev_alert(DEV, "Unrelated data, aborting!\n");
2768 return C_MASK;
2769 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002770 if (hg < -1000) {
2771 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002772 return C_MASK;
2773 }
2774
2775 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2776 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2777 int f = (hg == -100) || abs(hg) == 2;
2778 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2779 if (f)
2780 hg = hg*2;
2781 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2782 hg > 0 ? "source" : "target");
2783 }
2784
Adam Gandelman3a11a482010-04-08 16:48:23 -07002785 if (abs(hg) == 100)
2786 drbd_khelper(mdev, "initial-split-brain");
2787
Philipp Reisner89e58e72011-01-19 13:12:45 +01002788 if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002789 int pcount = (mdev->state.role == R_PRIMARY)
2790 + (peer_role == R_PRIMARY);
2791 int forced = (hg == -100);
2792
2793 switch (pcount) {
2794 case 0:
2795 hg = drbd_asb_recover_0p(mdev);
2796 break;
2797 case 1:
2798 hg = drbd_asb_recover_1p(mdev);
2799 break;
2800 case 2:
2801 hg = drbd_asb_recover_2p(mdev);
2802 break;
2803 }
2804 if (abs(hg) < 100) {
2805 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2806 "automatically solved. Sync from %s node\n",
2807 pcount, (hg < 0) ? "peer" : "this");
2808 if (forced) {
2809 dev_warn(DEV, "Doing a full sync, since"
 2810				     " UUIDs were ambiguous.\n");
2811 hg = hg*2;
2812 }
2813 }
2814 }
2815
2816 if (hg == -100) {
Philipp Reisner89e58e72011-01-19 13:12:45 +01002817 if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002818 hg = -1;
Philipp Reisner89e58e72011-01-19 13:12:45 +01002819 if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002820 hg = 1;
2821
2822 if (abs(hg) < 100)
2823 dev_warn(DEV, "Split-Brain detected, manually solved. "
2824 "Sync from %s node\n",
2825 (hg < 0) ? "peer" : "this");
2826 }
2827
2828 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01002829 /* FIXME this log message is not correct if we end up here
2830 * after an attempted attach on a diskless node.
2831 * We just refuse to attach -- well, we drop the "connection"
2832 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07002833 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002834 drbd_khelper(mdev, "split-brain");
2835 return C_MASK;
2836 }
2837
2838 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2839 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2840 return C_MASK;
2841 }
2842
2843 if (hg < 0 && /* by intention we do not use mydisk here. */
2844 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
Philipp Reisner89e58e72011-01-19 13:12:45 +01002845 switch (mdev->tconn->net_conf->rr_conflict) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002846 case ASB_CALL_HELPER:
2847 drbd_khelper(mdev, "pri-lost");
2848 /* fall through */
2849 case ASB_DISCONNECT:
2850 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2851 return C_MASK;
2852 case ASB_VIOLENTLY:
2853 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
 2854			     " assumption\n");
2855 }
2856 }
2857
Philipp Reisner8169e412011-03-15 18:40:27 +01002858 if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002859 if (hg == 0)
2860 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2861 else
2862 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2863 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2864 abs(hg) >= 2 ? "full" : "bit-map based");
2865 return C_MASK;
2866 }
2867
Philipp Reisnerb411b362009-09-25 16:07:19 -07002868 if (abs(hg) >= 2) {
2869 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01002870 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2871 BM_LOCKED_SET_ALLOWED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002872 return C_MASK;
2873 }
2874
2875 if (hg > 0) { /* become sync source. */
2876 rv = C_WF_BITMAP_S;
2877 } else if (hg < 0) { /* become sync target */
2878 rv = C_WF_BITMAP_T;
2879 } else {
2880 rv = C_CONNECTED;
2881 if (drbd_bm_total_weight(mdev)) {
2882 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2883 drbd_bm_total_weight(mdev));
2884 }
2885 }
2886
2887 return rv;
2888}
2889
2890/* returns 1 if invalid */
2891static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2892{
2893 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2894 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2895 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2896 return 0;
2897
2898 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2899 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2900 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2901 return 1;
2902
2903 /* everything else is valid if they are equal on both sides. */
2904 if (peer == self)
2905 return 0;
2906
 2907	/* everything else is invalid. */
2908 return 1;
2909}
2910
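/* Check the peer's protocol settings (struct p_protocol) against our own:
 * wire protocol, the three after-split-brain policies, two-primaries, the
 * want_lose/discard flag and, for agreed_pro_version >= 87, the
 * data-integrity-alg.  Any mismatch tears the connection down. */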
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002911static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002912{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02002913 struct p_protocol *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002914 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002915 int p_want_lose, p_two_primaries, cf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002916 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2917
Philipp Reisnerb411b362009-09-25 16:07:19 -07002918 p_proto = be32_to_cpu(p->protocol);
2919 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2920 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2921 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002922 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002923 cf = be32_to_cpu(p->conn_flags);
2924 p_want_lose = cf & CF_WANT_LOSE;
2925
Philipp Reisner72046242011-03-15 18:51:47 +01002926 clear_bit(CONN_DRY_RUN, &tconn->flags);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002927
2928 if (cf & CF_DRY_RUN)
Philipp Reisner72046242011-03-15 18:51:47 +01002929 set_bit(CONN_DRY_RUN, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002930
Philipp Reisner72046242011-03-15 18:51:47 +01002931 if (p_proto != tconn->net_conf->wire_protocol) {
2932 conn_err(tconn, "incompatible communication protocols\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002933 goto disconnect;
2934 }
2935
Philipp Reisner72046242011-03-15 18:51:47 +01002936 if (cmp_after_sb(p_after_sb_0p, tconn->net_conf->after_sb_0p)) {
2937 conn_err(tconn, "incompatible after-sb-0pri settings\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002938 goto disconnect;
2939 }
2940
Philipp Reisner72046242011-03-15 18:51:47 +01002941 if (cmp_after_sb(p_after_sb_1p, tconn->net_conf->after_sb_1p)) {
2942 conn_err(tconn, "incompatible after-sb-1pri settings\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002943 goto disconnect;
2944 }
2945
Philipp Reisner72046242011-03-15 18:51:47 +01002946 if (cmp_after_sb(p_after_sb_2p, tconn->net_conf->after_sb_2p)) {
2947 conn_err(tconn, "incompatible after-sb-2pri settings\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002948 goto disconnect;
2949 }
2950
Philipp Reisner72046242011-03-15 18:51:47 +01002951 if (p_want_lose && tconn->net_conf->want_lose) {
2952 conn_err(tconn, "both sides have the 'want_lose' flag set\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002953 goto disconnect;
2954 }
2955
Philipp Reisner72046242011-03-15 18:51:47 +01002956 if (p_two_primaries != tconn->net_conf->two_primaries) {
2957 conn_err(tconn, "incompatible setting of the two-primaries options\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002958 goto disconnect;
2959 }
2960
Philipp Reisner72046242011-03-15 18:51:47 +01002961 if (tconn->agreed_pro_version >= 87) {
2962 unsigned char *my_alg = tconn->net_conf->integrity_alg;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002963 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002964
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002965 err = drbd_recv_all(tconn, p_integrity_alg, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002966 if (err)
2967 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002968
2969 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2970 if (strcmp(p_integrity_alg, my_alg)) {
Philipp Reisner72046242011-03-15 18:51:47 +01002971 conn_err(tconn, "incompatible setting of the data-integrity-alg\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002972 goto disconnect;
2973 }
Philipp Reisner72046242011-03-15 18:51:47 +01002974 conn_info(tconn, "data-integrity-alg: %s\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07002975 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2976 }
2977
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002978 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002979
2980disconnect:
Philipp Reisner72046242011-03-15 18:51:47 +01002981 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002982 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002983}
2984
2985/* helper function
2986 * input: alg name, feature name
2987 * return: NULL (alg name was "")
2988 * ERR_PTR(error) if something goes wrong
2989 * or the crypto hash ptr, if it worked out ok. */
2990struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2991 const char *alg, const char *name)
2992{
2993 struct crypto_hash *tfm;
2994
2995 if (!alg[0])
2996 return NULL;
2997
2998 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2999 if (IS_ERR(tfm)) {
3000 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3001 alg, name, PTR_ERR(tfm));
3002 return tfm;
3003 }
3004 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
3005 crypto_free_hash(tfm);
3006 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
3007 return ERR_PTR(-EINVAL);
3008 }
3009 return tfm;
3010}
3011
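/* Read and discard pi->size bytes of payload, in chunks of at most
 * DRBD_SOCKET_BUFFER_SIZE, so the receive stream stays aligned on packet
 * boundaries even though this packet is being ignored. */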
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003012static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003013{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003014 void *buffer = tconn->data.rbuf;
3015 int size = pi->size;
3016
3017 while (size) {
3018 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3019 s = drbd_recv(tconn, buffer, s);
3020 if (s <= 0) {
3021 if (s < 0)
3022 return s;
3023 break;
3024 }
3025 size -= s;
3026 }
3027 if (size)
3028 return -EIO;
3029 return 0;
3030}
3031
3032/*
3033 * config_unknown_volume - device configuration command for unknown volume
3034 *
3035 * When a device is added to an existing connection, the node on which the
3036 * device is added first will send configuration commands to its peer but the
3037 * peer will not know about the device yet. It will warn and ignore these
3038 * commands. Once the device is added on the second node, the second node will
3039 * send the same device configuration commands, but in the other direction.
3040 *
3041 * (We can also end up here if drbd is misconfigured.)
3042 */
3043static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3044{
3045 conn_warn(tconn, "Volume %u unknown; ignoring %s packet\n",
3046 pi->vnr, cmdname(pi->cmd));
3047 return ignore_remaining_packet(tconn, pi);
3048}
3049
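/* Receive the peer's resync parameters (struct p_rs_param*).  Depending
 * on the agreed protocol version this carries only the resync rate
 * (apv <= 87), additionally verify-alg/csums-alg (apv >= 88/89), and the
 * dynamic resync controller settings (apv >= 95), for which the fifo
 * plan buffer may have to be reallocated. */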
3050static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3051{
3052 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003053 struct p_rs_param_95 *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003054 unsigned int header_size, data_size, exp_max_sz;
3055 struct crypto_hash *verify_tfm = NULL;
3056 struct crypto_hash *csums_tfm = NULL;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003057 const int apv = tconn->agreed_pro_version;
Philipp Reisner778f2712010-07-06 11:14:00 +02003058 int *rs_plan_s = NULL;
3059 int fifo_size = 0;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003060 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003061
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003062 mdev = vnr_to_mdev(tconn, pi->vnr);
3063 if (!mdev)
3064 return config_unknown_volume(tconn, pi);
3065
Philipp Reisnerb411b362009-09-25 16:07:19 -07003066 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3067 : apv == 88 ? sizeof(struct p_rs_param)
3068 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003069 : apv <= 94 ? sizeof(struct p_rs_param_89)
3070 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003071
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003072 if (pi->size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003073 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003074 pi->size, exp_max_sz);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003075 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003076 }
3077
3078 if (apv <= 88) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003079 header_size = sizeof(struct p_rs_param);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003080 data_size = pi->size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003081 } else if (apv <= 94) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003082 header_size = sizeof(struct p_rs_param_89);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003083 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003084 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003085 } else {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003086 header_size = sizeof(struct p_rs_param_95);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003087 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003088 D_ASSERT(data_size == 0);
3089 }
3090
3091 /* initialize verify_alg and csums_alg */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003092 p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003093 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3094
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003095 err = drbd_recv_all(mdev->tconn, p, header_size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003096 if (err)
3097 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003098
Lars Ellenbergf3990022011-03-23 14:31:09 +01003099 if (get_ldev(mdev)) {
3100 mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
3101 put_ldev(mdev);
3102 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003103
3104 if (apv >= 88) {
3105 if (apv == 88) {
3106 if (data_size > SHARED_SECRET_MAX) {
3107 dev_err(DEV, "verify-alg too long, "
3108 "peer wants %u, accepting only %u byte\n",
3109 data_size, SHARED_SECRET_MAX);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003110 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003111 }
3112
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003113 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3114 if (err)
3115 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003116
3117 /* we expect NUL terminated string */
3118 /* but just in case someone tries to be evil */
3119 D_ASSERT(p->verify_alg[data_size-1] == 0);
3120 p->verify_alg[data_size-1] = 0;
3121
3122 } else /* apv >= 89 */ {
3123 /* we still expect NUL terminated strings */
3124 /* but just in case someone tries to be evil */
3125 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3126 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3127 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3128 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3129 }
3130
Lars Ellenbergf3990022011-03-23 14:31:09 +01003131 if (strcmp(mdev->tconn->net_conf->verify_alg, p->verify_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003132 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3133 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
Lars Ellenbergf3990022011-03-23 14:31:09 +01003134 mdev->tconn->net_conf->verify_alg, p->verify_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003135 goto disconnect;
3136 }
3137 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3138 p->verify_alg, "verify-alg");
3139 if (IS_ERR(verify_tfm)) {
3140 verify_tfm = NULL;
3141 goto disconnect;
3142 }
3143 }
3144
Lars Ellenbergf3990022011-03-23 14:31:09 +01003145 if (apv >= 89 && strcmp(mdev->tconn->net_conf->csums_alg, p->csums_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003146 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3147 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
Lars Ellenbergf3990022011-03-23 14:31:09 +01003148 mdev->tconn->net_conf->csums_alg, p->csums_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003149 goto disconnect;
3150 }
3151 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3152 p->csums_alg, "csums-alg");
3153 if (IS_ERR(csums_tfm)) {
3154 csums_tfm = NULL;
3155 goto disconnect;
3156 }
3157 }
3158
Lars Ellenbergf3990022011-03-23 14:31:09 +01003159 if (apv > 94 && get_ldev(mdev)) {
3160 mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
3161 mdev->ldev->dc.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3162 mdev->ldev->dc.c_delay_target = be32_to_cpu(p->c_delay_target);
3163 mdev->ldev->dc.c_fill_target = be32_to_cpu(p->c_fill_target);
3164 mdev->ldev->dc.c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02003165
Lars Ellenbergf3990022011-03-23 14:31:09 +01003166 fifo_size = (mdev->ldev->dc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
Philipp Reisner778f2712010-07-06 11:14:00 +02003167 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
3168 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
3169 if (!rs_plan_s) {
 3170				dev_err(DEV, "kzalloc of fifo_buffer failed");
Lars Ellenbergf3990022011-03-23 14:31:09 +01003171 put_ldev(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02003172 goto disconnect;
3173 }
3174 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01003175 put_ldev(mdev);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003176 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003177
3178 spin_lock(&mdev->peer_seq_lock);
3179 /* lock against drbd_nl_syncer_conf() */
3180 if (verify_tfm) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01003181 strcpy(mdev->tconn->net_conf->verify_alg, p->verify_alg);
3182 mdev->tconn->net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3183 crypto_free_hash(mdev->tconn->verify_tfm);
3184 mdev->tconn->verify_tfm = verify_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003185 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3186 }
3187 if (csums_tfm) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01003188 strcpy(mdev->tconn->net_conf->csums_alg, p->csums_alg);
3189 mdev->tconn->net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3190 crypto_free_hash(mdev->tconn->csums_tfm);
3191 mdev->tconn->csums_tfm = csums_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003192 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3193 }
Philipp Reisner778f2712010-07-06 11:14:00 +02003194 if (fifo_size != mdev->rs_plan_s.size) {
3195 kfree(mdev->rs_plan_s.values);
3196 mdev->rs_plan_s.values = rs_plan_s;
3197 mdev->rs_plan_s.size = fifo_size;
3198 mdev->rs_planed = 0;
3199 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003200 spin_unlock(&mdev->peer_seq_lock);
3201 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003202 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003203
Philipp Reisnerb411b362009-09-25 16:07:19 -07003204disconnect:
3205 /* just for completeness: actually not needed,
3206 * as this is not reached if csums_tfm was ok. */
3207 crypto_free_hash(csums_tfm);
3208 /* but free the verify_tfm again, if csums_tfm did not work out */
3209 crypto_free_hash(verify_tfm);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003210 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003211 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003212}
3213
Philipp Reisnerb411b362009-09-25 16:07:19 -07003214/* warn if the arguments differ by more than 12.5% */
3215static void warn_if_differ_considerably(struct drbd_conf *mdev,
3216 const char *s, sector_t a, sector_t b)
3217{
3218 sector_t d;
3219 if (a == 0 || b == 0)
3220 return;
3221 d = (a > b) ? (a - b) : (b - a);
3222 if (d > (a>>3) || d > (b>>3))
3223 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3224 (unsigned long long)a, (unsigned long long)b);
3225}
3226
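/* Handle the peer's sizes packet (struct p_sizes): remember its disk and
 * requested user size, warn about large deviations, refuse to shrink a
 * device with usable data while connecting, re-evaluate our own size and,
 * if we grew while connected, start a resync of the new area unless the
 * peer requested --assume-clean (DDSF_NO_RESYNC). */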
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003227static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003228{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003229 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003230 struct p_sizes *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003231 enum determine_dev_size dd = unchanged;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003232 sector_t p_size, p_usize, my_usize;
3233 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003234 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003235
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003236 mdev = vnr_to_mdev(tconn, pi->vnr);
3237 if (!mdev)
3238 return config_unknown_volume(tconn, pi);
3239
Philipp Reisnerb411b362009-09-25 16:07:19 -07003240 p_size = be64_to_cpu(p->d_size);
3241 p_usize = be64_to_cpu(p->u_size);
3242
Philipp Reisnerb411b362009-09-25 16:07:19 -07003243 /* just store the peer's disk size for now.
3244 * we still need to figure out whether we accept that. */
3245 mdev->p_size = p_size;
3246
Philipp Reisnerb411b362009-09-25 16:07:19 -07003247 if (get_ldev(mdev)) {
3248 warn_if_differ_considerably(mdev, "lower level device sizes",
3249 p_size, drbd_get_max_capacity(mdev->ldev));
3250 warn_if_differ_considerably(mdev, "user requested size",
3251 p_usize, mdev->ldev->dc.disk_size);
3252
3253 /* if this is the first connect, or an otherwise expected
3254 * param exchange, choose the minimum */
3255 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3256 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
3257 p_usize);
3258
3259 my_usize = mdev->ldev->dc.disk_size;
3260
3261 if (mdev->ldev->dc.disk_size != p_usize) {
3262 mdev->ldev->dc.disk_size = p_usize;
3263 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3264 (unsigned long)mdev->ldev->dc.disk_size);
3265 }
3266
3267 /* Never shrink a device with usable data during connect.
3268 But allow online shrinking if we are connected. */
Philipp Reisnera393db62009-12-22 13:35:52 +01003269 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
Philipp Reisnerb411b362009-09-25 16:07:19 -07003270 drbd_get_capacity(mdev->this_bdev) &&
3271 mdev->state.disk >= D_OUTDATED &&
3272 mdev->state.conn < C_CONNECTED) {
3273 dev_err(DEV, "The peer's disk size is too small!\n");
Philipp Reisner38fa9982011-03-15 18:24:49 +01003274 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003275 mdev->ldev->dc.disk_size = my_usize;
3276 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003277 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003278 }
3279 put_ldev(mdev);
3280 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003281
Philipp Reisnere89b5912010-03-24 17:11:33 +01003282 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003283 if (get_ldev(mdev)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02003284 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003285 put_ldev(mdev);
3286 if (dd == dev_size_error)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003287 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003288 drbd_md_sync(mdev);
3289 } else {
3290 /* I am diskless, need to accept the peer's size. */
3291 drbd_set_my_capacity(mdev, p_size);
3292 }
3293
Philipp Reisner99432fc2011-05-20 16:39:13 +02003294 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3295 drbd_reconsider_max_bio_size(mdev);
3296
Philipp Reisnerb411b362009-09-25 16:07:19 -07003297 if (get_ldev(mdev)) {
3298 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3299 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3300 ldsc = 1;
3301 }
3302
Philipp Reisnerb411b362009-09-25 16:07:19 -07003303 put_ldev(mdev);
3304 }
3305
3306 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3307 if (be64_to_cpu(p->c_size) !=
3308 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3309 /* we have different sizes, probably peer
3310 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003311 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003312 }
3313 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3314 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3315 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003316 mdev->state.disk >= D_INCONSISTENT) {
3317 if (ddsf & DDSF_NO_RESYNC)
3318 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3319 else
3320 resync_after_online_grow(mdev);
3321 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003322 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3323 }
3324 }
3325
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003326 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003327}
3328
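/* Receive the peer's UUID set (struct p_uuids) and keep a copy in
 * mdev->p_uuid.  A primary without usable local data only accepts a peer
 * whose current UUID matches the one it exposes; if our current UUID was
 * just created and the peer set its "skip initial sync" flag, the initial
 * full sync is skipped. */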
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003329static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003330{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003331 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003332 struct p_uuids *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003333 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003334 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003335
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003336 mdev = vnr_to_mdev(tconn, pi->vnr);
3337 if (!mdev)
3338 return config_unknown_volume(tconn, pi);
3339
Philipp Reisnerb411b362009-09-25 16:07:19 -07003340 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3341
3342 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3343 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3344
3345 kfree(mdev->p_uuid);
3346 mdev->p_uuid = p_uuid;
3347
3348 if (mdev->state.conn < C_CONNECTED &&
3349 mdev->state.disk < D_INCONSISTENT &&
3350 mdev->state.role == R_PRIMARY &&
3351 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3352 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3353 (unsigned long long)mdev->ed_uuid);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003354 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003355 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003356 }
3357
3358 if (get_ldev(mdev)) {
3359 int skip_initial_sync =
3360 mdev->state.conn == C_CONNECTED &&
Philipp Reisner31890f42011-01-19 14:12:51 +01003361 mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003362 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3363 (p_uuid[UI_FLAGS] & 8);
3364 if (skip_initial_sync) {
3365 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3366 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003367 "clear_n_write from receive_uuids",
3368 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003369 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3370 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3371 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3372 CS_VERBOSE, NULL);
3373 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003374 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003375 }
3376 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003377 } else if (mdev->state.disk < D_INCONSISTENT &&
3378 mdev->state.role == R_PRIMARY) {
3379 /* I am a diskless primary, the peer just created a new current UUID
3380 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003381 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003382 }
3383
 3384	/* Before we test for the disk state, we should wait until a possibly
 3385	   ongoing cluster-wide state change has finished. That is important if
3386 we are primary and are detaching from our disk. We need to see the
3387 new disk state... */
Philipp Reisner8410da82011-02-11 20:11:10 +01003388 mutex_lock(mdev->state_mutex);
3389 mutex_unlock(mdev->state_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003390 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003391 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3392
3393 if (updated_uuids)
3394 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003395
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003396 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003397}
3398
3399/**
3400 * convert_state() - Converts the peer's view of the cluster state to our point of view
3401 * @ps: The state as seen by the peer.
3402 */
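/* In effect the roles swap (ms.peer = ps.role, ms.role = ps.peer), as do
 * the disk states (ms.pdsk = ps.disk, ms.disk = ps.pdsk); connection
 * states with a directed meaning are mapped through the c_tab table. */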
3403static union drbd_state convert_state(union drbd_state ps)
3404{
3405 union drbd_state ms;
3406
3407 static enum drbd_conns c_tab[] = {
3408 [C_CONNECTED] = C_CONNECTED,
3409
3410 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3411 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3412 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3413 [C_VERIFY_S] = C_VERIFY_T,
3414 [C_MASK] = C_MASK,
3415 };
3416
3417 ms.i = ps.i;
3418
3419 ms.conn = c_tab[ps.conn];
3420 ms.peer = ps.role;
3421 ms.role = ps.peer;
3422 ms.pdsk = ps.disk;
3423 ms.disk = ps.pdsk;
3424 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3425
3426 return ms;
3427}
3428
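/* The peer requests a state change for this device (struct p_req_state).
 * Convert mask/val from the peer's point of view to ours, apply the
 * change and send the resulting status code back. */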
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003429static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003430{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003431 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003432 struct p_req_state *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003433 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003434 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003435
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003436 mdev = vnr_to_mdev(tconn, pi->vnr);
3437 if (!mdev)
3438 return -EIO;
3439
Philipp Reisnerb411b362009-09-25 16:07:19 -07003440 mask.i = be32_to_cpu(p->mask);
3441 val.i = be32_to_cpu(p->val);
3442
Philipp Reisner25703f82011-02-07 14:35:25 +01003443 if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
Philipp Reisner8410da82011-02-11 20:11:10 +01003444 mutex_is_locked(mdev->state_mutex)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003445 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003446 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003447 }
3448
3449 mask = convert_state(mask);
3450 val = convert_state(val);
3451
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003452 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3453 drbd_send_sr_reply(mdev, rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003454
Philipp Reisnerb411b362009-09-25 16:07:19 -07003455 drbd_md_sync(mdev);
3456
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003457 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003458}
3459
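/* Like receive_req_state(), but for a connection-wide state change
 * requested by the peer; the verdict is sent back with
 * conn_send_sr_reply(). */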
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003460static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003461{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003462 struct p_req_state *p = pi->data;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003463 union drbd_state mask, val;
3464 enum drbd_state_rv rv;
3465
3466 mask.i = be32_to_cpu(p->mask);
3467 val.i = be32_to_cpu(p->val);
3468
3469 if (test_bit(DISCARD_CONCURRENT, &tconn->flags) &&
3470 mutex_is_locked(&tconn->cstate_mutex)) {
3471 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003472 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003473 }
3474
3475 mask = convert_state(mask);
3476 val = convert_state(val);
3477
Philipp Reisner778bcf22011-03-28 12:55:03 +02003478 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003479 conn_send_sr_reply(tconn, rv);
3480
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003481 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003482}
3483
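/* The peer reports its current state (struct p_state).  Reconcile it with
 * our own: run the sync handshake when a connection or a disk is new,
 * ignore transient uptodate/inconsistent flapping around the end of a
 * resync, and drop the connection if no consistent common state can be
 * reached. */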
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003484static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003485{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003486 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003487 struct p_state *p = pi->data;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003488 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003489 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003490 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003491 int rv;
3492
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003493 mdev = vnr_to_mdev(tconn, pi->vnr);
3494 if (!mdev)
3495 return config_unknown_volume(tconn, pi);
3496
Philipp Reisnerb411b362009-09-25 16:07:19 -07003497 peer_state.i = be32_to_cpu(p->state);
3498
3499 real_peer_disk = peer_state.disk;
3500 if (peer_state.disk == D_NEGOTIATING) {
3501 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3502 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3503 }
3504
Philipp Reisner87eeee42011-01-19 14:16:30 +01003505 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003506 retry:
Philipp Reisner78bae592011-03-28 15:40:12 +02003507 os = ns = drbd_read_state(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003508 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003509
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003510 /* peer says his disk is uptodate, while we think it is inconsistent,
3511 * and this happens while we think we have a sync going on. */
3512 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3513 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3514 /* If we are (becoming) SyncSource, but peer is still in sync
3515 * preparation, ignore its uptodate-ness to avoid flapping, it
3516 * will change to inconsistent once the peer reaches active
3517 * syncing states.
3518 * It may have changed syncer-paused flags, however, so we
3519 * cannot ignore this completely. */
3520 if (peer_state.conn > C_CONNECTED &&
3521 peer_state.conn < C_SYNC_SOURCE)
3522 real_peer_disk = D_INCONSISTENT;
3523
3524 /* if peer_state changes to connected at the same time,
3525 * it explicitly notifies us that it finished resync.
3526 * Maybe we should finish it up, too? */
3527 else if (os.conn >= C_SYNC_SOURCE &&
3528 peer_state.conn == C_CONNECTED) {
3529 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3530 drbd_resync_finished(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003531 return 0;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003532 }
3533 }
3534
3535 /* peer says his disk is inconsistent, while we think it is uptodate,
3536 * and this happens while the peer still thinks we have a sync going on,
3537 * but we think we are already done with the sync.
3538 * We ignore this to avoid flapping pdsk.
3539 * This should not happen, if the peer is a recent version of drbd. */
3540 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3541 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3542 real_peer_disk = D_UP_TO_DATE;
3543
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003544 if (ns.conn == C_WF_REPORT_PARAMS)
3545 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003546
Philipp Reisner67531712010-10-27 12:21:30 +02003547 if (peer_state.conn == C_AHEAD)
3548 ns.conn = C_BEHIND;
3549
Philipp Reisnerb411b362009-09-25 16:07:19 -07003550 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3551 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3552 int cr; /* consider resync */
3553
3554 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003555 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003556 /* if we had an established connection
3557 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003558 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003559 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003560 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003561 /* if we have both been inconsistent, and the peer has been
3562 * forced to be UpToDate with --overwrite-data */
3563 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3564 /* if we had been plain connected, and the admin requested to
3565 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003566 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003567 (peer_state.conn >= C_STARTING_SYNC_S &&
3568 peer_state.conn <= C_WF_BITMAP_T));
3569
3570 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003571 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003572
3573 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003574 if (ns.conn == C_MASK) {
3575 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003576 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003577 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003578 } else if (peer_state.disk == D_NEGOTIATING) {
3579 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3580 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003581 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003582 } else {
Philipp Reisner8169e412011-03-15 18:40:27 +01003583 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003584 return -EIO;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003585 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003586 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003587 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003588 }
3589 }
3590 }
3591
Philipp Reisner87eeee42011-01-19 14:16:30 +01003592 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02003593 if (os.i != drbd_read_state(mdev).i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003594 goto retry;
3595 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003596 ns.peer = peer_state.role;
3597 ns.pdsk = real_peer_disk;
3598 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003599 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003600 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003601 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
Philipp Reisner2aebfab2011-03-28 16:48:11 +02003602 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003603 test_bit(NEW_CUR_UUID, &mdev->flags)) {
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01003604 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
Philipp Reisner481c6f52010-06-22 14:03:27 +02003605 for temporary network outages! */
Philipp Reisner87eeee42011-01-19 14:16:30 +01003606 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003607 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01003608 tl_clear(mdev->tconn);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003609 drbd_uuid_new_current(mdev);
3610 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003611 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003612 return -EIO;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003613 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003614 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisner78bae592011-03-28 15:40:12 +02003615 ns = drbd_read_state(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003616 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003617
3618 if (rv < SS_SUCCESS) {
Philipp Reisner38fa9982011-03-15 18:24:49 +01003619 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003620 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003621 }
3622
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003623 if (os.conn > C_WF_REPORT_PARAMS) {
3624 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003625 peer_state.disk != D_NEGOTIATING ) {
3626 /* we want resync, peer has not yet decided to sync... */
3627 /* Nowadays only used when forcing a node into primary role and
3628 setting its disk to UpToDate with that */
3629 drbd_send_uuids(mdev);
3630 drbd_send_state(mdev);
3631 }
3632 }
3633
Philipp Reisner89e58e72011-01-19 13:12:45 +01003634 mdev->tconn->net_conf->want_lose = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003635
3636 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3637
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003638 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003639}
3640
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003641static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003642{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003643 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003644 struct p_rs_uuid *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003645
3646 mdev = vnr_to_mdev(tconn, pi->vnr);
3647 if (!mdev)
3648 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003649
3650 wait_event(mdev->misc_wait,
3651 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003652 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003653 mdev->state.conn < C_CONNECTED ||
3654 mdev->state.disk < D_NEGOTIATING);
3655
3656 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3657
Philipp Reisnerb411b362009-09-25 16:07:19 -07003658 /* Here the _drbd_uuid_ functions are right: the current UUID should
3659 _not_ be rotated into the history */
3660 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3661 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3662 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3663
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003664 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003665 drbd_start_resync(mdev, C_SYNC_TARGET);
3666
3667 put_ldev(mdev);
3668 } else
3669 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3670
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003671 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003672}
3673
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003674/**
3675 * receive_bitmap_plain() - receive one chunk of an uncompressed bitmap transfer
3676 *
3677 * Return 0 when done, 1 when another iteration is needed, and a negative error
3678 * code upon failure.
3679 */
3680static int
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02003681receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003682 unsigned long *p, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003683{
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02003684 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
3685 drbd_header_size(mdev->tconn);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003686 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02003687 c->bm_words - c->word_offset);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003688 unsigned int want = num_words * sizeof(*p);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003689 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003690
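/* The payload is an array of raw little endian bitmap words; 'want' is
 * at most one socket buffer behind the header, and at most what is still
 * missing from the bitmap. */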
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02003691 if (want != size) {
3692 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003693 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003694 }
3695 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003696 return 0;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003697 err = drbd_recv_all(mdev->tconn, p, want);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003698 if (err)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003699 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003700
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003701 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003702
3703 c->word_offset += num_words;
3704 c->bit_offset = c->word_offset * BITS_PER_LONG;
3705 if (c->bit_offset > c->bm_bits)
3706 c->bit_offset = c->bm_bits;
3707
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003708 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003709}
3710
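/* Layout of the p_compressed_bm 'encoding' byte, as decoded by the
 * helpers below:
 *   bits 0-3: bitmap encoding code (enum drbd_bitmap_code)
 *   bits 4-6: number of padding bits in the bitstream
 *   bit    7: initial toggle value, i.e. whether the first run-length
 *             describes set bits
 */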
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01003711static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
3712{
3713 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
3714}
3715
3716static int dcbp_get_start(struct p_compressed_bm *p)
3717{
3718 return (p->encoding & 0x80) != 0;
3719}
3720
3721static int dcbp_get_pad_bits(struct p_compressed_bm *p)
3722{
3723 return (p->encoding >> 4) & 0x7;
3724}
3725
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003726/**
3727 * recv_bm_rle_bits() - decode one RLE/VLI compressed bitmap chunk
3728 *
3729 * Return 0 when done, 1 when another iteration is needed, and a negative error
3730 * code upon failure.
3731 */
3732static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003733recv_bm_rle_bits(struct drbd_conf *mdev,
3734 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01003735 struct bm_xfer_ctx *c,
3736 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003737{
3738 struct bitstream bs;
3739 u64 look_ahead;
3740 u64 rl;
3741 u64 tmp;
3742 unsigned long s = c->bit_offset;
3743 unsigned long e;
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01003744 int toggle = dcbp_get_start(p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003745 int have;
3746 int bits;
3747
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01003748 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003749
3750 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3751 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003752 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003753
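/* The payload is a sequence of VLI-encoded run lengths. Runs alternate
 * between clear and set bits; 'toggle' tracks which kind the current run
 * is, 'look_ahead' buffers up to 64 bits of input, and 'have' counts how
 * many of those bits are still valid. */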
3754 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3755 bits = vli_decode_bits(&rl, look_ahead);
3756 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003757 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003758
3759 if (toggle) {
3760 e = s + rl - 1;
3761 if (e >= c->bm_bits) {
3762 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003763 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003764 }
3765 _drbd_bm_set_bits(mdev, s, e);
3766 }
3767
3768 if (have < bits) {
3769 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3770 have, bits, look_ahead,
3771 (unsigned int)(bs.cur.b - p->code),
3772 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003773 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003774 }
3775 look_ahead >>= bits;
3776 have -= bits;
3777
3778 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3779 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003780 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003781 look_ahead |= tmp << have;
3782 have += bits;
3783 }
3784
3785 c->bit_offset = s;
3786 bm_xfer_ctx_bit_to_word_offset(c);
3787
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003788 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003789}
3790
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003791/**
3792 * decode_bitmap_c() - dispatch decoding of one compressed bitmap chunk
3793 *
3794 * Return 0 when done, 1 when another iteration is needed, and a negative error
3795 * code upon failure.
3796 */
3797static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003798decode_bitmap_c(struct drbd_conf *mdev,
3799 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01003800 struct bm_xfer_ctx *c,
3801 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003802{
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01003803 if (dcbp_get_code(p) == RLE_VLI_Bits)
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003804 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003805
3806 /* other variants had been implemented for evaluation,
3807 * but have been dropped as this one turned out to be "best"
3808 * during all our tests. */
3809
3810 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003811 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003812 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003813}
3814
3815void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3816 const char *direction, struct bm_xfer_ctx *c)
3817{
3818 /* what would it take to transfer it "plaintext" */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02003819 unsigned int header_size = drbd_header_size(mdev->tconn);
3820 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
3821 unsigned int plain =
3822 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
3823 c->bm_words * sizeof(unsigned long);
3824 unsigned int total = c->bytes[0] + c->bytes[1];
3825 unsigned int r;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003826
3827 /* total cannot be zero. But just in case: */
3828 if (total == 0)
3829 return;
3830
3831 /* don't report if not compressed */
3832 if (total >= plain)
3833 return;
3834
3835 /* total < plain. check for overflow, still */
3836 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3837 : (1000 * total / plain);
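/* r is now the compressed size in permille of the plain size.
 * Illustrative example with made-up numbers: total = 250 and plain = 1000
 * gives r = 250; after "r = 1000 - r" below this is reported as
 * "compression: 75.0%". */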
3838
3839 if (r > 1000)
3840 r = 1000;
3841
3842 r = 1000 - r;
3843 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3844 "total %u; compression: %u.%u%%\n",
3845 direction,
3846 c->bytes[1], c->packets[1],
3847 c->bytes[0], c->packets[0],
3848 total, r/10, r % 10);
3849}
3850
3851/* Since we are processing the bitfield from lower addresses to higher,
3852 it does not matter whether we process it in 32 bit or 64 bit chunks,
3853 as long as it is little endian. (Understand it as a byte stream,
3854 beginning with the lowest byte...) If we used big endian,
3855 we would need to process it from the highest address to the lowest
3856 in order to be agnostic to the 32 vs 64 bit issue.
3857
3858 Returns 0 on success, a negative error code otherwise. */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003859static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003860{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003861 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003862 struct bm_xfer_ctx c;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003863 int err;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003864
3865 mdev = vnr_to_mdev(tconn, pi->vnr);
3866 if (!mdev)
3867 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003868
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003869 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3870 /* you are supposed to send additional out-of-sync information
3871 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003872
Philipp Reisnerb411b362009-09-25 16:07:19 -07003873 c = (struct bm_xfer_ctx) {
3874 .bm_bits = drbd_bm_bits(mdev),
3875 .bm_words = drbd_bm_words(mdev),
3876 };
3877
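/* Keep receiving P_BITMAP / P_COMPRESSED_BITMAP packets until the whole
 * bitmap has been transferred; the helpers return 1 while more packets are
 * needed, 0 when done, and a negative error code otherwise. */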
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003878 for(;;) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003879 if (pi->cmd == P_BITMAP)
3880 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
3881 else if (pi->cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003882 /* MAYBE: sanity check that we speak proto >= 90,
3883 * and the feature is enabled! */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003884 struct p_compressed_bm *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003885
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02003886 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003887 dev_err(DEV, "ReportCBitmap packet too large\n");
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003888 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003889 goto out;
3890 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003891 if (pi->size <= sizeof(*p)) {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003892 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003893 err = -EIO;
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003894 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003895 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003896 err = drbd_recv_all(mdev->tconn, p, pi->size);
3897 if (err)
3898 goto out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003899 err = decode_bitmap_c(mdev, p, &c, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003900 } else {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003901 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003902 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003903 goto out;
3904 }
3905
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003906 c.packets[pi->cmd == P_BITMAP]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02003907 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003908
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003909 if (err <= 0) {
3910 if (err < 0)
3911 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003912 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003913 }
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003914 err = drbd_recv_header(mdev->tconn, pi);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003915 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003916 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003917 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003918
3919 INFO_bm_xfer_stats(mdev, "receive", &c);
3920
3921 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003922 enum drbd_state_rv rv;
3923
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003924 err = drbd_send_bitmap(mdev);
3925 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003926 goto out;
3927 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003928 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3929 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003930 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3931 /* admin may have requested C_DISCONNECTING,
3932 * other threads may have noticed network errors */
3933 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3934 drbd_conn_str(mdev->state.conn));
3935 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003936 err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003937
Philipp Reisnerb411b362009-09-25 16:07:19 -07003938 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003939 drbd_bm_unlock(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003940 if (!err && mdev->state.conn == C_WF_BITMAP_S)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003941 drbd_start_resync(mdev, C_SYNC_SOURCE);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003942 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003943}
3944
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003945static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003946{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003947 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003948 pi->cmd, pi->size);
Philipp Reisner2de876e2011-03-15 14:38:01 +01003949
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003950 return ignore_remaining_packet(tconn, pi);
Philipp Reisner2de876e2011-03-15 14:38:01 +01003951}
3952
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003953static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003954{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003955 /* Make sure we've acked all the TCP data associated
3956 * with the data requests being unplugged */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003957 drbd_tcp_quickack(tconn->data.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003958
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003959 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003960}
3961
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003962static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner73a01a12010-10-27 14:33:00 +02003963{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003964 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003965 struct p_block_desc *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003966
3967 mdev = vnr_to_mdev(tconn, pi->vnr);
3968 if (!mdev)
3969 return -EIO;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003970
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003971 switch (mdev->state.conn) {
3972 case C_WF_SYNC_UUID:
3973 case C_WF_BITMAP_T:
3974 case C_BEHIND:
3975 break;
3976 default:
3977 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3978 drbd_conn_str(mdev->state.conn));
3979 }
3980
Philipp Reisner73a01a12010-10-27 14:33:00 +02003981 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3982
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003983 return 0;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003984}
3985
Philipp Reisner02918be2010-08-20 14:35:10 +02003986struct data_cmd {
3987 int expect_payload;
3988 size_t pkt_size;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003989 int (*fn)(struct drbd_tconn *, struct packet_info *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003990};
3991
Philipp Reisner02918be2010-08-20 14:35:10 +02003992static struct data_cmd drbd_cmd_handler[] = {
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003993 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3994 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3995 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3996 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003997 [P_BITMAP] = { 1, 0, receive_bitmap } ,
3998 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
3999 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004000 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4001 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004002 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4003 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004004 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4005 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4006 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4007 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4008 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4009 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4010 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4011 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4012 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4013 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
4014 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4015 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
Philipp Reisner02918be2010-08-20 14:35:10 +02004016};
4017
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004018static void drbdd(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004019{
Philipp Reisner77351055b2011-02-07 17:24:26 +01004020 struct packet_info pi;
Philipp Reisner02918be2010-08-20 14:35:10 +02004021 size_t shs; /* sub header size */
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004022 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004023
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004024 while (get_t_state(&tconn->receiver) == RUNNING) {
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004025 struct data_cmd *cmd;
4026
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004027 drbd_thread_current_set_cpu(&tconn->receiver);
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004028 if (drbd_recv_header(tconn, &pi))
Philipp Reisner02918be2010-08-20 14:35:10 +02004029 goto err_out;
4030
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004031 cmd = &drbd_cmd_handler[pi.cmd];
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004032 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004033 conn_err(tconn, "unknown packet type %d, l: %d!\n", pi.cmd, pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004034 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01004035 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004036
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004037 shs = cmd->pkt_size;
4038 if (pi.size > shs && !cmd->expect_payload) {
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004039 conn_err(tconn, "No payload expected %s l:%d\n", cmdname(pi.cmd), pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004040 goto err_out;
4041 }
4042
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004043 if (shs) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004044 err = drbd_recv_all_warn(tconn, pi.data, shs);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004045 if (err)
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004046 goto err_out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004047 pi.size -= shs;
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004048 }
4049
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004050 err = cmd->fn(tconn, &pi);
4051 if (err) {
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004052 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4053 cmdname(pi.cmd), err, pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004054 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004055 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004056 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004057 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004058
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004059 err_out:
4060 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004061}
4062
Philipp Reisner0e29d162011-02-18 14:23:11 +01004063void conn_flush_workqueue(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004064{
4065 struct drbd_wq_barrier barr;
4066
4067 barr.w.cb = w_prev_work_done;
Philipp Reisner0e29d162011-02-18 14:23:11 +01004068 barr.w.tconn = tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004069 init_completion(&barr.done);
Philipp Reisner0e29d162011-02-18 14:23:11 +01004070 drbd_queue_work(&tconn->data.work, &barr.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004071 wait_for_completion(&barr.done);
4072}
4073
Philipp Reisner360cc742011-02-08 14:29:53 +01004074static void drbd_disconnect(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004075{
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004076 enum drbd_conns oc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004077 int rv = SS_UNKNOWN_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004078
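/* Tear-down sequence: stop the asender, close the sockets, run the
 * per-volume cleanup for every volume of this connection, possibly fence
 * the peer, and finally move the connection state back to C_UNCONNECTED
 * (or on to C_STANDALONE if we were asked to disconnect). */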
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004079 if (tconn->cstate == C_STANDALONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004080 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004081
4082 /* asender does not clean up anything. it must not interfere, either */
Philipp Reisner360cc742011-02-08 14:29:53 +01004083 drbd_thread_stop(&tconn->asender);
4084 drbd_free_sock(tconn);
4085
4086 idr_for_each(&tconn->volumes, drbd_disconnected, tconn);
Philipp Reisner360cc742011-02-08 14:29:53 +01004087 conn_info(tconn, "Connection closed\n");
4088
Philipp Reisnercb703452011-03-24 11:03:07 +01004089 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4090 conn_try_outdate_peer_async(tconn);
4091
Philipp Reisner360cc742011-02-08 14:29:53 +01004092 spin_lock_irq(&tconn->req_lock);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004093 oc = tconn->cstate;
4094 if (oc >= C_UNCONNECTED)
4095 rv = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4096
Philipp Reisner360cc742011-02-08 14:29:53 +01004097 spin_unlock_irq(&tconn->req_lock);
4098
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004099 if (oc == C_DISCONNECTING) {
Philipp Reisner360cc742011-02-08 14:29:53 +01004100 wait_event(tconn->net_cnt_wait, atomic_read(&tconn->net_cnt) == 0);
4101
4102 crypto_free_hash(tconn->cram_hmac_tfm);
4103 tconn->cram_hmac_tfm = NULL;
4104
4105 kfree(tconn->net_conf);
4106 tconn->net_conf = NULL;
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004107 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE);
Philipp Reisner360cc742011-02-08 14:29:53 +01004108 }
4109}
4110
4111static int drbd_disconnected(int vnr, void *p, void *data)
4112{
4113 struct drbd_conf *mdev = (struct drbd_conf *)p;
4114 enum drbd_fencing_p fp;
4115 unsigned int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004116
Philipp Reisner85719572010-07-21 10:20:17 +02004117 /* wait for current activity to cease. */
Philipp Reisner87eeee42011-01-19 14:16:30 +01004118 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004119 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4120 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4121 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004122 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004123
4124 /* We do not have data structures that would allow us to
4125 * get the rs_pending_cnt down to 0 again.
4126 * * On C_SYNC_TARGET we do not have any data structures describing
4127 * the pending RSDataRequest's we have sent.
4128 * * On C_SYNC_SOURCE there is no data structure that tracks
4129 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4130 * And no, it is not the sum of the reference counts in the
4131 * resync_LRU. The resync_LRU tracks the whole operation including
4132 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4133 * on the fly. */
4134 drbd_rs_cancel_all(mdev);
4135 mdev->rs_total = 0;
4136 mdev->rs_failed = 0;
4137 atomic_set(&mdev->rs_pending_cnt, 0);
4138 wake_up(&mdev->misc_wait);
4139
Philipp Reisner7fde2be2011-03-01 11:08:28 +01004140 del_timer(&mdev->request_timer);
4141
Philipp Reisnerb411b362009-09-25 16:07:19 -07004142 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004143 resync_timer_fn((unsigned long)mdev);
4144
Philipp Reisnerb411b362009-09-25 16:07:19 -07004145 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4146 * w_make_resync_request etc. which may still be on the worker queue
4147 * to be "canceled" */
Philipp Reisnera21e9292011-02-08 15:08:49 +01004148 drbd_flush_workqueue(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004149
Andreas Gruenbachera990be42011-04-06 17:56:48 +02004150 drbd_finish_peer_reqs(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004151
4152 kfree(mdev->p_uuid);
4153 mdev->p_uuid = NULL;
4154
Philipp Reisner2aebfab2011-03-28 16:48:11 +02004155 if (!drbd_suspended(mdev))
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01004156 tl_clear(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004157
Philipp Reisnerb411b362009-09-25 16:07:19 -07004158 drbd_md_sync(mdev);
4159
4160 fp = FP_DONT_CARE;
4161 if (get_ldev(mdev)) {
4162 fp = mdev->ldev->dc.fencing;
4163 put_ldev(mdev);
4164 }
4165
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004166 /* serialize with bitmap writeout triggered by the state change,
4167 * if any. */
4168 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4169
Philipp Reisnerb411b362009-09-25 16:07:19 -07004170 /* tcp_close and release of sendpage pages can be deferred. I don't
4171 * want to use SO_LINGER, because apparently it can be deferred for
4172 * more than 20 seconds (longest time I checked).
4173 *
4174 * Actually we don't care exactly when the network stack does its
4175 * put_page(), but release our reference on these pages right here.
4176 */
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02004177 i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004178 if (i)
4179 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02004180 i = atomic_read(&mdev->pp_in_use_by_net);
4181 if (i)
4182 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004183 i = atomic_read(&mdev->pp_in_use);
4184 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02004185 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004186
4187 D_ASSERT(list_empty(&mdev->read_ee));
4188 D_ASSERT(list_empty(&mdev->active_ee));
4189 D_ASSERT(list_empty(&mdev->sync_ee));
4190 D_ASSERT(list_empty(&mdev->done_ee));
4191
4192 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4193 atomic_set(&mdev->current_epoch->epoch_size, 0);
4194 D_ASSERT(list_empty(&mdev->current_epoch->list));
Philipp Reisner360cc742011-02-08 14:29:53 +01004195
4196 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004197}
4198
4199/*
4200 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4201 * we can agree on is stored in agreed_pro_version.
4202 *
4203 * feature flags and the reserved array should be enough room for future
4204 * enhancements of the handshake protocol, and possible plugins...
4205 *
4206 * for now, they are expected to be zero, but they are ignored either way.
4207 */
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004208static int drbd_send_features(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004209{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004210 struct drbd_socket *sock;
4211 struct p_connection_features *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004212
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004213 sock = &tconn->data;
4214 p = conn_prepare_command(tconn, sock);
4215 if (!p)
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004216 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004217 memset(p, 0, sizeof(*p));
4218 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4219 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004220 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004221}
4222
4223/*
4224 * return values:
4225 * 1 yes, we have a valid connection
4226 * 0 oops, did not work out, please try again
4227 * -1 peer talks different language,
4228 * no point in trying again, please go standalone.
4229 */
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004230static int drbd_do_features(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004231{
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004232 /* ASSERT current == tconn->receiver ... */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004233 struct p_connection_features *p;
4234 const int expect = sizeof(struct p_connection_features);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004235 struct packet_info pi;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004236 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004237
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004238 err = drbd_send_features(tconn);
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004239 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004240 return 0;
4241
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004242 err = drbd_recv_header(tconn, &pi);
4243 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004244 return 0;
4245
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004246 if (pi.cmd != P_CONNECTION_FEATURES) {
4247 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004248 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004249 return -1;
4250 }
4251
Philipp Reisner77351055b2011-02-07 17:24:26 +01004252 if (pi.size != expect) {
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004253 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004254 expect, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004255 return -1;
4256 }
4257
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004258 p = pi.data;
4259 err = drbd_recv_all_warn(tconn, p, expect);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004260 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004261 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004262
Philipp Reisnerb411b362009-09-25 16:07:19 -07004263 p->protocol_min = be32_to_cpu(p->protocol_min);
4264 p->protocol_max = be32_to_cpu(p->protocol_max);
4265 if (p->protocol_max == 0)
4266 p->protocol_max = p->protocol_min;
4267
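/* The connection is usable only if our [PRO_VERSION_MIN, PRO_VERSION_MAX]
 * range overlaps the peer's [protocol_min, protocol_max] range; the agreed
 * version is then the highest one both sides support. */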
4268 if (PRO_VERSION_MAX < p->protocol_min ||
4269 PRO_VERSION_MIN > p->protocol_max)
4270 goto incompat;
4271
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004272 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004273
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004274 conn_info(tconn, "Handshake successful: "
4275 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004276
4277 return 1;
4278
4279 incompat:
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004280 conn_err(tconn, "incompatible DRBD dialects: "
Philipp Reisnerb411b362009-09-25 16:07:19 -07004281 "I support %d-%d, peer supports %d-%d\n",
4282 PRO_VERSION_MIN, PRO_VERSION_MAX,
4283 p->protocol_min, p->protocol_max);
4284 return -1;
4285}
4286
4287#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
Philipp Reisner13e60372011-02-08 09:54:40 +01004288static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004289{
4290 conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4291 conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004292 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004293}
4294#else
4295#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004296
4297/* Return value:
4298 1 - auth succeeded,
4299 0 - failed, try again (network error),
4300 -1 - auth failed, don't try again.
4301*/
4302
Philipp Reisner13e60372011-02-08 09:54:40 +01004303static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004304{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004305 struct drbd_socket *sock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004306 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4307 struct scatterlist sg;
4308 char *response = NULL;
4309 char *right_response = NULL;
4310 char *peers_ch = NULL;
Philipp Reisner13e60372011-02-08 09:54:40 +01004311 unsigned int key_len = strlen(tconn->net_conf->shared_secret);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004312 unsigned int resp_size;
4313 struct hash_desc desc;
Philipp Reisner77351055b2011-02-07 17:24:26 +01004314 struct packet_info pi;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004315 int err, rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004316
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004317 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
4318
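/* Challenge-response authentication keyed with the shared secret:
 * send our random challenge, receive the peer's challenge, reply with the
 * HMAC of the peer's challenge, then receive the peer's response to our
 * challenge and compare it against the locally computed value. */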
Philipp Reisner13e60372011-02-08 09:54:40 +01004319 desc.tfm = tconn->cram_hmac_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004320 desc.flags = 0;
4321
Philipp Reisner13e60372011-02-08 09:54:40 +01004322 rv = crypto_hash_setkey(tconn->cram_hmac_tfm,
4323 (u8 *)tconn->net_conf->shared_secret, key_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004324 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004325 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004326 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004327 goto fail;
4328 }
4329
4330 get_random_bytes(my_challenge, CHALLENGE_LEN);
4331
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004332 sock = &tconn->data;
4333 if (!conn_prepare_command(tconn, sock)) {
4334 rv = 0;
4335 goto fail;
4336 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004337 rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004338 my_challenge, CHALLENGE_LEN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004339 if (!rv)
4340 goto fail;
4341
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004342 err = drbd_recv_header(tconn, &pi);
4343 if (err) {
4344 rv = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004345 goto fail;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004346 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004347
Philipp Reisner77351055b2011-02-07 17:24:26 +01004348 if (pi.cmd != P_AUTH_CHALLENGE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004349 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004350 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004351 rv = 0;
4352 goto fail;
4353 }
4354
Philipp Reisner77351055b2011-02-07 17:24:26 +01004355 if (pi.size > CHALLENGE_LEN * 2) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004356 conn_err(tconn, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004357 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004358 goto fail;
4359 }
4360
Philipp Reisner77351055b2011-02-07 17:24:26 +01004361 peers_ch = kmalloc(pi.size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004362 if (peers_ch == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004363 conn_err(tconn, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004364 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004365 goto fail;
4366 }
4367
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004368 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4369 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004370 rv = 0;
4371 goto fail;
4372 }
4373
Philipp Reisner13e60372011-02-08 09:54:40 +01004374 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004375 response = kmalloc(resp_size, GFP_NOIO);
4376 if (response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004377 conn_err(tconn, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004378 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004379 goto fail;
4380 }
4381
4382 sg_init_table(&sg, 1);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004383 sg_set_buf(&sg, peers_ch, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004384
4385 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4386 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004387 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004388 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004389 goto fail;
4390 }
4391
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004392 if (!conn_prepare_command(tconn, sock)) {
4393 rv = 0;
4394 goto fail;
4395 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004396 rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004397 response, resp_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004398 if (!rv)
4399 goto fail;
4400
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004401 err = drbd_recv_header(tconn, &pi);
4402 if (err) {
4403 rv = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004404 goto fail;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004405 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004406
Philipp Reisner77351055b2011-02-07 17:24:26 +01004407 if (pi.cmd != P_AUTH_RESPONSE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004408 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004409 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004410 rv = 0;
4411 goto fail;
4412 }
4413
Philipp Reisner77351055b2011-02-07 17:24:26 +01004414 if (pi.size != resp_size) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004415 conn_err(tconn, "expected AuthResponse payload of wrong size\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004416 rv = 0;
4417 goto fail;
4418 }
4419
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004420 err = drbd_recv_all_warn(tconn, response, resp_size);
4421 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004422 rv = 0;
4423 goto fail;
4424 }
4425
4426 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004427 if (right_response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004428 conn_err(tconn, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004429 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004430 goto fail;
4431 }
4432
4433 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4434
4435 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4436 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004437 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004438 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004439 goto fail;
4440 }
4441
4442 rv = !memcmp(response, right_response, resp_size);
4443
4444 if (rv)
Philipp Reisner13e60372011-02-08 09:54:40 +01004445 conn_info(tconn, "Peer authenticated using %d bytes of '%s' HMAC\n",
4446 resp_size, tconn->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004447 else
4448 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004449
4450 fail:
4451 kfree(peers_ch);
4452 kfree(response);
4453 kfree(right_response);
4454
4455 return rv;
4456}
4457#endif
4458
4459int drbdd_init(struct drbd_thread *thi)
4460{
Philipp Reisner392c8802011-02-09 10:33:31 +01004461 struct drbd_tconn *tconn = thi->tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004462 int h;
4463
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004464 conn_info(tconn, "receiver (re)started\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004465
4466 do {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004467 h = drbd_connect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004468 if (h == 0) {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004469 drbd_disconnect(tconn);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004470 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004471 }
4472 if (h == -1) {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004473 conn_warn(tconn, "Discarding network configuration.\n");
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004474 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004475 }
4476 } while (h == 0);
4477
4478 if (h > 0) {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004479 if (get_net_conf(tconn)) {
4480 drbdd(tconn);
4481 put_net_conf(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004482 }
4483 }
4484
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004485 drbd_disconnect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004486
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004487 conn_info(tconn, "receiver terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004488 return 0;
4489}
4490
4491/* ********* acknowledge sender ******** */
4492
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004493static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004494{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004495 struct p_req_state_reply *p = pi->data;
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004496 int retcode = be32_to_cpu(p->retcode);
4497
4498 if (retcode >= SS_SUCCESS) {
4499 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4500 } else {
4501 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4502 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4503 drbd_set_st_err_str(retcode), retcode);
4504 }
4505 wake_up(&tconn->ping_wait);
4506
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004507 return 0;
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004508}
4509
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004510static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004511{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004512 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004513 struct p_req_state_reply *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004514 int retcode = be32_to_cpu(p->retcode);
4515
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004516 mdev = vnr_to_mdev(tconn, pi->vnr);
4517 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004518 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004519
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004520 if (retcode >= SS_SUCCESS) {
4521 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4522 } else {
4523 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4524 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4525 drbd_set_st_err_str(retcode), retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004526 }
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004527 wake_up(&mdev->state_wait);
4528
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004529 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004530}
4531
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004532static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004533{
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004534 return drbd_send_ping_ack(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004535
4536}
4537
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004538static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004539{
4540 /* restore idle timeout */
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01004541 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4542 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4543 wake_up(&tconn->ping_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004544
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004545 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004546}
4547
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004548static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004549{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004550 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004551 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004552 sector_t sector = be64_to_cpu(p->sector);
4553 int blksize = be32_to_cpu(p->blksize);
4554
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004555 mdev = vnr_to_mdev(tconn, pi->vnr);
4556 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004557 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004558
Philipp Reisner31890f42011-01-19 14:12:51 +01004559 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004560
4561 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4562
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004563 if (get_ldev(mdev)) {
4564 drbd_rs_complete_io(mdev, sector);
4565 drbd_set_in_sync(mdev, sector, blksize);
4566 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4567 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4568 put_ldev(mdev);
4569 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004570 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004571 atomic_add(blksize >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004572
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004573 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004574}
4575
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004576static int
4577validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4578 struct rb_root *root, const char *func,
4579 enum drbd_req_event what, bool missing_ok)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004580{
4581 struct drbd_request *req;
4582 struct bio_and_error m;
4583
Philipp Reisner87eeee42011-01-19 14:16:30 +01004584 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004585 req = find_request(mdev, root, id, sector, missing_ok, func);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004586 if (unlikely(!req)) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01004587 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004588 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004589 }
4590 __req_mod(req, what, &m);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004591 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004592
4593 if (m.bio)
4594 complete_master_bio(mdev, &m);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004595 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004596}
4597
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004598static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004599{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004600 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004601 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004602 sector_t sector = be64_to_cpu(p->sector);
4603 int blksize = be32_to_cpu(p->blksize);
4604 enum drbd_req_event what;
4605
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004606 mdev = vnr_to_mdev(tconn, pi->vnr);
4607 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004608 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004609
Philipp Reisnerb411b362009-09-25 16:07:19 -07004610 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4611
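/* ID_SYNCER marks acks for resync traffic: just account the blocks as in
 * sync. Everything else refers to an application write tracked in the
 * write_requests tree. */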
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01004612 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004613 drbd_set_in_sync(mdev, sector, blksize);
4614 dec_rs_pending(mdev);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004615 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004616 }
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004617 switch (pi->cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004618 case P_RS_WRITE_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004619 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004620 what = WRITE_ACKED_BY_PEER_AND_SIS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004621 break;
4622 case P_WRITE_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004623 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004624 what = WRITE_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004625 break;
4626 case P_RECV_ACK:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004627 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004628 what = RECV_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004629 break;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01004630 case P_DISCARD_WRITE:
Philipp Reisner89e58e72011-01-19 13:12:45 +01004631 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01004632 what = DISCARD_WRITE;
4633 break;
4634 case P_RETRY_WRITE:
4635 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
4636 what = POSTPONE_WRITE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004637 break;
4638 default:
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004639 BUG();
Philipp Reisnerb411b362009-09-25 16:07:19 -07004640 }
4641
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004642 return validate_req_change_req_state(mdev, p->block_id, sector,
4643 &mdev->write_requests, __func__,
4644 what, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004645}
4646
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004647static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004648{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004649 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004650 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004651 sector_t sector = be64_to_cpu(p->sector);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004652 int size = be32_to_cpu(p->blksize);
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004653 bool missing_ok = tconn->net_conf->wire_protocol == DRBD_PROT_A ||
4654 tconn->net_conf->wire_protocol == DRBD_PROT_B;
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004655 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004656
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004657 mdev = vnr_to_mdev(tconn, pi->vnr);
4658 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004659 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004660
Philipp Reisnerb411b362009-09-25 16:07:19 -07004661 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4662
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01004663 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004664 dec_rs_pending(mdev);
4665 drbd_rs_failed_io(mdev, sector, size);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004666 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004667 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01004668
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004669 err = validate_req_change_req_state(mdev, p->block_id, sector,
4670 &mdev->write_requests, __func__,
4671 NEG_ACKED, missing_ok);
4672 if (err) {
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004673 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4674 The master bio might already be completed, therefore the
4675 request is no longer in the collision hash. */
4676 /* In Protocol B we might already have got a P_RECV_ACK
4677 but then get a P_NEG_ACK afterwards. */
4678 if (!missing_ok)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004679 return err;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01004680 drbd_set_out_of_sync(mdev, sector, size);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004681 }
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004682 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004683}
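
/*
 * Sketch of the "missing_ok" decision used by got_NegAck() above, in
 * self-contained form.  The enum values only mirror DRBD_PROT_A/B/C
 * conceptually; treat all names here as illustrative assumptions.
 */
#include <stdbool.h>

enum demo_proto { DEMO_PROT_A, DEMO_PROT_B, DEMO_PROT_C };

/* In protocols A and B the master bio may already have completed, so a
 * request that is no longer in the collision hash is not an error; the
 * affected range is simply marked out of sync instead. */
static bool demo_missing_ok(enum demo_proto proto)
{
	return proto == DEMO_PROT_A || proto == DEMO_PROT_B;
}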
4684
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004685static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004686{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004687 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004688 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004689 sector_t sector = be64_to_cpu(p->sector);
4690
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004691 mdev = vnr_to_mdev(tconn, pi->vnr);
4692 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004693 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004694
Philipp Reisnerb411b362009-09-25 16:07:19 -07004695 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01004696
Philipp Reisnerb411b362009-09-25 16:07:19 -07004697 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4698 (unsigned long long)sector, be32_to_cpu(p->blksize));
4699
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004700 return validate_req_change_req_state(mdev, p->block_id, sector,
4701 &mdev->read_requests, __func__,
4702 NEG_ACKED, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004703}
4704
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004705static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004706{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004707 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004708 sector_t sector;
4709 int size;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004710 struct p_block_ack *p = pi->data;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004711
4712 mdev = vnr_to_mdev(tconn, pi->vnr);
4713 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004714 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004715
4716 sector = be64_to_cpu(p->sector);
4717 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004718
4719 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4720
4721 dec_rs_pending(mdev);
4722
4723 if (get_ldev_if_state(mdev, D_FAILED)) {
4724 drbd_rs_complete_io(mdev, sector);
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004725 switch (pi->cmd) {
Philipp Reisnerd612d302010-12-27 10:53:28 +01004726 case P_NEG_RS_DREPLY:
4727 drbd_rs_failed_io(mdev, sector, size); /* fall through */
4728 case P_RS_CANCEL:
4729 break;
4730 default:
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004731 BUG();
Philipp Reisnerd612d302010-12-27 10:53:28 +01004732 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004733 put_ldev(mdev);
4734 }
4735
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004736 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004737}
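
/*
 * The get_ldev_if_state()/put_ldev() pair above acts as a conditional
 * reference-count guard around the local disk: only touch it if a
 * reference can still be taken, and drop that reference afterwards.
 * A minimal userspace sketch of that pattern; the names and the atomic
 * counter are assumptions, not DRBD's implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_ldev {
	atomic_int use_cnt;          /* 0 means the backing device is gone */
};

static bool demo_get_ldev(struct demo_ldev *ldev)
{
	int old = atomic_load(&ldev->use_cnt);

	/* Only take a reference while at least one is already held. */
	while (old > 0) {
		if (atomic_compare_exchange_weak(&ldev->use_cnt, &old, old + 1))
			return true;
	}
	return false;
}

static void demo_put_ldev(struct demo_ldev *ldev)
{
	atomic_fetch_sub(&ldev->use_cnt, 1);
}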
4738
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004739static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004740{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004741 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004742 struct p_barrier_ack *p = pi->data;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004743
4744 mdev = vnr_to_mdev(tconn, pi->vnr);
4745 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004746 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004747
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01004748 tl_release(mdev->tconn, p->barrier, be32_to_cpu(p->set_size));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004749
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004750 if (mdev->state.conn == C_AHEAD &&
4751 atomic_read(&mdev->ap_in_flight) == 0 &&
Philipp Reisner370a43e2011-01-14 16:03:11 +01004752 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4753 mdev->start_resync_timer.expires = jiffies + HZ;
4754 add_timer(&mdev->start_resync_timer);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004755 }
4756
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004757 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004758}
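
/*
 * got_BarrierAck() arms the start_resync_timer at most once per
 * AHEAD -> SYNC_SOURCE transition by claiming a flag with
 * test_and_set_bit().  A self-contained sketch of the same "arm once"
 * idiom using C11 atomics; the function and flag names are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool demo_ahead_to_sync_source = false;

/* Returns true for exactly one caller; that caller arms the timer. */
static bool demo_claim_resync_start(void)
{
	/* The atomic exchange plays the role of test_and_set_bit(): the
	 * previous value tells us whether someone already claimed it. */
	return !atomic_exchange(&demo_ahead_to_sync_source, true);
}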
4759
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004760static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004761{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004762 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004763 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004764 struct drbd_work *w;
4765 sector_t sector;
4766 int size;
4767
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004768 mdev = vnr_to_mdev(tconn, pi->vnr);
4769 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004770 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004771
Philipp Reisnerb411b362009-09-25 16:07:19 -07004772 sector = be64_to_cpu(p->sector);
4773 size = be32_to_cpu(p->blksize);
4774
4775 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4776
4777 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01004778 drbd_ov_out_of_sync_found(mdev, sector, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004779 else
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01004780 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004781
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004782 if (!get_ldev(mdev))
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004783 return 0;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004784
Philipp Reisnerb411b362009-09-25 16:07:19 -07004785 drbd_rs_complete_io(mdev, sector);
4786 dec_rs_pending(mdev);
4787
Lars Ellenbergea5442a2010-11-05 09:48:01 +01004788 --mdev->ov_left;
4789
4790 /* let's advance progress step marks only for every other megabyte */
4791 if ((mdev->ov_left & 0x200) == 0x200)
4792 drbd_advance_rs_marks(mdev, mdev->ov_left);
4793
4794 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004795 w = kmalloc(sizeof(*w), GFP_NOIO);
4796 if (w) {
4797 w->cb = w_ov_finished;
Philipp Reisnera21e9292011-02-08 15:08:49 +01004798 w->mdev = mdev;
Philipp Reisnere42325a2011-01-19 13:55:45 +01004799 drbd_queue_work_front(&mdev->tconn->data.work, w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004800 } else {
4801 dev_err(DEV, "kmalloc(w) failed.\n");
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01004802 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004803 drbd_resync_finished(mdev);
4804 }
4805 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004806 put_ldev(mdev);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004807 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004808}
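
/*
 * When the last online-verify reply arrives, got_OVResult() hands the
 * finish step to a work item and, if that allocation fails, finishes
 * synchronously instead.  A condensed userspace sketch of that
 * "defer, with an inline fallback" shape; every name here is
 * illustrative and the queue is stubbed to run the callback at once.
 */
#include <stdlib.h>

struct demo_work {
	void (*cb)(void *ctx);
	void *ctx;
};

static void demo_finish_verify(void *ctx)
{
	(void)ctx;                          /* placeholder for the finish step */
}

static void demo_queue_work(struct demo_work *w)
{
	w->cb(w->ctx);                      /* stub: a real queue runs this later */
	free(w);
}

static void demo_verify_reply(unsigned long *ov_left, void *ctx)
{
	if (--(*ov_left) != 0)
		return;                     /* more replies still outstanding */

	struct demo_work *w = malloc(sizeof(*w));
	if (w) {
		w->cb = demo_finish_verify; /* defer off the receiver path */
		w->ctx = ctx;
		demo_queue_work(w);
	} else {
		demo_finish_verify(ctx);    /* allocation failed: finish inline */
	}
}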
4809
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004810static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004811{
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004812 return 0;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004813}
4814
Andreas Gruenbachera990be42011-04-06 17:56:48 +02004815static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
Philipp Reisner32862ec2011-02-08 16:41:01 +01004816{
Philipp Reisner082a3432011-03-15 16:05:42 +01004817 struct drbd_conf *mdev;
4818 int i, not_empty = 0;
Philipp Reisner32862ec2011-02-08 16:41:01 +01004819
4820 do {
4821 clear_bit(SIGNAL_ASENDER, &tconn->flags);
4822 flush_signals(current);
Philipp Reisner082a3432011-03-15 16:05:42 +01004823 idr_for_each_entry(&tconn->volumes, mdev, i) {
Andreas Gruenbachera990be42011-04-06 17:56:48 +02004824 if (drbd_finish_peer_reqs(mdev))
Philipp Reisner082a3432011-03-15 16:05:42 +01004825 return 1; /* error */
4826 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01004827 set_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisner082a3432011-03-15 16:05:42 +01004828
4829 spin_lock_irq(&tconn->req_lock);
4830 idr_for_each_entry(&tconn->volumes, mdev, i) {
4831 not_empty = !list_empty(&mdev->done_ee);
4832 if (not_empty)
4833 break;
4834 }
4835 spin_unlock_irq(&tconn->req_lock);
Philipp Reisner32862ec2011-02-08 16:41:01 +01004836 } while (not_empty);
4837
4838 return 0;
4839}
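
/*
 * tconn_finish_peer_reqs() loops: drain every volume's done_ee list,
 * then re-check emptiness under the request lock, and repeat until
 * nothing new arrived in between.  A userspace sketch of that
 * drain-until-quiescent loop over N queues, with a pthread mutex
 * standing in for tconn->req_lock; everything here is illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_queue {
	size_t pending;                 /* stand-in for a done_ee list */
};

static void demo_drain(struct demo_queue *q)
{
	q->pending = 0;                 /* stand-in for drbd_finish_peer_reqs() */
}

static void demo_drain_all(struct demo_queue *qs, size_t n,
			   pthread_mutex_t *lock)
{
	bool not_empty;

	do {
		for (size_t i = 0; i < n; i++)
			demo_drain(&qs[i]);

		/* Re-check under the lock: work may have been queued
		 * while we were draining without holding it. */
		not_empty = false;
		pthread_mutex_lock(lock);
		for (size_t i = 0; i < n; i++) {
			if (qs[i].pending) {
				not_empty = true;
				break;
			}
		}
		pthread_mutex_unlock(lock);
	} while (not_empty);
}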
4840
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004841struct asender_cmd {
4842 size_t pkt_size;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004843 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004844};
4845
4846static struct asender_cmd asender_tbl[] = {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004847 [P_PING] = { 0, got_Ping },
4848 [P_PING_ACK] = { 0, got_PingAck },
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004849 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4850 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4851 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4852 [P_DISCARD_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
4853 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4854 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4855 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
4856 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4857 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4858 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4859 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
4860 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
4861 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
4862 [P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
4863 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004864};
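
/*
 * The receive loop below indexes asender_tbl[] by the decoded command,
 * rejects out-of-range or handler-less entries, and verifies the payload
 * size against the table before dispatching.  A compact sketch of that
 * lookup/validation step; the structure and names are illustrative, not
 * DRBD's types.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_cmd {
	size_t pkt_size;                          /* expected payload size */
	int (*fn)(void *conn, void *payload);     /* handler, or NULL */
};

static const struct demo_cmd *demo_lookup(const struct demo_cmd *tbl,
					  size_t tbl_len,
					  unsigned int cmd, uint32_t size)
{
	if (cmd >= tbl_len || !tbl[cmd].fn)
		return NULL;                      /* unknown command: disconnect */
	if (size != tbl[cmd].pkt_size)
		return NULL;                      /* wrong length: reconnect */
	return &tbl[cmd];
}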
4865
Philipp Reisnerb411b362009-09-25 16:07:19 -07004866int drbd_asender(struct drbd_thread *thi)
4867{
Philipp Reisner392c8802011-02-09 10:33:31 +01004868 struct drbd_tconn *tconn = thi->tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004869 struct asender_cmd *cmd = NULL;
Philipp Reisner77351055b2011-02-07 17:24:26 +01004870 struct packet_info pi;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004871 int rv;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004872 void *buf = tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004873 int received = 0;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02004874 unsigned int header_size = drbd_header_size(tconn);
4875 int expect = header_size;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004876 int ping_timeout_active = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004877
Philipp Reisnerb411b362009-09-25 16:07:19 -07004878 current->policy = SCHED_RR; /* Make this a realtime task! */
4879 current->rt_priority = 2; /* more important than all other tasks */
4880
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01004881 while (get_t_state(thi) == RUNNING) {
Philipp Reisner80822282011-02-08 12:46:30 +01004882 drbd_thread_current_set_cpu(thi);
Philipp Reisner32862ec2011-02-08 16:41:01 +01004883 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
Andreas Gruenbachera17647a2011-04-01 12:49:42 +02004884 if (drbd_send_ping(tconn)) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004885 conn_err(tconn, "drbd_send_ping has failed\n");
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01004886 goto reconnect;
4887 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01004888 tconn->meta.socket->sk->sk_rcvtimeo =
4889 tconn->net_conf->ping_timeo*HZ/10;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004890 ping_timeout_active = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004891 }
4892
Philipp Reisner32862ec2011-02-08 16:41:01 +01004893 /* TODO: conditionally cork; it may hurt latency if we cork without
4894 much to send */
4895 if (!tconn->net_conf->no_cork)
4896 drbd_tcp_cork(tconn->meta.socket);
Andreas Gruenbachera990be42011-04-06 17:56:48 +02004897 if (tconn_finish_peer_reqs(tconn)) {
4898 conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
Philipp Reisner32862ec2011-02-08 16:41:01 +01004899 goto reconnect;
Philipp Reisner082a3432011-03-15 16:05:42 +01004900 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004901 /* but unconditionally uncork unless disabled */
Philipp Reisner32862ec2011-02-08 16:41:01 +01004902 if (!tconn->net_conf->no_cork)
4903 drbd_tcp_uncork(tconn->meta.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004904
4905 /* short circuit, recv_msg would return EINTR anyways. */
4906 if (signal_pending(current))
4907 continue;
4908
Philipp Reisner32862ec2011-02-08 16:41:01 +01004909 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
4910 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004911
4912 flush_signals(current);
4913
4914 /* Note:
4915 * -EINTR (on meta) we got a signal
4916 * -EAGAIN (on meta) rcvtimeo expired
4917 * -ECONNRESET other side closed the connection
4918 * -ERESTARTSYS (on data) we got a signal
4919 * rv < 0 other than above: unexpected error!
4920 * rv == expected: full header or command
4921 * rv < expected: "woken" by signal during receive
4922 * rv == 0 : "connection shut down by peer"
4923 */
4924 if (likely(rv > 0)) {
4925 received += rv;
4926 buf += rv;
4927 } else if (rv == 0) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004928 conn_err(tconn, "meta connection shut down by peer.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004929 goto reconnect;
4930 } else if (rv == -EAGAIN) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004931 /* If the data socket received something meanwhile,
4932 * that is good enough: peer is still alive. */
Philipp Reisner32862ec2011-02-08 16:41:01 +01004933 if (time_after(tconn->last_received,
4934 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004935 continue;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004936 if (ping_timeout_active) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004937 conn_err(tconn, "PingAck did not arrive in time.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004938 goto reconnect;
4939 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01004940 set_bit(SEND_PING, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004941 continue;
4942 } else if (rv == -EINTR) {
4943 continue;
4944 } else {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004945 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004946 goto reconnect;
4947 }
4948
4949 if (received == expect && cmd == NULL) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004950 if (decode_header(tconn, tconn->meta.rbuf, &pi))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004951 goto reconnect;
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004952 cmd = &asender_tbl[pi.cmd];
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004953 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004954 conn_err(tconn, "unknown command %d on meta (l: %d)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004955 pi.cmd, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004956 goto disconnect;
4957 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004958 expect = header_size + cmd->pkt_size;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02004959 if (pi.size != expect - header_size) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01004960 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004961 pi.cmd, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004962 goto reconnect;
Philipp Reisner257d0af2011-01-26 12:15:29 +01004963 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004964 }
4965 if (received == expect) {
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004966 int err;
Philipp Reisnera4fbda82011-03-16 11:13:17 +01004967
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004968 err = cmd->fn(tconn, &pi);
4969 if (err) {
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004970 conn_err(tconn, "%pf failed\n", cmd->fn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004971 goto reconnect;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004972 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004973
Philipp Reisnera4fbda82011-03-16 11:13:17 +01004974 tconn->last_received = jiffies;
4975
Lars Ellenbergf36af182011-03-09 22:44:55 +01004976 /* the idle_timeout (ping-int)
4977 * has been restored in got_PingAck() */
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01004978 if (cmd == &asender_tbl[P_PING_ACK])
Lars Ellenbergf36af182011-03-09 22:44:55 +01004979 ping_timeout_active = 0;
4980
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004981 buf = tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004982 received = 0;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02004983 expect = header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004984 cmd = NULL;
4985 }
4986 }
4987
4988 if (0) {
4989reconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004990 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004991 }
4992 if (0) {
4993disconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004994 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004995 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01004996 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004997
Philipp Reisner32862ec2011-02-08 16:41:01 +01004998 conn_info(tconn, "asender terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004999
5000 return 0;
5001}
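
/*
 * The asender loop above is an incremental framing state machine: read
 * until "expect" bytes have accumulated, decode the header once, grow
 * "expect" by the command's payload size, dispatch, then reset.  A
 * userspace sketch of the same shape over a plain read(); header layout,
 * sizes and all names are assumptions for illustration, and the lookup
 * and dispatch helpers are stubbed so the sketch stands alone.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#define DEMO_HDR_SIZE  8
#define DEMO_BUF_SIZE  4096

struct demo_hdr { uint32_t cmd; uint32_t size; };

static uint32_t demo_payload_size(uint32_t cmd) { (void)cmd; return 24; }
static int demo_dispatch(uint32_t cmd, const void *p, uint32_t len)
{
	(void)cmd; (void)p; (void)len;
	return 0;
}

static int demo_receive_loop(int fd)
{
	char buf[DEMO_BUF_SIZE];
	size_t received = 0;
	size_t expect = DEMO_HDR_SIZE;
	int have_hdr = 0;
	struct demo_hdr hdr;

	for (;;) {
		ssize_t rv = read(fd, buf + received, expect - received);
		if (rv <= 0)
			return -1;              /* error, signal, or peer closed */
		received += (size_t)rv;
		if (received < expect)
			continue;               /* short read: keep accumulating */

		if (!have_hdr) {
			memcpy(&hdr, buf, sizeof(hdr));
			/* real code converts cmd/size from network byte order */
			if (hdr.size != demo_payload_size(hdr.cmd))
				return -1;      /* wrong packet size: reconnect */
			expect = DEMO_HDR_SIZE + hdr.size;
			if (expect > sizeof(buf))
				return -1;      /* oversized packet: bail out */
			have_hdr = 1;
			if (received < expect)
				continue;       /* header done, wait for payload */
		}

		if (demo_dispatch(hdr.cmd, buf + DEMO_HDR_SIZE,
				  (uint32_t)(expect - DEMO_HDR_SIZE)))
			return -1;

		received = 0;                   /* reset for the next packet */
		expect = DEMO_HDR_SIZE;
		have_hdr = 0;
	}
}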