/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

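/* Grab @number pages for @mdev: first try the global drbd_pp_pool under
 * drbd_pp_lock, then fall back to alloc_page(GFP_TRY) for the remainder.
 * If not all pages can be obtained, the partially built chain is returned
 * to the pool and NULL is returned; drbd_alloc_pages() will retry. */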
static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}

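/* Collect the finished peer requests from net_ee under req_lock, then free
 * them outside the lock. */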
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
			      bool retry)
{
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	int mxb;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&mdev->pp_in_use) < mxb)
		page = __drbd_alloc_pages(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (page == NULL)
		return;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

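/* Allocate a peer request from drbd_ee_mempool plus, for non-zero data_size,
 * a page chain large enough to hold the payload.  Returns NULL on allocation
 * failure or on an injected DRBD_FAULT_AL_EE fault. */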
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (data_size) {
		page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
		if (!page)
			goto fail;
	}

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		       int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(mdev, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				    struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

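/* Receive @size bytes from the data socket of @tconn.  Logs connection resets
 * and peer shutdowns; a short or failed read normally forces the connection
 * to C_BROKEN_PIPE, unless a clean shutdown after DISCONNECT_SENT is seen. */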
static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(tconn->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			conn_info(tconn, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);

			if (t)
				goto out;
		}
		conn_info(tconn, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv(tconn, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &tconn->my_addr, my_addr_len);

	if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN: case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			conn_err(tconn, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

struct accept_wait_data {
	struct drbd_tconn *tconn;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);

};

static void drbd_incoming_connection(struct sock *sk)
{
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
}

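/* Create, bind and listen on the socket configured in tconn->my_addr, and hook
 * drbd_incoming_connection() into sk_state_change so that an incoming
 * connection completes ad->door_bell.  Returns 0 on success, -EIO on error. */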
static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &tconn->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

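/* Wait up to connect_int seconds (plus/minus some random jitter) for the peer
 * to connect to our listening socket, then accept it.  Returns the established
 * socket, or NULL on timeout, signal, or accept failure. */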
static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter */
	timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "accept failed, err = %d\n", err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(tconn);
	struct packet_info pi;
	int err;

	err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(tconn, tconn->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}
/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_conf *mdev)
{
	int err;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
	if (!err)
		err = drbd_send_sizes(mdev, 0, 0);
	if (!err)
		err = drbd_send_uuids(mdev);
	if (!err)
		err = drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	atomic_set(&mdev->ap_in_flight, 0);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)
{
	struct drbd_socket sock, msock;
	struct drbd_conf *mdev;
	struct net_conf *nc;
	int vnr, timeout, h, ok;
	bool discard_my_data;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.tconn = tconn,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	clear_bit(DISCONNECT_SENT, &tconn->flags);
	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	mutex_init(&sock.mutex);
	sock.sbuf = tconn->data.sbuf;
	sock.rbuf = tconn->data.rbuf;
	sock.socket = NULL;
	mutex_init(&msock.mutex);
	msock.sbuf = tconn->meta.sbuf;
	msock.rbuf = tconn->meta.rbuf;
	msock.socket = NULL;

	/* Assume that the peer only understands protocol 80 until we know better.  */
	tconn->agreed_pro_version = 80;

	if (prepare_listen_socket(tconn, &ad))
		return 0;

	do {
		struct socket *s;

		s = drbd_try_connect(tconn);
		if (s) {
			if (!sock.socket) {
				sock.socket = s;
				send_first_packet(tconn, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
				msock.socket = s;
				send_first_packet(tconn, &msock, P_INITIAL_META);
			} else {
				conn_err(tconn, "Logic error in conn_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock.socket && msock.socket) {
			rcu_read_lock();
			nc = rcu_dereference(tconn->net_conf);
			timeout = nc->ping_timeo * HZ / 10;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeout);
			ok = drbd_socket_okay(&sock.socket);
			ok = drbd_socket_okay(&msock.socket) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(tconn, &ad);
		if (s) {
			int fp = receive_first_packet(tconn, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
			switch (fp) {
			case P_INITIAL_DATA:
				if (sock.socket) {
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(sock.socket);
					sock.socket = s;
					goto randomize;
				}
				sock.socket = s;
				break;
			case P_INITIAL_META:
				set_bit(RESOLVE_CONFLICTS, &tconn->flags);
				if (msock.socket) {
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(msock.socket);
					msock.socket = s;
					goto randomize;
				}
				msock.socket = s;
				break;
			default:
				conn_warn(tconn, "Error receiving initial packet\n");
				sock_release(s);
randomize:
				if (prandom_u32() & 1)
					goto retry;
			}
		}

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		ok = drbd_socket_okay(&sock.socket);
		ok = drbd_socket_okay(&msock.socket) && ok;
	} while (!ok);

	if (ad.s_listen)
		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;
	rcu_read_unlock();

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	tconn->data.socket = sock.socket;
	tconn->meta.socket = msock.socket;
	tconn->last_received = jiffies;

	h = drbd_do_features(tconn);
	if (h <= 0)
		return h;

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
		case -1:
			conn_err(tconn, "Authentication of peer failed\n");
			return -1;
		case 0:
			conn_err(tconn, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	tconn->data.socket->sk->sk_sndtimeo = timeout;
	tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
		return -1;

	set_bit(STATE_SENT, &tconn->flags);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();

		/* Prevent a race between resync-handshake and
		 * being promoted to Primary.
		 *
		 * Grab and release the state mutex, so we know that any current
		 * drbd_set_role() is finished, and any incoming drbd_set_role
		 * will see the STATE_SENT flag, and wait for it to be cleared.
		 */
		mutex_lock(mdev->state_mutex);
		mutex_unlock(mdev->state_mutex);

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &mdev->flags);
		else
			clear_bit(DISCARD_MY_DATA, &mdev->flags);

		drbd_connected(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();

	rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &tconn->flags);
		return 0;
	}

	drbd_thread_start(&tconn->asender);

	mutex_lock(&tconn->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	tconn->net_conf->discard_my_data = 0;
	mutex_unlock(&tconn->conf_update);

	return h;

out_release_sockets:
	if (ad.s_listen)
		sock_release(ad.s_listen);
	if (sock.socket)
		sock_release(sock.socket);
	if (msock.socket)
		sock_release(msock.socket);
	return -1;
}

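/* Parse a received packet header into struct packet_info.  The header format
 * (protocol 100, 95 or 80) is detected via the magic value; pi->data is set
 * to point just past the header.  Returns 0, or -EINVAL on an unexpected
 * magic or non-zero padding. */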
static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(tconn);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			conn_err(tconn, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 tconn->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}

static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
	void *buffer = tconn->data.rbuf;
	int err;

	err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
	if (err)
		return err;

	err = decode_header(tconn, buffer, pi);
	tconn->last_received = jiffies;

	return err;
}

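/* If the write ordering mode is at least WO_bdev_flush, issue a flush to the
 * backing device of every volume of this connection; on failure, fall back to
 * WO_drain_io via drbd_bump_write_ordering(). */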
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001144static void drbd_flush(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001145{
1146 int rv;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001147 struct drbd_conf *mdev;
1148 int vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001149
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001150 if (tconn->write_ordering >= WO_bdev_flush) {
Lars Ellenberg615e0872011-11-17 14:32:12 +01001151 rcu_read_lock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001152 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
Lars Ellenberg615e0872011-11-17 14:32:12 +01001153 if (!get_ldev(mdev))
1154 continue;
1155 kref_get(&mdev->kref);
1156 rcu_read_unlock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001157
Lars Ellenberg615e0872011-11-17 14:32:12 +01001158 rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
1159 GFP_NOIO, NULL);
1160 if (rv) {
1161 dev_info(DEV, "local disk flush failed with status %d\n", rv);
 1162 /* We would rather check for EOPNOTSUPP, but that is not reliable.
 1163 * Don't retry the flush for ANY return value != 0;
 1164 * fall back to draining instead. */
1165 drbd_bump_write_ordering(tconn, WO_drain_io);
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001166 }
Lars Ellenberg615e0872011-11-17 14:32:12 +01001167 put_ldev(mdev);
1168 kref_put(&mdev->kref, &drbd_minor_destroy);
1169
1170 rcu_read_lock();
1171 if (rv)
1172 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001173 }
Lars Ellenberg615e0872011-11-17 14:32:12 +01001174 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001175 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001176}
1177
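/* An epoch collects the writes between two P_BARRIER packets.  As the code
 * below shows, an epoch is finished (P_BARRIER_ACK sent, the object freed or
 * recycled) once it has seen its barrier number (or a cleanup event), all of
 * its requests have completed (active == 0), and it contains at least one
 * request. */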
1178/**
 1179 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
 1180 * @tconn: DRBD connection.
1181 * @epoch: Epoch object.
1182 * @ev: Epoch event.
1183 */
Philipp Reisner1e9dd292011-11-10 15:14:53 +01001184static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001185 struct drbd_epoch *epoch,
1186 enum epoch_event ev)
1187{
Philipp Reisner2451fc32010-08-24 13:43:11 +02001188 int epoch_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001189 struct drbd_epoch *next_epoch;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001190 enum finish_epoch rv = FE_STILL_LIVE;
1191
Philipp Reisner12038a32011-11-09 19:18:00 +01001192 spin_lock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001193 do {
1194 next_epoch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001195
1196 epoch_size = atomic_read(&epoch->epoch_size);
1197
1198 switch (ev & ~EV_CLEANUP) {
1199 case EV_PUT:
1200 atomic_dec(&epoch->active);
1201 break;
1202 case EV_GOT_BARRIER_NR:
1203 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001204 break;
1205 case EV_BECAME_LAST:
 1206 /* nothing to do */
1207 break;
1208 }
1209
Philipp Reisnerb411b362009-09-25 16:07:19 -07001210 if (epoch_size != 0 &&
1211 atomic_read(&epoch->active) == 0 &&
Philipp Reisner80f9fd52011-07-18 15:45:15 +02001212 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001213 if (!(ev & EV_CLEANUP)) {
Philipp Reisner12038a32011-11-09 19:18:00 +01001214 spin_unlock(&tconn->epoch_lock);
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001215 drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
Philipp Reisner12038a32011-11-09 19:18:00 +01001216 spin_lock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001217 }
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001218#if 0
1219 /* FIXME: dec unacked on connection, once we have
1220 * something to count pending connection packets in. */
Philipp Reisner80f9fd52011-07-18 15:45:15 +02001221 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001222 dec_unacked(epoch->tconn);
1223#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07001224
Philipp Reisner12038a32011-11-09 19:18:00 +01001225 if (tconn->current_epoch != epoch) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001226 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1227 list_del(&epoch->list);
1228 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
Philipp Reisner12038a32011-11-09 19:18:00 +01001229 tconn->epochs--;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001230 kfree(epoch);
1231
1232 if (rv == FE_STILL_LIVE)
1233 rv = FE_DESTROYED;
1234 } else {
1235 epoch->flags = 0;
1236 atomic_set(&epoch->epoch_size, 0);
Uwe Kleine-König698f9312010-07-02 20:41:51 +02001237 /* atomic_set(&epoch->active, 0); is already zero */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001238 if (rv == FE_STILL_LIVE)
1239 rv = FE_RECYCLED;
1240 }
1241 }
1242
1243 if (!next_epoch)
1244 break;
1245
1246 epoch = next_epoch;
1247 } while (1);
1248
Philipp Reisner12038a32011-11-09 19:18:00 +01001249 spin_unlock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001250
Philipp Reisnerb411b362009-09-25 16:07:19 -07001251 return rv;
1252}
1253
1254/**
 1255 * drbd_bump_write_ordering() - Fall back to another write ordering method
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001256 * @tconn: DRBD connection.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001257 * @wo: Write ordering method to try.
1258 */
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001259void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001260{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001261 struct disk_conf *dc;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001262 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001263 enum write_ordering_e pwo;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001264 int vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001265 static char *write_ordering_str[] = {
1266 [WO_none] = "none",
1267 [WO_drain_io] = "drain",
1268 [WO_bdev_flush] = "flush",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001269 };
1270
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001271 pwo = tconn->write_ordering;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001272 wo = min(pwo, wo);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001273 rcu_read_lock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001274 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
Philipp Reisner27eb13e2012-03-30 14:12:15 +02001275 if (!get_ldev_if_state(mdev, D_ATTACHING))
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001276 continue;
1277 dc = rcu_dereference(mdev->ldev->disk_conf);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001278
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001279 if (wo == WO_bdev_flush && !dc->disk_flushes)
1280 wo = WO_drain_io;
1281 if (wo == WO_drain_io && !dc->disk_drain)
1282 wo = WO_none;
1283 put_ldev(mdev);
1284 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001285 rcu_read_unlock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001286 tconn->write_ordering = wo;
1287 if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
1288 conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001289}
1290
1291/**
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001292 * drbd_submit_peer_request() - submit the pages of a peer request via one or more bios
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001293 * @mdev: DRBD device.
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001294 * @peer_req: peer request
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001295 * @rw: flag field, see bio->bi_rw
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001296 *
1297 * May spread the pages to multiple bios,
1298 * depending on bio_add_page restrictions.
1299 *
1300 * Returns 0 if all bios have been submitted,
1301 * -ENOMEM if we could not allocate enough bios,
1302 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1303 * single page to an empty bio (which should never happen and likely indicates
1304 * that the lower level IO stack is in some way broken). This has been observed
1305 * on certain Xen deployments.
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001306 */
1307/* TODO allocate from our own bio_set. */
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001308int drbd_submit_peer_request(struct drbd_conf *mdev,
1309 struct drbd_peer_request *peer_req,
1310 const unsigned rw, const int fault_type)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001311{
1312 struct bio *bios = NULL;
1313 struct bio *bio;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001314 struct page *page = peer_req->pages;
1315 sector_t sector = peer_req->i.sector;
1316 unsigned ds = peer_req->i.size;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001317 unsigned n_bios = 0;
1318 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001319 int err = -ENOMEM;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001320
1321 /* In most cases, we will only need one bio. But in case the lower
1322 * level restrictions happen to be different at this offset on this
1323 * side than those of the sending peer, we may need to submit the
Lars Ellenberg9476f392011-02-23 17:02:01 +01001324 * request in more than one bio.
1325 *
1326 * Plain bio_alloc is good enough here, this is no DRBD internally
1327 * generated bio, but a bio allocated on behalf of the peer.
1328 */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001329next_bio:
1330 bio = bio_alloc(GFP_NOIO, nr_pages);
1331 if (!bio) {
1332 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1333 goto fail;
1334 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001335 /* > peer_req->i.sector, unless this is the first bio */
Kent Overstreet4f024f32013-10-11 15:44:27 -07001336 bio->bi_iter.bi_sector = sector;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001337 bio->bi_bdev = mdev->ldev->backing_bdev;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001338 bio->bi_rw = rw;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001339 bio->bi_private = peer_req;
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01001340 bio->bi_end_io = drbd_peer_request_endio;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001341
1342 bio->bi_next = bios;
1343 bios = bio;
1344 ++n_bios;
1345
1346 page_chain_for_each(page) {
1347 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1348 if (!bio_add_page(bio, page, len, 0)) {
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001349 /* A single page must always be possible!
1350 * But in case it fails anyways,
1351 * we deal with it, and complain (below). */
1352 if (bio->bi_vcnt == 0) {
1353 dev_err(DEV,
1354 "bio_add_page failed for len=%u, "
1355 "bi_vcnt=0 (bi_sector=%llu)\n",
Kent Overstreet4f024f32013-10-11 15:44:27 -07001356 len, (uint64_t)bio->bi_iter.bi_sector);
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001357 err = -ENOSPC;
1358 goto fail;
1359 }
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001360 goto next_bio;
1361 }
1362 ds -= len;
1363 sector += len >> 9;
1364 --nr_pages;
1365 }
1366 D_ASSERT(page == NULL);
1367 D_ASSERT(ds == 0);
1368
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001369 atomic_set(&peer_req->pending_bios, n_bios);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001370 do {
1371 bio = bios;
1372 bios = bios->bi_next;
1373 bio->bi_next = NULL;
1374
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001375 drbd_generic_make_request(mdev, fault_type, bio);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001376 } while (bios);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001377 return 0;
1378
1379fail:
1380 while (bios) {
1381 bio = bios;
1382 bios = bios->bi_next;
1383 bio_put(bio);
1384 }
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001385 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001386}
1387
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001388static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001389 struct drbd_peer_request *peer_req)
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001390{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001391 struct drbd_interval *i = &peer_req->i;
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001392
1393 drbd_remove_interval(&mdev->write_requests, i);
1394 drbd_clear_interval(i);
1395
Andreas Gruenbacher6c852be2011-02-04 15:38:52 +01001396 /* Wake up any processes waiting for this peer request to complete. */
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001397 if (i->waiting)
1398 wake_up(&mdev->misc_wait);
1399}
1400
Philipp Reisner77fede52011-11-10 21:19:11 +01001401void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1402{
1403 struct drbd_conf *mdev;
1404 int vnr;
1405
1406 rcu_read_lock();
1407 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1408 kref_get(&mdev->kref);
1409 rcu_read_unlock();
1410 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1411 kref_put(&mdev->kref, &drbd_minor_destroy);
1412 rcu_read_lock();
1413 }
1414 rcu_read_unlock();
1415}
1416
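/* receive_Barrier(), below: the peer closed its current epoch.  Record the
 * barrier number; depending on the write ordering method, either wait for all
 * active requests and flush the disks, or simply start a fresh epoch.  The
 * P_BARRIER_ACK itself is sent from drbd_may_finish_epoch() once the epoch is
 * really finished. */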
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001417static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001418{
Philipp Reisner2451fc32010-08-24 13:43:11 +02001419 int rv;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001420 struct p_barrier *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001421 struct drbd_epoch *epoch;
1422
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001423 /* FIXME these are unacked on connection,
1424 * not a specific (peer)device.
1425 */
Philipp Reisner12038a32011-11-09 19:18:00 +01001426 tconn->current_epoch->barrier_nr = p->barrier;
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001427 tconn->current_epoch->tconn = tconn;
Philipp Reisner1e9dd292011-11-10 15:14:53 +01001428 rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001429
1430 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1431 * the activity log, which means it would not be resynced in case the
1432 * R_PRIMARY crashes now.
1433 * Therefore we must send the barrier_ack after the barrier request was
1434 * completed. */
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001435 switch (tconn->write_ordering) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001436 case WO_none:
1437 if (rv == FE_RECYCLED)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001438 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001439
1440 /* receiver context, in the writeout path of the other node.
1441 * avoid potential distributed deadlock */
1442 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1443 if (epoch)
1444 break;
1445 else
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001446 conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
Philipp Reisner2451fc32010-08-24 13:43:11 +02001447 /* Fall through */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001448
1449 case WO_bdev_flush:
1450 case WO_drain_io:
Philipp Reisner77fede52011-11-10 21:19:11 +01001451 conn_wait_active_ee_empty(tconn);
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001452 drbd_flush(tconn);
Philipp Reisner2451fc32010-08-24 13:43:11 +02001453
Philipp Reisner12038a32011-11-09 19:18:00 +01001454 if (atomic_read(&tconn->current_epoch->epoch_size)) {
Philipp Reisner2451fc32010-08-24 13:43:11 +02001455 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1456 if (epoch)
1457 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001458 }
1459
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001460 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001461 default:
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001462 conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001463 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001464 }
1465
1466 epoch->flags = 0;
1467 atomic_set(&epoch->epoch_size, 0);
1468 atomic_set(&epoch->active, 0);
1469
Philipp Reisner12038a32011-11-09 19:18:00 +01001470 spin_lock(&tconn->epoch_lock);
1471 if (atomic_read(&tconn->current_epoch->epoch_size)) {
1472 list_add(&epoch->list, &tconn->current_epoch->list);
1473 tconn->current_epoch = epoch;
1474 tconn->epochs++;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001475 } else {
1476 /* The current_epoch got recycled while we allocated this one... */
1477 kfree(epoch);
1478 }
Philipp Reisner12038a32011-11-09 19:18:00 +01001479 spin_unlock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001480
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001481 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001482}
1483
1484/* used from receive_RSDataReply (recv_resync_read)
1485 * and from receive_Data */
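/* read_in_block() optionally receives the peer's data digest (when a
 * peer_integrity_tfm is configured), allocates a peer request, receives
 * data_size bytes into its page chain, and verifies the digest over the
 * received pages.  It returns NULL on any receive, allocation or digest
 * error. */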
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01001486static struct drbd_peer_request *
1487read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1488 int data_size) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001489{
Lars Ellenberg66660322010-04-06 12:15:04 +02001490 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001491 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001492 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001493 int dgs, ds, err;
Philipp Reisnera0638452011-01-19 14:31:32 +01001494 void *dig_in = mdev->tconn->int_dig_in;
1495 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001496 unsigned long *data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001497
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001498 dgs = 0;
1499 if (mdev->tconn->peer_integrity_tfm) {
1500 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001501 /*
1502 * FIXME: Receive the incoming digest into the receive buffer
1503 * here, together with its struct p_data?
1504 */
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001505 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1506 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001507 return NULL;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001508 data_size -= dgs;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001509 }
1510
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001511 if (!expect(IS_ALIGNED(data_size, 512)))
1512 return NULL;
1513 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1514 return NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001515
Lars Ellenberg66660322010-04-06 12:15:04 +02001516 /* even though we trust our peer,
1517 * we sometimes have to double check. */
1518 if (sector + (data_size>>9) > capacity) {
Lars Ellenbergfdda6542011-01-24 15:11:01 +01001519 dev_err(DEV, "request from peer beyond end of local disk: "
1520 "capacity: %llus < sector: %llus + size: %u\n",
Lars Ellenberg66660322010-04-06 12:15:04 +02001521 (unsigned long long)capacity,
1522 (unsigned long long)sector, data_size);
1523 return NULL;
1524 }
1525
Philipp Reisnerb411b362009-09-25 16:07:19 -07001526 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1527 * "criss-cross" setup, that might cause write-out on some other DRBD,
1528 * which in turn might block on the other node at this very place. */
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02001529 peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001530 if (!peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001531 return NULL;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001532
Lars Ellenberga73ff322012-06-25 19:15:38 +02001533 if (!data_size)
Lars Ellenberg81a35372012-07-30 09:00:54 +02001534 return peer_req;
Lars Ellenberga73ff322012-06-25 19:15:38 +02001535
Philipp Reisnerb411b362009-09-25 16:07:19 -07001536 ds = data_size;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001537 page = peer_req->pages;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001538 page_chain_for_each(page) {
1539 unsigned len = min_t(int, ds, PAGE_SIZE);
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001540 data = kmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001541 err = drbd_recv_all_warn(mdev->tconn, data, len);
Andreas Gruenbacher0cf9d272010-12-07 10:43:29 +01001542 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001543 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1544 data[0] = data[0] ^ (unsigned long)-1;
1545 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001546 kunmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001547 if (err) {
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001548 drbd_free_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001549 return NULL;
1550 }
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001551 ds -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001552 }
1553
1554 if (dgs) {
Andreas Gruenbacher5b614ab2011-04-27 21:00:12 +02001555 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001556 if (memcmp(dig_in, dig_vv, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001557 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1558 (unsigned long long)sector, data_size);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001559 drbd_free_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001560 return NULL;
1561 }
1562 }
1563 mdev->recv_cnt += data_size>>9;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001564 return peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001565}
1566
1567/* drbd_drain_block() just takes a data block
1568 * out of the socket input buffer, and discards it.
1569 */
1570static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1571{
1572 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001573 int err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001574 void *data;
1575
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001576 if (!data_size)
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001577 return 0;
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001578
Andreas Gruenbacherc37c8ec2011-04-07 21:02:09 +02001579 page = drbd_alloc_pages(mdev, 1, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001580
1581 data = kmap(page);
1582 while (data_size) {
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001583 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1584
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001585 err = drbd_recv_all_warn(mdev->tconn, data, len);
1586 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001587 break;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001588 data_size -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001589 }
1590 kunmap(page);
Andreas Gruenbacher5cc287e2011-04-07 21:02:59 +02001591 drbd_free_pages(mdev, page, 0);
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001592 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593}
1594
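/* recv_dless_read(), below: the peer answered one of our read requests, so
 * copy the incoming data (after an optional digest) straight into the pages of
 * the request's master bio, then verify the digest over that bio. */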
1595static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1596 sector_t sector, int data_size)
1597{
Kent Overstreet79886132013-11-23 17:19:00 -08001598 struct bio_vec bvec;
1599 struct bvec_iter iter;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001600 struct bio *bio;
Kent Overstreet79886132013-11-23 17:19:00 -08001601 int dgs, err, expect;
Philipp Reisnera0638452011-01-19 14:31:32 +01001602 void *dig_in = mdev->tconn->int_dig_in;
1603 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001604
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001605 dgs = 0;
1606 if (mdev->tconn->peer_integrity_tfm) {
1607 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001608 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1609 if (err)
1610 return err;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001611 data_size -= dgs;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001612 }
1613
Philipp Reisnerb411b362009-09-25 16:07:19 -07001614 /* optimistically update recv_cnt. if receiving fails below,
1615 * we disconnect anyways, and counters will be reset. */
1616 mdev->recv_cnt += data_size>>9;
1617
1618 bio = req->master_bio;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001619 D_ASSERT(sector == bio->bi_iter.bi_sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001620
Kent Overstreet79886132013-11-23 17:19:00 -08001621 bio_for_each_segment(bvec, bio, iter) {
1622 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1623 expect = min_t(int, data_size, bvec.bv_len);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001624 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
Kent Overstreet79886132013-11-23 17:19:00 -08001625 kunmap(bvec.bv_page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001626 if (err)
1627 return err;
1628 data_size -= expect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001629 }
1630
1631 if (dgs) {
Andreas Gruenbacher5b614ab2011-04-27 21:00:12 +02001632 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001633 if (memcmp(dig_in, dig_vv, dgs)) {
1634 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001635 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001636 }
1637 }
1638
1639 D_ASSERT(data_size == 0);
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001640 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001641}
1642
Andreas Gruenbachera990be42011-04-06 17:56:48 +02001643/*
1644 * e_end_resync_block() is called in asender context via
1645 * drbd_finish_peer_reqs().
1646 */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001647static int e_end_resync_block(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001648{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001649 struct drbd_peer_request *peer_req =
1650 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001651 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001652 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001653 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001654
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001655 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001656
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001657 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1658 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001659 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001660 } else {
1661 /* Record failure to sync */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001662 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001663
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001664 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001665 }
1666 dec_unacked(mdev);
1667
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001668 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001669}
1670
1671static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1672{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001673 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001674
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001675 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1676 if (!peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001677 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001678
1679 dec_rs_pending(mdev);
1680
Philipp Reisnerb411b362009-09-25 16:07:19 -07001681 inc_unacked(mdev);
 1682 /* corresponding dec_unacked() in e_end_resync_block(),
 1683 * or in _drbd_clear_done_ee, respectively */
1684
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001685 peer_req->w.cb = e_end_resync_block;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001686
Philipp Reisner87eeee42011-01-19 14:16:30 +01001687 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001688 list_add(&peer_req->w.list, &mdev->sync_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001689 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001690
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001691 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001692 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001693 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001694
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001695 /* don't care for the reason here */
1696 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01001697 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001698 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001699 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001700
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001701 drbd_free_peer_req(mdev, peer_req);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001702fail:
1703 put_ldev(mdev);
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001704 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001705}
1706
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001707static struct drbd_request *
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001708find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1709 sector_t sector, bool missing_ok, const char *func)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001710{
1711 struct drbd_request *req;
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001712
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001713 /* Request object according to our peer */
1714 req = (struct drbd_request *)(unsigned long)id;
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001715 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001716 return req;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001717 if (!missing_ok) {
Andreas Gruenbacher5af172e2011-07-15 09:43:23 +02001718 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001719 (unsigned long)id, (unsigned long long)sector);
1720 }
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001721 return NULL;
1722}
1723
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001724static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001725{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001726 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001727 struct drbd_request *req;
1728 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001729 int err;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001730 struct p_data *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001731
1732 mdev = vnr_to_mdev(tconn, pi->vnr);
1733 if (!mdev)
1734 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001735
1736 sector = be64_to_cpu(p->sector);
1737
Philipp Reisner87eeee42011-01-19 14:16:30 +01001738 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001739 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001740 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001741 if (unlikely(!req))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001742 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001743
Bart Van Assche24c48302011-05-21 18:32:29 +02001744 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
Philipp Reisnerb411b362009-09-25 16:07:19 -07001745 * special casing it there for the various failure cases.
1746 * still no race with drbd_fail_pending_reads */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001747 err = recv_dless_read(mdev, req, sector, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001748 if (!err)
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001749 req_mod(req, DATA_RECEIVED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001750 /* else: nothing. handled from drbd_disconnect...
1751 * I don't think we may complete this just yet
1752 * in case we are "on-disconnect: freeze" */
1753
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001754 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001755}
1756
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001757static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001758{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001759 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001760 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001761 int err;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001762 struct p_data *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001763
1764 mdev = vnr_to_mdev(tconn, pi->vnr);
1765 if (!mdev)
1766 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001767
1768 sector = be64_to_cpu(p->sector);
1769 D_ASSERT(p->block_id == ID_SYNCER);
1770
1771 if (get_ldev(mdev)) {
1772 /* data is submitted to disk within recv_resync_read.
1773 * corresponding put_ldev done below on error,
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01001774 * or in drbd_peer_request_endio. */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001775 err = recv_resync_read(mdev, sector, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001776 } else {
1777 if (__ratelimit(&drbd_ratelimit_state))
1778 dev_err(DEV, "Can not write resync data to local disk.\n");
1779
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001780 err = drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001781
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001782 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001783 }
1784
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001785 atomic_add(pi->size >> 9, &mdev->rs_sect_in);
Philipp Reisner778f2712010-07-06 11:14:00 +02001786
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001787 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001788}
1789
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001790static void restart_conflicting_writes(struct drbd_conf *mdev,
1791 sector_t sector, int size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001792{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001793 struct drbd_interval *i;
1794 struct drbd_request *req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001795
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001796 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1797 if (!i->local)
1798 continue;
1799 req = container_of(i, struct drbd_request, i);
1800 if (req->rq_state & RQ_LOCAL_PENDING ||
1801 !(req->rq_state & RQ_POSTPONED))
1802 continue;
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01001803 /* as it is RQ_POSTPONED, this will cause it to
1804 * be queued on the retry workqueue. */
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001805 __req_mod(req, CONFLICT_RESOLVED, NULL);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001806 }
1807}
1808
Andreas Gruenbachera990be42011-04-06 17:56:48 +02001809/*
1810 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
Philipp Reisnerb411b362009-09-25 16:07:19 -07001811 */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001812static int e_end_block(struct drbd_work *w, int cancel)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001813{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001814 struct drbd_peer_request *peer_req =
1815 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001816 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001817 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001818 int err = 0, pcmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001819
Philipp Reisner303d1442011-04-13 16:24:47 -07001820 if (peer_req->flags & EE_SEND_WRITE_ACK) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001821 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001822 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1823 mdev->state.conn <= C_PAUSED_SYNC_T &&
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001824 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
Philipp Reisnerb411b362009-09-25 16:07:19 -07001825 P_RS_WRITE_ACK : P_WRITE_ACK;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001826 err = drbd_send_ack(mdev, pcmd, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001827 if (pcmd == P_RS_WRITE_ACK)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001828 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001829 } else {
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001830 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001831 /* we expect it to be marked out of sync anyways...
1832 * maybe assert this? */
1833 }
1834 dec_unacked(mdev);
1835 }
1836 /* we delete from the conflict detection hash _after_ we sent out the
1837 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
Philipp Reisner302bdea2011-04-21 11:36:49 +02001838 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01001839 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001840 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1841 drbd_remove_epoch_entry_interval(mdev, peer_req);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001842 if (peer_req->flags & EE_RESTART_REQUESTS)
1843 restart_conflicting_writes(mdev, sector, peer_req->i.size);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001844 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbb3bfe92011-01-21 15:59:23 +01001845 } else
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001846 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001847
Philipp Reisner1e9dd292011-11-10 15:14:53 +01001848 drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001849
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001850 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001851}
1852
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001853static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001854{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001855 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001856 struct drbd_peer_request *peer_req =
1857 container_of(w, struct drbd_peer_request, w);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001858 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001859
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001860 err = drbd_send_ack(mdev, ack, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001861 dec_unacked(mdev);
1862
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001863 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001864}
1865
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001866static int e_send_superseded(struct drbd_work *w, int unused)
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001867{
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001868 return e_send_ack(w, P_SUPERSEDED);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001869}
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001870
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001871static int e_send_retry_write(struct drbd_work *w, int unused)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001872{
1873 struct drbd_tconn *tconn = w->mdev->tconn;
1874
1875 return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001876 P_RETRY_WRITE : P_SUPERSEDED);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001877}
1878
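/* Worked example for the wrap-around comparison below: seq_greater(1, 0xffffffff)
 * is true, because (s32)1 - (s32)0xffffffff == 1 - (-1) == 2 > 0, whereas a
 * plain unsigned comparison would consider 1 the smaller value. */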
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001879static bool seq_greater(u32 a, u32 b)
1880{
1881 /*
1882 * We assume 32-bit wrap-around here.
1883 * For 24-bit wrap-around, we would have to shift:
1884 * a <<= 8; b <<= 8;
1885 */
1886 return (s32)a - (s32)b > 0;
1887}
1888
1889static u32 seq_max(u32 a, u32 b)
1890{
1891 return seq_greater(a, b) ? a : b;
1892}
1893
Andreas Gruenbacher43ae0772011-02-03 18:42:08 +01001894static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001895{
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001896 unsigned int newest_peer_seq;
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001897
Philipp Reisnerb874d232013-10-23 10:59:16 +02001898 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001899 spin_lock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001900 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1901 mdev->peer_seq = newest_peer_seq;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001902 spin_unlock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001903 /* wake up only if we actually changed mdev->peer_seq */
1904 if (peer_seq == newest_peer_seq)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001905 wake_up(&mdev->seq_wait);
1906 }
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001907}
1908
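/* overlaps(), below: s1 and s2 are start sectors, l1 and l2 are lengths in
 * bytes; l >> 9 converts a byte count into 512-byte sectors.  Two intervals
 * overlap unless one ends at or before the start of the other. */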
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001909static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
1910{
1911 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1912}
1913
1914/* maybe change sync_ee into interval trees as well? */
Philipp Reisner3ea35df2012-04-06 12:13:18 +02001915static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001916{
1917 struct drbd_peer_request *rs_req;
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001918 bool rv = 0;
1919
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001920 spin_lock_irq(&mdev->tconn->req_lock);
1921 list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1922 if (overlaps(peer_req->i.sector, peer_req->i.size,
1923 rs_req->i.sector, rs_req->i.size)) {
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001924 rv = 1;
1925 break;
1926 }
1927 }
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001928 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001929
1930 return rv;
1931}
1932
Philipp Reisnerb411b362009-09-25 16:07:19 -07001933/* Called from receive_Data.
1934 * Synchronize packets on sock with packets on msock.
1935 *
1936 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1937 * packet traveling on msock, they are still processed in the order they have
1938 * been sent.
1939 *
1940 * Note: we don't care for Ack packets overtaking P_DATA packets.
1941 *
 1942 * In case peer_seq is larger than mdev->peer_seq, there are
 1943 * outstanding packets on the msock. We wait for them to arrive.
 1944 * In case this is the logically next packet, we update mdev->peer_seq
1945 * ourselves. Correctly handles 32bit wrap around.
1946 *
 1947 * Assume we have a 10 GBit connection, that is about 1<<30 bytes per second,
 1948 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 1949 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 1950 * 1<<11 == 2048 seconds, aka ages, for the 32bit wrap around...
1951 *
1952 * returns 0 if we may process the packet,
1953 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001954static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001955{
1956 DEFINE_WAIT(wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001957 long timeout;
Philipp Reisnerb874d232013-10-23 10:59:16 +02001958 int ret = 0, tp;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001959
Philipp Reisnerb874d232013-10-23 10:59:16 +02001960 if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001961 return 0;
1962
Philipp Reisnerb411b362009-09-25 16:07:19 -07001963 spin_lock(&mdev->peer_seq_lock);
1964 for (;;) {
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001965 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1966 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001967 break;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001968 }
Philipp Reisnerb874d232013-10-23 10:59:16 +02001969
Philipp Reisnerb411b362009-09-25 16:07:19 -07001970 if (signal_pending(current)) {
1971 ret = -ERESTARTSYS;
1972 break;
1973 }
Philipp Reisnerb874d232013-10-23 10:59:16 +02001974
1975 rcu_read_lock();
1976 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1977 rcu_read_unlock();
1978
1979 if (!tp)
1980 break;
1981
1982 /* Only need to wait if two_primaries is enabled */
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001983 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001984 spin_unlock(&mdev->peer_seq_lock);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001985 rcu_read_lock();
1986 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1987 rcu_read_unlock();
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001988 timeout = schedule_timeout(timeout);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001989 spin_lock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001990 if (!timeout) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001991 ret = -ETIMEDOUT;
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001992 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001993 break;
1994 }
1995 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001996 spin_unlock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001997 finish_wait(&mdev->seq_wait, &wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001998 return ret;
1999}
2000
Lars Ellenberg688593c2010-11-17 22:25:03 +01002001/* see also bio_flags_to_wire()
2002 * DRBD_REQ_*, because we need to semantically map the flags to data packet
2003 * flags and back. We may replicate to other kernel versions. */
2004static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002005{
Lars Ellenberg688593c2010-11-17 22:25:03 +01002006 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2007 (dpf & DP_FUA ? REQ_FUA : 0) |
2008 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2009 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002010}
2011
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002012static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2013 unsigned int size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002014{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002015 struct drbd_interval *i;
2016
2017 repeat:
2018 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2019 struct drbd_request *req;
2020 struct bio_and_error m;
2021
2022 if (!i->local)
2023 continue;
2024 req = container_of(i, struct drbd_request, i);
2025 if (!(req->rq_state & RQ_POSTPONED))
2026 continue;
2027 req->rq_state &= ~RQ_POSTPONED;
2028 __req_mod(req, NEG_ACKED, &m);
2029 spin_unlock_irq(&mdev->tconn->req_lock);
2030 if (m.bio)
2031 complete_master_bio(mdev, &m);
2032 spin_lock_irq(&mdev->tconn->req_lock);
2033 goto repeat;
2034 }
2035}
2036
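/* handle_write_conflicts(), below: insert the peer request into the
 * write_requests interval tree and resolve any overlap with local requests.
 * Depending on whether this node carries the RESOLVE_CONFLICTS flag, an
 * overlapped peer request is either answered as superseded / queued for a
 * retry answer, or we wait for the conflicting local request to finish before
 * submitting the peer request. */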
2037static int handle_write_conflicts(struct drbd_conf *mdev,
2038 struct drbd_peer_request *peer_req)
2039{
2040 struct drbd_tconn *tconn = mdev->tconn;
Lars Ellenberg427c0432012-08-01 12:43:01 +02002041 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002042 sector_t sector = peer_req->i.sector;
2043 const unsigned int size = peer_req->i.size;
2044 struct drbd_interval *i;
2045 bool equal;
2046 int err;
2047
2048 /*
2049 * Inserting the peer request into the write_requests tree will prevent
2050 * new conflicting local requests from being added.
2051 */
2052 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2053
2054 repeat:
2055 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2056 if (i == &peer_req->i)
2057 continue;
2058
2059 if (!i->local) {
2060 /*
2061 * Our peer has sent a conflicting remote request; this
2062 * should not happen in a two-node setup. Wait for the
2063 * earlier peer request to complete.
2064 */
2065 err = drbd_wait_misc(mdev, i);
2066 if (err)
2067 goto out;
2068 goto repeat;
2069 }
2070
2071 equal = i->sector == sector && i->size == size;
2072 if (resolve_conflicts) {
2073 /*
2074 * If the peer request is fully contained within the
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002075 * overlapping request, it can be considered overwritten
2076 * and thus superseded; otherwise, it will be retried
2077 * once all overlapping requests have completed.
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002078 */
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002079 bool superseded = i->sector <= sector && i->sector +
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002080 (i->size >> 9) >= sector + (size >> 9);
2081
2082 if (!equal)
2083 dev_alert(DEV, "Concurrent writes detected: "
2084 "local=%llus +%u, remote=%llus +%u, "
2085 "assuming %s came first\n",
2086 (unsigned long long)i->sector, i->size,
2087 (unsigned long long)sector, size,
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002088 superseded ? "local" : "remote");
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002089
2090 inc_unacked(mdev);
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002091 peer_req->w.cb = superseded ? e_send_superseded :
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002092 e_send_retry_write;
2093 list_add_tail(&peer_req->w.list, &mdev->done_ee);
2094 wake_asender(mdev->tconn);
2095
2096 err = -ENOENT;
2097 goto out;
2098 } else {
2099 struct drbd_request *req =
2100 container_of(i, struct drbd_request, i);
2101
2102 if (!equal)
2103 dev_alert(DEV, "Concurrent writes detected: "
2104 "local=%llus +%u, remote=%llus +%u\n",
2105 (unsigned long long)i->sector, i->size,
2106 (unsigned long long)sector, size);
2107
2108 if (req->rq_state & RQ_LOCAL_PENDING ||
2109 !(req->rq_state & RQ_POSTPONED)) {
2110 /*
2111 * Wait for the node with the discard flag to
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002112 * decide if this request has been superseded
2113 * or needs to be retried.
2114 * Requests that have been superseded will
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002115 * disappear from the write_requests tree.
2116 *
2117 * In addition, wait for the conflicting
2118 * request to finish locally before submitting
2119 * the conflicting peer request.
2120 */
2121 err = drbd_wait_misc(mdev, &req->i);
2122 if (err) {
2123 _conn_request_state(mdev->tconn,
2124 NS(conn, C_TIMEOUT),
2125 CS_HARD);
2126 fail_postponed_requests(mdev, sector, size);
2127 goto out;
2128 }
2129 goto repeat;
2130 }
2131 /*
2132 * Remember to restart the conflicting requests after
2133 * the new peer request has completed.
2134 */
2135 peer_req->flags |= EE_RESTART_REQUESTS;
2136 }
2137 }
2138 err = 0;
2139
2140 out:
2141 if (err)
2142 drbd_remove_epoch_entry_interval(mdev, peer_req);
2143 return err;
2144}
2145
Philipp Reisnerb411b362009-09-25 16:07:19 -07002146/* mirrored write */
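/* Outline of receive_Data(), derived from the code below: read the block in,
 * assign it to the current epoch, order it against other writes via the peer
 * sequence number (only needed with two primaries), decide the ack policy
 * (protocol C: P_WRITE_ACK, protocol B: P_RECV_ACK, protocol A: none), and
 * finally submit it to the local disk. */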
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002147static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002148{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002149 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002150 sector_t sector;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002151 struct drbd_peer_request *peer_req;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02002152 struct p_data *p = pi->data;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002153 u32 peer_seq = be32_to_cpu(p->seq_num);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002154 int rw = WRITE;
2155 u32 dp_flags;
Philipp Reisner302bdea2011-04-21 11:36:49 +02002156 int err, tp;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002157
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002158 mdev = vnr_to_mdev(tconn, pi->vnr);
2159 if (!mdev)
2160 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002161
Philipp Reisnerb411b362009-09-25 16:07:19 -07002162 if (!get_ldev(mdev)) {
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002163 int err2;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002164
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002165 err = wait_for_and_update_peer_seq(mdev, peer_seq);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002166 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisner12038a32011-11-09 19:18:00 +01002167 atomic_inc(&tconn->current_epoch->epoch_size);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002168 err2 = drbd_drain_block(mdev, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002169 if (!err)
2170 err = err2;
2171 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002172 }
2173
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01002174 /*
2175 * Corresponding put_ldev done either below (on various errors), or in
2176 * drbd_peer_request_endio, if we successfully submit the data at the
2177 * end of this function.
2178 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002179
2180 sector = be64_to_cpu(p->sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002181 peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002182 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002183 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002184 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002185 }
2186
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002187 peer_req->w.cb = e_end_block;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002188
Lars Ellenberg688593c2010-11-17 22:25:03 +01002189 dp_flags = be32_to_cpu(p->dp_flags);
2190 rw |= wire_flags_to_bio(mdev, dp_flags);
Lars Ellenberg81a35372012-07-30 09:00:54 +02002191 if (peer_req->pages == NULL) {
2192 D_ASSERT(peer_req->i.size == 0);
Lars Ellenberga73ff322012-06-25 19:15:38 +02002193 D_ASSERT(dp_flags & DP_FLUSH);
2194 }
Lars Ellenberg688593c2010-11-17 22:25:03 +01002195
2196 if (dp_flags & DP_MAY_SET_IN_SYNC)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002197 peer_req->flags |= EE_MAY_SET_IN_SYNC;
Lars Ellenberg688593c2010-11-17 22:25:03 +01002198
Philipp Reisner12038a32011-11-09 19:18:00 +01002199 spin_lock(&tconn->epoch_lock);
2200 peer_req->epoch = tconn->current_epoch;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002201 atomic_inc(&peer_req->epoch->epoch_size);
2202 atomic_inc(&peer_req->epoch->active);
Philipp Reisner12038a32011-11-09 19:18:00 +01002203 spin_unlock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002204
Philipp Reisner302bdea2011-04-21 11:36:49 +02002205 rcu_read_lock();
2206 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2207 rcu_read_unlock();
2208 if (tp) {
2209 peer_req->flags |= EE_IN_INTERVAL_TREE;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002210 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2211 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002212 goto out_interrupted;
Philipp Reisner87eeee42011-01-19 14:16:30 +01002213 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002214 err = handle_write_conflicts(mdev, peer_req);
2215 if (err) {
2216 spin_unlock_irq(&mdev->tconn->req_lock);
2217 if (err == -ENOENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002218 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002219 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002220 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002221 goto out_interrupted;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002222 }
Philipp Reisnerb874d232013-10-23 10:59:16 +02002223 } else {
2224 update_peer_seq(mdev, peer_seq);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002225 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb874d232013-10-23 10:59:16 +02002226 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002227 list_add(&peer_req->w.list, &mdev->active_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002228 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002229
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01002230 if (mdev->state.conn == C_SYNC_TARGET)
Philipp Reisner3ea35df2012-04-06 12:13:18 +02002231 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01002232
Philipp Reisner303d1442011-04-13 16:24:47 -07002233 if (mdev->tconn->agreed_pro_version < 100) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02002234 rcu_read_lock();
2235 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
Philipp Reisner303d1442011-04-13 16:24:47 -07002236 case DRBD_PROT_C:
2237 dp_flags |= DP_SEND_WRITE_ACK;
2238 break;
2239 case DRBD_PROT_B:
2240 dp_flags |= DP_SEND_RECEIVE_ACK;
2241 break;
2242 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002243 rcu_read_unlock();
Philipp Reisner303d1442011-04-13 16:24:47 -07002244 }
2245
2246 if (dp_flags & DP_SEND_WRITE_ACK) {
2247 peer_req->flags |= EE_SEND_WRITE_ACK;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002248 inc_unacked(mdev);
2249 /* corresponding dec_unacked() in e_end_block()
2250	 * or in _drbd_clear_done_ee, respectively */
Philipp Reisner303d1442011-04-13 16:24:47 -07002251 }
2252
2253 if (dp_flags & DP_SEND_RECEIVE_ACK) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002254 /* I really don't like it that the receiver thread
2255 * sends on the msock, but anyways */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002256 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002257 }
2258
Lars Ellenberg6719fb02010-10-18 23:04:07 +02002259 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002260 /* In case we have the only disk of the cluster, */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002261 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2262 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2263 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
Lars Ellenberg56392d22013-03-19 18:16:48 +01002264 drbd_al_begin_io(mdev, &peer_req->i, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002265 }
2266
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002267 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2268 if (!err)
2269 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002270
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002271 /* don't care for the reason here */
2272 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002273 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002274 list_del(&peer_req->w.list);
2275 drbd_remove_epoch_entry_interval(mdev, peer_req);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002276 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002277 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
Lars Ellenberg181286a2011-03-31 15:18:56 +02002278 drbd_al_complete_io(mdev, &peer_req->i);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002279
Philipp Reisnerb411b362009-09-25 16:07:19 -07002280out_interrupted:
Philipp Reisner1e9dd292011-11-10 15:14:53 +01002281 drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002282 put_ldev(mdev);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02002283 drbd_free_peer_req(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002284 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002285}
2286
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002287/* We may throttle resync, if the lower device seems to be busy,
2288 * and current sync rate is above c_min_rate.
2289 *
2290 * To decide whether or not the lower device is busy, we use a scheme similar
2291 * to MD RAID's is_mddev_idle(): if the partition stats reveal a "significant"
2292 * amount (more than 64 sectors) of activity that we cannot account for with our
2293 * own resync activity, it obviously is "busy".
2294 *
2295 * The current sync rate used here is based only on the most recent two step marks,
2296 * to give a short-time average so we can react faster.
2297 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002298int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002299{
2300 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2301 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01002302 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002303 int curr_events;
2304 int throttle = 0;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002305 unsigned int c_min_rate;
2306
2307 rcu_read_lock();
2308 c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2309 rcu_read_unlock();
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002310
2311 /* feature disabled? */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002312 if (c_min_rate == 0)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002313 return 0;
2314
Philipp Reisnere3555d82010-11-07 15:56:29 +01002315 spin_lock_irq(&mdev->al_lock);
2316 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2317 if (tmp) {
2318 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2319 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2320 spin_unlock_irq(&mdev->al_lock);
2321 return 0;
2322 }
2323 /* Do not slow down if app IO is already waiting for this extent */
2324 }
2325 spin_unlock_irq(&mdev->al_lock);
2326
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002327 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2328 (int)part_stat_read(&disk->part0, sectors[1]) -
2329 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01002330
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002331 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2332 unsigned long rs_left;
2333 int i;
2334
2335 mdev->rs_last_events = curr_events;
2336
2337 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2338 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01002339 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2340
2341 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2342 rs_left = mdev->ov_left;
2343 else
2344 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002345
2346 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2347 if (!dt)
2348 dt++;
2349 db = mdev->rs_mark_left[i] - rs_left;
2350 dbdt = Bit2KB(db/dt);
2351
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002352 if (dbdt > c_min_rate)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002353 throttle = 1;
2354 }
2355 return throttle;
2356}
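#if 0
/* Illustrative sketch (not built): the rate check performed above, reduced to
 * plain arithmetic.  The factor 4 stands in for Bit2KB() under the assumption
 * of 4 KiB per bitmap bit; names and numbers are examples, not driver API.
 */
static int sketch_should_throttle(unsigned long bits_left_at_mark,
				  unsigned long bits_left_now,
				  unsigned long dt_seconds,
				  unsigned long c_min_rate_kb)
{
	unsigned long db, dbdt;

	if (c_min_rate_kb == 0)			/* feature disabled */
		return 0;
	if (dt_seconds == 0)			/* mirrors the dt++ above */
		dt_seconds = 1;
	db = bits_left_at_mark - bits_left_now;	/* bits resynced since that mark */
	dbdt = db * 4 / dt_seconds;		/* ~Bit2KB(db)/dt, in KiB/s */
	return dbdt > c_min_rate_kb;		/* resyncing faster than c_min_rate */
}
#endif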
2357
2358
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002359static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002360{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002361 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002362 sector_t sector;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002363 sector_t capacity;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002364 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002365 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002366 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002367 unsigned int fault_type;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02002368 struct p_block_req *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002369
2370 mdev = vnr_to_mdev(tconn, pi->vnr);
2371 if (!mdev)
2372 return -EIO;
2373 capacity = drbd_get_capacity(mdev->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002374
2375 sector = be64_to_cpu(p->sector);
2376 size = be32_to_cpu(p->blksize);
2377
Andreas Gruenbacherc670a392011-02-21 12:41:39 +01002378 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002379 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2380 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002381 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002382 }
2383 if (sector + (size>>9) > capacity) {
2384 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2385 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002386 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002387 }
2388
2389 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002390 verb = 1;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002391 switch (pi->cmd) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002392 case P_DATA_REQUEST:
2393 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2394 break;
2395 case P_RS_DATA_REQUEST:
2396 case P_CSUM_RS_REQUEST:
2397 case P_OV_REQUEST:
2398 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2399 break;
2400 case P_OV_REPLY:
2401 verb = 0;
2402 dec_rs_pending(mdev);
2403 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2404 break;
2405 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002406 BUG();
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002407 }
2408 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002409 dev_err(DEV, "Can not satisfy peer's read request, "
2410 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002411
Lars Ellenberga821cc42010-09-06 12:31:37 +02002412	/* drain possible payload */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002413 return drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002414 }
2415
2416 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2417 * "criss-cross" setup, that might cause write-out on some other DRBD,
2418 * which in turn might block on the other node at this very place. */
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02002419 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002420 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002421 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002422 return -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002423 }
2424
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002425 switch (pi->cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002426 case P_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002427 peer_req->w.cb = w_e_end_data_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002428 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002429 /* application IO, don't drbd_rs_begin_io */
2430 goto submit;
2431
Philipp Reisnerb411b362009-09-25 16:07:19 -07002432 case P_RS_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002433 peer_req->w.cb = w_e_end_rsdata_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002434 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002435 /* used in the sector offset progress display */
2436 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002437 break;
2438
2439 case P_OV_REPLY:
2440 case P_CSUM_RS_REQUEST:
2441 fault_type = DRBD_FAULT_RS_RD;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002442 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002443 if (!di)
2444 goto out_free_e;
2445
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002446 di->digest_size = pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002447 di->digest = (((char *)di)+sizeof(struct digest_info));
2448
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002449 peer_req->digest = di;
2450 peer_req->flags |= EE_HAS_DIGEST;
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002451
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002452 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002453 goto out_free_e;
2454
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002455 if (pi->cmd == P_CSUM_RS_REQUEST) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002456 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002457 peer_req->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002458 /* used in the sector offset progress display */
2459 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002460 } else if (pi->cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002461 /* track progress, we may need to throttle */
2462 atomic_add(size >> 9, &mdev->rs_sect_in);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002463 peer_req->w.cb = w_e_end_ov_reply;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002464 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002465 /* drbd_rs_begin_io done when we sent this request,
2466 * but accounting still needs to be done. */
2467 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002468 }
2469 break;
2470
2471 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002472 if (mdev->ov_start_sector == ~(sector_t)0 &&
Philipp Reisner31890f42011-01-19 14:12:51 +01002473 mdev->tconn->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002474 unsigned long now = jiffies;
2475 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002476 mdev->ov_start_sector = sector;
2477 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002478 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2479 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002480 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2481 mdev->rs_mark_left[i] = mdev->ov_left;
2482 mdev->rs_mark_time[i] = now;
2483 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002484 dev_info(DEV, "Online Verify start sector: %llu\n",
2485 (unsigned long long)sector);
2486 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002487 peer_req->w.cb = w_e_end_ov_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002488 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002489 break;
2490
Philipp Reisnerb411b362009-09-25 16:07:19 -07002491 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002492 BUG();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002493 }
2494
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002495 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2496 * wrt the receiver, but it is not as straightforward as it may seem.
2497 * Various places in the resync start and stop logic assume resync
2498 * requests are processed in order; requeuing this on the worker thread
2499 * introduces a bunch of new code for synchronization between threads.
2500 *
2501 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2502 * "forever", throttling after drbd_rs_begin_io will lock that extent
2503 * for application writes for the same time. For now, just throttle
2504 * here, where the rest of the code expects the receiver to sleep for
2505 * a while, anyways.
2506 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002507
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002508 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2509 * this defers syncer requests for some time, before letting at least
2510 * one request through. The resync controller on the receiving side
2511 * will adapt to the incoming rate accordingly.
2512 *
2513 * We cannot throttle here if remote is Primary/SyncTarget:
2514 * we would also throttle its application reads.
2515 * In that case, throttling is done on the SyncTarget only.
2516 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002517 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2518 schedule_timeout_uninterruptible(HZ/10);
2519 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002520 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002521
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002522submit_for_resync:
2523 atomic_add(size >> 9, &mdev->rs_sect_ev);
2524
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002525submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002526 inc_unacked(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002527 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002528 list_add_tail(&peer_req->w.list, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002529 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002530
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01002531 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002532 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002533
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002534 /* don't care for the reason here */
2535 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002536 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002537 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002538 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002539 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2540
Philipp Reisnerb411b362009-09-25 16:07:19 -07002541out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002542 put_ldev(mdev);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02002543 drbd_free_peer_req(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002544 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002545}
2546
2547static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2548{
2549 int self, peer, rv = -100;
2550 unsigned long ch_self, ch_peer;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002551 enum drbd_after_sb_p after_sb_0p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002552
2553 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2554 peer = mdev->p_uuid[UI_BITMAP] & 1;
2555
2556 ch_peer = mdev->p_uuid[UI_SIZE];
2557 ch_self = mdev->comm_bm_set;
2558
Philipp Reisner44ed1672011-04-19 17:10:19 +02002559 rcu_read_lock();
2560 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2561 rcu_read_unlock();
2562 switch (after_sb_0p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002563 case ASB_CONSENSUS:
2564 case ASB_DISCARD_SECONDARY:
2565 case ASB_CALL_HELPER:
Philipp Reisner44ed1672011-04-19 17:10:19 +02002566 case ASB_VIOLENTLY:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002567 dev_err(DEV, "Configuration error.\n");
2568 break;
2569 case ASB_DISCONNECT:
2570 break;
2571 case ASB_DISCARD_YOUNGER_PRI:
2572 if (self == 0 && peer == 1) {
2573 rv = -1;
2574 break;
2575 }
2576 if (self == 1 && peer == 0) {
2577 rv = 1;
2578 break;
2579 }
2580 /* Else fall through to one of the other strategies... */
2581 case ASB_DISCARD_OLDER_PRI:
2582 if (self == 0 && peer == 1) {
2583 rv = 1;
2584 break;
2585 }
2586 if (self == 1 && peer == 0) {
2587 rv = -1;
2588 break;
2589 }
2590 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002591 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002592 "Using discard-least-changes instead\n");
2593 case ASB_DISCARD_ZERO_CHG:
2594 if (ch_peer == 0 && ch_self == 0) {
Lars Ellenberg427c0432012-08-01 12:43:01 +02002595 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002596 ? -1 : 1;
2597 break;
2598 } else {
2599 if (ch_peer == 0) { rv = 1; break; }
2600 if (ch_self == 0) { rv = -1; break; }
2601 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002602 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002603 break;
2604 case ASB_DISCARD_LEAST_CHG:
2605 if (ch_self < ch_peer)
2606 rv = -1;
2607 else if (ch_self > ch_peer)
2608 rv = 1;
2609 else /* ( ch_self == ch_peer ) */
2610 /* Well, then use something else. */
Lars Ellenberg427c0432012-08-01 12:43:01 +02002611 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002612 ? -1 : 1;
2613 break;
2614 case ASB_DISCARD_LOCAL:
2615 rv = -1;
2616 break;
2617 case ASB_DISCARD_REMOTE:
2618 rv = 1;
2619 }
2620
2621 return rv;
2622}
2623
2624static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2625{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002626 int hg, rv = -100;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002627 enum drbd_after_sb_p after_sb_1p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002628
Philipp Reisner44ed1672011-04-19 17:10:19 +02002629 rcu_read_lock();
2630 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2631 rcu_read_unlock();
2632 switch (after_sb_1p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002633 case ASB_DISCARD_YOUNGER_PRI:
2634 case ASB_DISCARD_OLDER_PRI:
2635 case ASB_DISCARD_LEAST_CHG:
2636 case ASB_DISCARD_LOCAL:
2637 case ASB_DISCARD_REMOTE:
Philipp Reisner44ed1672011-04-19 17:10:19 +02002638 case ASB_DISCARD_ZERO_CHG:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002639 dev_err(DEV, "Configuration error.\n");
2640 break;
2641 case ASB_DISCONNECT:
2642 break;
2643 case ASB_CONSENSUS:
2644 hg = drbd_asb_recover_0p(mdev);
2645 if (hg == -1 && mdev->state.role == R_SECONDARY)
2646 rv = hg;
2647 if (hg == 1 && mdev->state.role == R_PRIMARY)
2648 rv = hg;
2649 break;
2650 case ASB_VIOLENTLY:
2651 rv = drbd_asb_recover_0p(mdev);
2652 break;
2653 case ASB_DISCARD_SECONDARY:
2654 return mdev->state.role == R_PRIMARY ? 1 : -1;
2655 case ASB_CALL_HELPER:
2656 hg = drbd_asb_recover_0p(mdev);
2657 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002658 enum drbd_state_rv rv2;
2659
Philipp Reisnerb411b362009-09-25 16:07:19 -07002660 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2661 * we might be here in C_WF_REPORT_PARAMS which is transient.
2662 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002663 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2664 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002665 drbd_khelper(mdev, "pri-lost-after-sb");
2666 } else {
2667 dev_warn(DEV, "Successfully gave up primary role.\n");
2668 rv = hg;
2669 }
2670 } else
2671 rv = hg;
2672 }
2673
2674 return rv;
2675}
2676
2677static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2678{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002679 int hg, rv = -100;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002680 enum drbd_after_sb_p after_sb_2p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002681
Philipp Reisner44ed1672011-04-19 17:10:19 +02002682 rcu_read_lock();
2683 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2684 rcu_read_unlock();
2685 switch (after_sb_2p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002686 case ASB_DISCARD_YOUNGER_PRI:
2687 case ASB_DISCARD_OLDER_PRI:
2688 case ASB_DISCARD_LEAST_CHG:
2689 case ASB_DISCARD_LOCAL:
2690 case ASB_DISCARD_REMOTE:
2691 case ASB_CONSENSUS:
2692 case ASB_DISCARD_SECONDARY:
Philipp Reisner44ed1672011-04-19 17:10:19 +02002693 case ASB_DISCARD_ZERO_CHG:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002694 dev_err(DEV, "Configuration error.\n");
2695 break;
2696 case ASB_VIOLENTLY:
2697 rv = drbd_asb_recover_0p(mdev);
2698 break;
2699 case ASB_DISCONNECT:
2700 break;
2701 case ASB_CALL_HELPER:
2702 hg = drbd_asb_recover_0p(mdev);
2703 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002704 enum drbd_state_rv rv2;
2705
Philipp Reisnerb411b362009-09-25 16:07:19 -07002706 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2707 * we might be here in C_WF_REPORT_PARAMS which is transient.
2708 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002709 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2710 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002711 drbd_khelper(mdev, "pri-lost-after-sb");
2712 } else {
2713 dev_warn(DEV, "Successfully gave up primary role.\n");
2714 rv = hg;
2715 }
2716 } else
2717 rv = hg;
2718 }
2719
2720 return rv;
2721}
2722
2723static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2724 u64 bits, u64 flags)
2725{
2726 if (!uuid) {
2727 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2728 return;
2729 }
2730 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2731 text,
2732 (unsigned long long)uuid[UI_CURRENT],
2733 (unsigned long long)uuid[UI_BITMAP],
2734 (unsigned long long)uuid[UI_HISTORY_START],
2735 (unsigned long long)uuid[UI_HISTORY_END],
2736 (unsigned long long)bits,
2737 (unsigned long long)flags);
2738}
2739
2740/*
2741 100 after split brain try auto recover
2742 2 C_SYNC_SOURCE set BitMap
2743 1 C_SYNC_SOURCE use BitMap
2744 0 no Sync
2745 -1 C_SYNC_TARGET use BitMap
2746 -2 C_SYNC_TARGET set BitMap
2747 -100 after split brain, disconnect
2748-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002749-1091 requires proto 91
2750-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002751 */
2752static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2753{
2754 u64 self, peer;
2755 int i, j;
2756
2757 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2758 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2759
2760 *rule_nr = 10;
2761 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2762 return 0;
2763
2764 *rule_nr = 20;
2765 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2766 peer != UUID_JUST_CREATED)
2767 return -2;
2768
2769 *rule_nr = 30;
2770 if (self != UUID_JUST_CREATED &&
2771 (peer == UUID_JUST_CREATED || peer == (u64)0))
2772 return 2;
2773
2774 if (self == peer) {
2775 int rct, dc; /* roles at crash time */
2776
2777 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2778
Philipp Reisner31890f42011-01-19 14:12:51 +01002779 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002780 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002781
2782 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2783 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2784 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002785 drbd_uuid_move_history(mdev);
2786 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2787 mdev->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002788
2789 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2790 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2791 *rule_nr = 34;
2792 } else {
2793 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2794 *rule_nr = 36;
2795 }
2796
2797 return 1;
2798 }
2799
2800 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2801
Philipp Reisner31890f42011-01-19 14:12:51 +01002802 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002803 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002804
2805 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2806 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2807 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2808
2809 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2810 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2811 mdev->p_uuid[UI_BITMAP] = 0UL;
2812
2813 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2814 *rule_nr = 35;
2815 } else {
2816 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2817 *rule_nr = 37;
2818 }
2819
2820 return -1;
2821 }
2822
2823 /* Common power [off|failure] */
2824 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2825 (mdev->p_uuid[UI_FLAGS] & 2);
2826 /* lowest bit is set when we were primary,
2827 * next bit (weight 2) is set when peer was primary */
2828 *rule_nr = 40;
2829
2830 switch (rct) {
2831 case 0: /* !self_pri && !peer_pri */ return 0;
2832 case 1: /* self_pri && !peer_pri */ return 1;
2833 case 2: /* !self_pri && peer_pri */ return -1;
2834 case 3: /* self_pri && peer_pri */
Lars Ellenberg427c0432012-08-01 12:43:01 +02002835 dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002836 return dc ? -1 : 1;
2837 }
2838 }
2839
2840 *rule_nr = 50;
2841 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2842 if (self == peer)
2843 return -1;
2844
2845 *rule_nr = 51;
2846 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2847 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002848 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002849 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2850 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2851 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002852		/* The last P_SYNC_UUID did not get through. Undo the modifications
2853		   the peer made to its UUIDs when it last started a resync as sync source. */
2854
Philipp Reisner31890f42011-01-19 14:12:51 +01002855 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002856 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002857
2858 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2859 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002860
Lars Ellenberg92b4ca22012-04-30 12:53:52 +02002861 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
Philipp Reisner4a23f262011-01-11 17:42:17 +01002862 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2863
Philipp Reisnerb411b362009-09-25 16:07:19 -07002864 return -1;
2865 }
2866 }
2867
2868 *rule_nr = 60;
2869 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2870 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2871 peer = mdev->p_uuid[i] & ~((u64)1);
2872 if (self == peer)
2873 return -2;
2874 }
2875
2876 *rule_nr = 70;
2877 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2878 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2879 if (self == peer)
2880 return 1;
2881
2882 *rule_nr = 71;
2883 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2884 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002885 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002886 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2887 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2888 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002889		/* The last P_SYNC_UUID did not get through. Undo the modifications
2890		   we made to our own UUIDs when we last started a resync as sync source. */
2891
Philipp Reisner31890f42011-01-19 14:12:51 +01002892 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002893 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002894
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002895 __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2896 __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002897
Philipp Reisner4a23f262011-01-11 17:42:17 +01002898 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002899 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2900 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2901
2902 return 1;
2903 }
2904 }
2905
2906
2907 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002908 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002909 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2910 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2911 if (self == peer)
2912 return 2;
2913 }
2914
2915 *rule_nr = 90;
2916 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2917 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2918 if (self == peer && self != ((u64)0))
2919 return 100;
2920
2921 *rule_nr = 100;
2922 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2923 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2924 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2925 peer = mdev->p_uuid[j] & ~((u64)1);
2926 if (self == peer)
2927 return -100;
2928 }
2929 }
2930
2931 return -1000;
2932}
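#if 0
/* Illustrative sketch (not built): rule 40 above, the "common power
 * [off|failure]" case.  The encoding matches the code: bit 0 means this node
 * was primary at crash time, bit 1 (weight 2) means the peer was.  The
 * resolve_as_target parameter stands in for the RESOLVE_CONFLICTS tie-breaker;
 * the helper itself is only an example, not driver API.
 */
static int sketch_rule_40(int self_was_primary, int peer_was_primary,
			  int resolve_as_target)
{
	int rct = (self_was_primary ? 1 : 0) + (peer_was_primary ? 2 : 0);

	switch (rct) {
	case 0: return 0;	/* neither was primary: no sync needed */
	case 1: return 1;	/* only we were primary: sync source, use bitmap */
	case 2: return -1;	/* only the peer was primary: sync target */
	default: return resolve_as_target ? -1 : 1;	/* both: tie-break */
	}
}
#endif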
2933
2934/* drbd_sync_handshake() returns the new conn state on success, or
2935 C_MASK (-1) on failure.
2936 */
2937static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2938 enum drbd_disk_state peer_disk) __must_hold(local)
2939{
Philipp Reisnerb411b362009-09-25 16:07:19 -07002940 enum drbd_conns rv = C_MASK;
2941 enum drbd_disk_state mydisk;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002942 struct net_conf *nc;
Andreas Gruenbacher6dff2902011-06-28 14:18:12 +02002943 int hg, rule_nr, rr_conflict, tentative;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002944
2945 mydisk = mdev->state.disk;
2946 if (mydisk == D_NEGOTIATING)
2947 mydisk = mdev->new_state_tmp.disk;
2948
2949 dev_info(DEV, "drbd_sync_handshake:\n");
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002950
2951 spin_lock_irq(&mdev->ldev->md.uuid_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002952 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2953 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2954 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2955
2956 hg = drbd_uuid_compare(mdev, &rule_nr);
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002957 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002958
2959 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2960
2961 if (hg == -1000) {
2962 dev_alert(DEV, "Unrelated data, aborting!\n");
2963 return C_MASK;
2964 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002965 if (hg < -1000) {
2966 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002967 return C_MASK;
2968 }
2969
2970 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2971 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2972 int f = (hg == -100) || abs(hg) == 2;
2973 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2974 if (f)
2975 hg = hg*2;
2976 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2977 hg > 0 ? "source" : "target");
2978 }
2979
Adam Gandelman3a11a482010-04-08 16:48:23 -07002980 if (abs(hg) == 100)
2981 drbd_khelper(mdev, "initial-split-brain");
2982
Philipp Reisner44ed1672011-04-19 17:10:19 +02002983 rcu_read_lock();
2984 nc = rcu_dereference(mdev->tconn->net_conf);
2985
2986 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002987 int pcount = (mdev->state.role == R_PRIMARY)
2988 + (peer_role == R_PRIMARY);
2989 int forced = (hg == -100);
2990
2991 switch (pcount) {
2992 case 0:
2993 hg = drbd_asb_recover_0p(mdev);
2994 break;
2995 case 1:
2996 hg = drbd_asb_recover_1p(mdev);
2997 break;
2998 case 2:
2999 hg = drbd_asb_recover_2p(mdev);
3000 break;
3001 }
3002 if (abs(hg) < 100) {
3003 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3004 "automatically solved. Sync from %s node\n",
3005 pcount, (hg < 0) ? "peer" : "this");
3006 if (forced) {
3007 dev_warn(DEV, "Doing a full sync, since"
3008 " UUIDs were ambiguous.\n");
3009 hg = hg*2;
3010 }
3011 }
3012 }
3013
3014 if (hg == -100) {
Philipp Reisner08b165b2011-09-05 16:22:33 +02003015 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003016 hg = -1;
Philipp Reisner08b165b2011-09-05 16:22:33 +02003017 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003018 hg = 1;
3019
3020 if (abs(hg) < 100)
3021 dev_warn(DEV, "Split-Brain detected, manually solved. "
3022 "Sync from %s node\n",
3023 (hg < 0) ? "peer" : "this");
3024 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02003025 rr_conflict = nc->rr_conflict;
Andreas Gruenbacher6dff2902011-06-28 14:18:12 +02003026 tentative = nc->tentative;
Philipp Reisner44ed1672011-04-19 17:10:19 +02003027 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003028
3029 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01003030 /* FIXME this log message is not correct if we end up here
3031 * after an attempted attach on a diskless node.
3032 * We just refuse to attach -- well, we drop the "connection"
3033 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07003034 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003035 drbd_khelper(mdev, "split-brain");
3036 return C_MASK;
3037 }
3038
3039 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3040 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3041 return C_MASK;
3042 }
3043
3044 if (hg < 0 && /* by intention we do not use mydisk here. */
3045 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02003046 switch (rr_conflict) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003047 case ASB_CALL_HELPER:
3048 drbd_khelper(mdev, "pri-lost");
3049 /* fall through */
3050 case ASB_DISCONNECT:
3051 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3052 return C_MASK;
3053 case ASB_VIOLENTLY:
3054 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3055 " assumption\n");
3056 }
3057 }
3058
Andreas Gruenbacher6dff2902011-06-28 14:18:12 +02003059 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003060 if (hg == 0)
3061 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3062 else
3063 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3064 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3065 abs(hg) >= 2 ? "full" : "bit-map based");
3066 return C_MASK;
3067 }
3068
Philipp Reisnerb411b362009-09-25 16:07:19 -07003069 if (abs(hg) >= 2) {
3070 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003071 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3072 BM_LOCKED_SET_ALLOWED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003073 return C_MASK;
3074 }
3075
3076 if (hg > 0) { /* become sync source. */
3077 rv = C_WF_BITMAP_S;
3078 } else if (hg < 0) { /* become sync target */
3079 rv = C_WF_BITMAP_T;
3080 } else {
3081 rv = C_CONNECTED;
3082 if (drbd_bm_total_weight(mdev)) {
3083 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3084 drbd_bm_total_weight(mdev));
3085 }
3086 }
3087
3088 return rv;
3089}
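#if 0
/* Illustrative sketch (not built): how the handshake result hg is mapped onto
 * the next connection state at the end of drbd_sync_handshake() above.  For
 * abs(hg) >= 2 the whole bitmap is set first, but the direction is the same.
 * The helper is only an example, not driver API.
 */
static enum drbd_conns sketch_hg_to_conn(int hg)
{
	if (hg > 0)		/* we hold the better data: become sync source */
		return C_WF_BITMAP_S;
	if (hg < 0)		/* the peer holds the better data: become sync target */
		return C_WF_BITMAP_T;
	return C_CONNECTED;	/* nothing to sync */
}
#endif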
3090
Philipp Reisnerf179d762011-05-16 17:31:47 +02003091static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003092{
3093 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
Philipp Reisnerf179d762011-05-16 17:31:47 +02003094 if (peer == ASB_DISCARD_REMOTE)
3095 return ASB_DISCARD_LOCAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003096
3097 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
Philipp Reisnerf179d762011-05-16 17:31:47 +02003098 if (peer == ASB_DISCARD_LOCAL)
3099 return ASB_DISCARD_REMOTE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003100
3101 /* everything else is valid if they are equal on both sides. */
Philipp Reisnerf179d762011-05-16 17:31:47 +02003102 return peer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003103}
3104
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003105static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003106{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003107 struct p_protocol *p = pi->data;
Philipp Reisner036b17e2011-05-16 17:38:11 +02003108 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3109 int p_proto, p_discard_my_data, p_two_primaries, cf;
3110 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3111 char integrity_alg[SHARED_SECRET_MAX] = "";
Andreas Gruenbacheraccdbcc2011-07-15 17:41:09 +02003112 struct crypto_hash *peer_integrity_tfm = NULL;
Philipp Reisner7aca6c72011-05-17 10:12:56 +02003113 void *int_dig_in = NULL, *int_dig_vv = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003114
Philipp Reisnerb411b362009-09-25 16:07:19 -07003115 p_proto = be32_to_cpu(p->protocol);
3116 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3117 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3118 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003119 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003120 cf = be32_to_cpu(p->conn_flags);
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02003121 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003122
Andreas Gruenbacher86db0612011-04-28 15:24:18 +02003123 if (tconn->agreed_pro_version >= 87) {
3124 int err;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003125
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02003126 if (pi->size > sizeof(integrity_alg))
Andreas Gruenbacher86db0612011-04-28 15:24:18 +02003127 return -EIO;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02003128 err = drbd_recv_all(tconn, integrity_alg, pi->size);
Andreas Gruenbacher86db0612011-04-28 15:24:18 +02003129 if (err)
3130 return err;
Philipp Reisner036b17e2011-05-16 17:38:11 +02003131 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003132 }
3133
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003134 if (pi->cmd != P_PROTOCOL_UPDATE) {
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003135 clear_bit(CONN_DRY_RUN, &tconn->flags);
Philipp Reisner036b17e2011-05-16 17:38:11 +02003136
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003137 if (cf & CF_DRY_RUN)
3138 set_bit(CONN_DRY_RUN, &tconn->flags);
3139
3140 rcu_read_lock();
3141 nc = rcu_dereference(tconn->net_conf);
3142
3143 if (p_proto != nc->wire_protocol) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003144 conn_err(tconn, "incompatible %s settings\n", "protocol");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003145 goto disconnect_rcu_unlock;
3146 }
3147
3148 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003149 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003150 goto disconnect_rcu_unlock;
3151 }
3152
3153 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003154 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003155 goto disconnect_rcu_unlock;
3156 }
3157
3158 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003159 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003160 goto disconnect_rcu_unlock;
3161 }
3162
3163 if (p_discard_my_data && nc->discard_my_data) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003164 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003165 goto disconnect_rcu_unlock;
3166 }
3167
3168 if (p_two_primaries != nc->two_primaries) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003169 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003170 goto disconnect_rcu_unlock;
3171 }
3172
3173 if (strcmp(integrity_alg, nc->integrity_alg)) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003174 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003175 goto disconnect_rcu_unlock;
3176 }
3177
3178 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003179 }
3180
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003181 if (integrity_alg[0]) {
3182 int hash_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003183
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003184 /*
3185 * We can only change the peer data integrity algorithm
3186 * here. Changing our own data integrity algorithm
3187 * requires that we send a P_PROTOCOL_UPDATE packet at
3188 * the same time; otherwise, the peer has no way to
3189 * tell between which packets the algorithm should
3190 * change.
3191 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003192
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003193 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3194 if (!peer_integrity_tfm) {
3195 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3196 integrity_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003197 goto disconnect;
3198 }
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003199
3200 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3201 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3202 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3203 if (!(int_dig_in && int_dig_vv)) {
3204 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3205 goto disconnect;
3206 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003207 }
3208
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003209 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3210 if (!new_net_conf) {
3211 conn_err(tconn, "Allocation of new net_conf failed\n");
3212 goto disconnect;
3213 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003214
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003215 mutex_lock(&tconn->data.mutex);
3216 mutex_lock(&tconn->conf_update);
3217 old_net_conf = tconn->net_conf;
3218 *new_net_conf = *old_net_conf;
3219
3220 new_net_conf->wire_protocol = p_proto;
3221 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3222 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3223 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3224 new_net_conf->two_primaries = p_two_primaries;
3225
3226 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3227 mutex_unlock(&tconn->conf_update);
3228 mutex_unlock(&tconn->data.mutex);
3229
3230 crypto_free_hash(tconn->peer_integrity_tfm);
3231 kfree(tconn->int_dig_in);
3232 kfree(tconn->int_dig_vv);
3233 tconn->peer_integrity_tfm = peer_integrity_tfm;
3234 tconn->int_dig_in = int_dig_in;
3235 tconn->int_dig_vv = int_dig_vv;
3236
3237 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3238 conn_info(tconn, "peer data-integrity-alg: %s\n",
3239 integrity_alg[0] ? integrity_alg : "(none)");
3240
3241 synchronize_rcu();
3242 kfree(old_net_conf);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003243 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003244
Philipp Reisner44ed1672011-04-19 17:10:19 +02003245disconnect_rcu_unlock:
3246 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003247disconnect:
Andreas Gruenbacherb792c352011-07-15 16:48:49 +02003248 crypto_free_hash(peer_integrity_tfm);
Philipp Reisner036b17e2011-05-16 17:38:11 +02003249 kfree(int_dig_in);
3250 kfree(int_dig_vv);
Philipp Reisner72046242011-03-15 18:51:47 +01003251 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003252 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003253}
3254
3255/* helper function
3256 * input: alg name, feature name
3257 * return: NULL (alg name was "")
3258 * ERR_PTR(error) if something goes wrong
3259 * or the crypto hash ptr, if it worked out ok. */
3260struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3261 const char *alg, const char *name)
3262{
3263 struct crypto_hash *tfm;
3264
3265 if (!alg[0])
3266 return NULL;
3267
3268 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3269 if (IS_ERR(tfm)) {
3270 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3271 alg, name, PTR_ERR(tfm));
3272 return tfm;
3273 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003274 return tfm;
3275}
3276
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003277static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003278{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003279 void *buffer = tconn->data.rbuf;
3280 int size = pi->size;
3281
3282 while (size) {
3283 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3284 s = drbd_recv(tconn, buffer, s);
3285 if (s <= 0) {
3286 if (s < 0)
3287 return s;
3288 break;
3289 }
3290 size -= s;
3291 }
3292 if (size)
3293 return -EIO;
3294 return 0;
3295}
3296
3297/*
3298 * config_unknown_volume - device configuration command for unknown volume
3299 *
3300 * When a device is added to an existing connection, the node on which the
3301 * device is added first will send configuration commands to its peer but the
3302 * peer will not know about the device yet. It will warn and ignore these
3303 * commands. Once the device is added on the second node, the second node will
3304 * send the same device configuration commands, but in the other direction.
3305 *
3306 * (We can also end up here if drbd is misconfigured.)
3307 */
3308static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3309{
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02003310 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3311 cmdname(pi->cmd), pi->vnr);
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003312 return ignore_remaining_packet(tconn, pi);
3313}
3314
3315static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3316{
3317 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003318 struct p_rs_param_95 *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003319 unsigned int header_size, data_size, exp_max_sz;
3320 struct crypto_hash *verify_tfm = NULL;
3321 struct crypto_hash *csums_tfm = NULL;
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003322 struct net_conf *old_net_conf, *new_net_conf = NULL;
Philipp Reisner813472c2011-05-03 16:47:02 +02003323 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003324 const int apv = tconn->agreed_pro_version;
Philipp Reisner813472c2011-05-03 16:47:02 +02003325 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
Philipp Reisner778f2712010-07-06 11:14:00 +02003326 int fifo_size = 0;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003327 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003328
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003329 mdev = vnr_to_mdev(tconn, pi->vnr);
3330 if (!mdev)
3331 return config_unknown_volume(tconn, pi);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003332
3333 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3334 : apv == 88 ? sizeof(struct p_rs_param)
3335 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003336 : apv <= 94 ? sizeof(struct p_rs_param_89)
3337 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003338
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003339 if (pi->size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003340 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003341 pi->size, exp_max_sz);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003342 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003343 }
3344
3345 if (apv <= 88) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003346 header_size = sizeof(struct p_rs_param);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003347 data_size = pi->size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003348 } else if (apv <= 94) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003349 header_size = sizeof(struct p_rs_param_89);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003350 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003351 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003352 } else {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003353 header_size = sizeof(struct p_rs_param_95);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003354 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003355 D_ASSERT(data_size == 0);
3356 }
3357
3358 /* initialize verify_alg and csums_alg */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003359 p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003360 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3361
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003362 err = drbd_recv_all(mdev->tconn, p, header_size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003363 if (err)
3364 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003365
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003366 mutex_lock(&mdev->tconn->conf_update);
3367 old_net_conf = mdev->tconn->net_conf;
Philipp Reisner813472c2011-05-03 16:47:02 +02003368 if (get_ldev(mdev)) {
3369 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3370 if (!new_disk_conf) {
3371 put_ldev(mdev);
3372 mutex_unlock(&mdev->tconn->conf_update);
3373 dev_err(DEV, "Allocation of new disk_conf failed\n");
3374 return -ENOMEM;
3375 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003376
Philipp Reisner813472c2011-05-03 16:47:02 +02003377 old_disk_conf = mdev->ldev->disk_conf;
3378 *new_disk_conf = *old_disk_conf;
3379
Andreas Gruenbacher6394b932011-05-11 14:29:52 +02003380 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
Philipp Reisner813472c2011-05-03 16:47:02 +02003381 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003382
3383 if (apv >= 88) {
3384 if (apv == 88) {
Philipp Reisner5de73822012-03-28 10:17:32 +02003385 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3386 dev_err(DEV, "verify-alg of wrong size, "
3387 "peer wants %u, accepting only up to %u bytes\n",
3388 data_size, SHARED_SECRET_MAX);
Philipp Reisner813472c2011-05-03 16:47:02 +02003389 err = -EIO;
3390 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003391 }
3392
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003393 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
Philipp Reisner813472c2011-05-03 16:47:02 +02003394 if (err)
3395 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003396 /* we expect NUL terminated string */
3397 /* but just in case someone tries to be evil */
3398 D_ASSERT(p->verify_alg[data_size-1] == 0);
3399 p->verify_alg[data_size-1] = 0;
3400
3401 } else /* apv >= 89 */ {
3402 /* we still expect NUL terminated strings */
3403 /* but just in case someone tries to be evil */
3404 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3405 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3406 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3407 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3408 }
3409
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003410 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003411 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3412 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003413 old_net_conf->verify_alg, p->verify_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003414 goto disconnect;
3415 }
3416 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3417 p->verify_alg, "verify-alg");
3418 if (IS_ERR(verify_tfm)) {
3419 verify_tfm = NULL;
3420 goto disconnect;
3421 }
3422 }
3423
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003424 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003425 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3426 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003427 old_net_conf->csums_alg, p->csums_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003428 goto disconnect;
3429 }
3430 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3431 p->csums_alg, "csums-alg");
3432 if (IS_ERR(csums_tfm)) {
3433 csums_tfm = NULL;
3434 goto disconnect;
3435 }
3436 }
3437
Philipp Reisner813472c2011-05-03 16:47:02 +02003438 if (apv > 94 && new_disk_conf) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003439 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3440 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3441 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3442 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02003443
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003444 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
Philipp Reisner9958c852011-05-03 16:19:31 +02003445 if (fifo_size != mdev->rs_plan_s->size) {
Philipp Reisner813472c2011-05-03 16:47:02 +02003446 new_plan = fifo_alloc(fifo_size);
3447 if (!new_plan) {
Philipp Reisner778f2712010-07-06 11:14:00 +02003448 dev_err(DEV, "kmalloc of fifo_buffer failed\n");
3449 /* no put_ldev() here: the disconnect path below drops the reference */
Philipp Reisner778f2712010-07-06 11:14:00 +02003450 goto disconnect;
3451 }
3452 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003453 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003454
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003455 if (verify_tfm || csums_tfm) {
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003456 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3457 if (!new_net_conf) {
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003458 dev_err(DEV, "Allocation of new net_conf failed\n");
3459 goto disconnect;
3460 }
3461
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003462 *new_net_conf = *old_net_conf;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003463
3464 if (verify_tfm) {
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003465 strcpy(new_net_conf->verify_alg, p->verify_alg);
3466 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003467 crypto_free_hash(mdev->tconn->verify_tfm);
3468 mdev->tconn->verify_tfm = verify_tfm;
3469 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3470 }
3471 if (csums_tfm) {
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003472 strcpy(new_net_conf->csums_alg, p->csums_alg);
3473 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003474 crypto_free_hash(mdev->tconn->csums_tfm);
3475 mdev->tconn->csums_tfm = csums_tfm;
3476 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3477 }
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003478 rcu_assign_pointer(tconn->net_conf, new_net_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003479 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003480 }
3481
Philipp Reisner813472c2011-05-03 16:47:02 +02003482 if (new_disk_conf) {
3483 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3484 put_ldev(mdev);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003485 }
Philipp Reisner813472c2011-05-03 16:47:02 +02003486
3487 if (new_plan) {
3488 old_plan = mdev->rs_plan_s;
3489 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3490 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003491
3492 mutex_unlock(&mdev->tconn->conf_update);
3493 synchronize_rcu();
3494 if (new_net_conf)
3495 kfree(old_net_conf);
3496 kfree(old_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02003497 kfree(old_plan);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003498
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003499 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003500
Philipp Reisner813472c2011-05-03 16:47:02 +02003501reconnect:
3502 if (new_disk_conf) {
3503 put_ldev(mdev);
3504 kfree(new_disk_conf);
3505 }
3506 mutex_unlock(&mdev->tconn->conf_update);
3507 return -EIO;
3508
Philipp Reisnerb411b362009-09-25 16:07:19 -07003509disconnect:
Philipp Reisner813472c2011-05-03 16:47:02 +02003510 kfree(new_plan);
3511 if (new_disk_conf) {
3512 put_ldev(mdev);
3513 kfree(new_disk_conf);
3514 }
Philipp Reisnera0095502011-05-03 13:14:15 +02003515 mutex_unlock(&mdev->tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003516 /* just for completeness: actually not needed,
3517 * as this is not reached if csums_tfm was ok. */
3518 crypto_free_hash(csums_tfm);
3519 /* but free the verify_tfm again, if csums_tfm did not work out */
3520 crypto_free_hash(verify_tfm);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003521 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003522 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003523}
3524
Philipp Reisnerb411b362009-09-25 16:07:19 -07003525/* warn if the arguments differ by more than 12.5% */
3526static void warn_if_differ_considerably(struct drbd_conf *mdev,
3527 const char *s, sector_t a, sector_t b)
3528{
3529 sector_t d;
3530 if (a == 0 || b == 0)
3531 return;
3532 d = (a > b) ? (a - b) : (b - a);
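	/* a>>3 is a/8, i.e. 12.5%: warn when the delta exceeds that fraction of either value */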
3533 if (d > (a>>3) || d > (b>>3))
3534 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3535 (unsigned long long)a, (unsigned long long)b);
3536}
3537
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003538static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003539{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003540 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003541 struct p_sizes *p = pi->data;
Philipp Reisnere96c9632013-06-25 16:50:07 +02003542 enum determine_dev_size dd = DS_UNCHANGED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003543 sector_t p_size, p_usize, my_usize;
3544 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003545 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003546
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003547 mdev = vnr_to_mdev(tconn, pi->vnr);
3548 if (!mdev)
3549 return config_unknown_volume(tconn, pi);
3550
Philipp Reisnerb411b362009-09-25 16:07:19 -07003551 p_size = be64_to_cpu(p->d_size);
3552 p_usize = be64_to_cpu(p->u_size);
3553
Philipp Reisnerb411b362009-09-25 16:07:19 -07003554 /* just store the peer's disk size for now.
3555 * we still need to figure out whether we accept that. */
3556 mdev->p_size = p_size;
3557
Philipp Reisnerb411b362009-09-25 16:07:19 -07003558 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003559 rcu_read_lock();
3560 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3561 rcu_read_unlock();
3562
Philipp Reisnerb411b362009-09-25 16:07:19 -07003563 warn_if_differ_considerably(mdev, "lower level device sizes",
3564 p_size, drbd_get_max_capacity(mdev->ldev));
3565 warn_if_differ_considerably(mdev, "user requested size",
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003566 p_usize, my_usize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003567
3568 /* if this is the first connect, or an otherwise expected
3569 * param exchange, choose the minimum */
3570 if (mdev->state.conn == C_WF_REPORT_PARAMS)
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003571 p_usize = min_not_zero(my_usize, p_usize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003572
3573 /* Never shrink a device with usable data during connect.
3574 But allow online shrinking if we are connected. */
Philipp Reisneref5e44a2011-05-03 13:27:43 +02003575 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003576 drbd_get_capacity(mdev->this_bdev) &&
3577 mdev->state.disk >= D_OUTDATED &&
3578 mdev->state.conn < C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003579 dev_err(DEV, "The peer's disk size is too small!\n");
Philipp Reisner38fa9982011-03-15 18:24:49 +01003580 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003581 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003582 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003583 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003584
3585 if (my_usize != p_usize) {
3586 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3587
3588 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3589 if (!new_disk_conf) {
3590 dev_err(DEV, "Allocation of new disk_conf failed\n");
3591 put_ldev(mdev);
3592 return -ENOMEM;
3593 }
3594
3595 mutex_lock(&mdev->tconn->conf_update);
3596 old_disk_conf = mdev->ldev->disk_conf;
3597 *new_disk_conf = *old_disk_conf;
3598 new_disk_conf->disk_size = p_usize;
3599
3600 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3601 mutex_unlock(&mdev->tconn->conf_update);
3602 synchronize_rcu();
3603 kfree(old_disk_conf);
3604
3605 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3606 (unsigned long)p_usize);
3607 }
3608
Philipp Reisnerb411b362009-09-25 16:07:19 -07003609 put_ldev(mdev);
3610 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003611
Philipp Reisnere89b5912010-03-24 17:11:33 +01003612 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003613 if (get_ldev(mdev)) {
Philipp Reisnerd752b262013-06-25 16:50:08 +02003614 dd = drbd_determine_dev_size(mdev, ddsf, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003615 put_ldev(mdev);
Philipp Reisnere96c9632013-06-25 16:50:07 +02003616 if (dd == DS_ERROR)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003617 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003618 drbd_md_sync(mdev);
3619 } else {
3620 /* I am diskless, need to accept the peer's size. */
3621 drbd_set_my_capacity(mdev, p_size);
3622 }
3623
Philipp Reisner99432fc2011-05-20 16:39:13 +02003624 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3625 drbd_reconsider_max_bio_size(mdev);
3626
Philipp Reisnerb411b362009-09-25 16:07:19 -07003627 if (get_ldev(mdev)) {
3628 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3629 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3630 ldsc = 1;
3631 }
3632
Philipp Reisnerb411b362009-09-25 16:07:19 -07003633 put_ldev(mdev);
3634 }
3635
3636 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3637 if (be64_to_cpu(p->c_size) !=
3638 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3639 /* we have different sizes, probably peer
3640 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003641 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003642 }
3643 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
Philipp Reisnere96c9632013-06-25 16:50:07 +02003644 (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003645 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003646 mdev->state.disk >= D_INCONSISTENT) {
3647 if (ddsf & DDSF_NO_RESYNC)
3648 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3649 else
3650 resync_after_online_grow(mdev);
3651 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003652 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3653 }
3654 }
3655
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003656 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003657}
3658
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003659static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003660{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003661 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003662 struct p_uuids *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003663 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003664 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003665
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003666 mdev = vnr_to_mdev(tconn, pi->vnr);
3667 if (!mdev)
3668 return config_unknown_volume(tconn, pi);
3669
Philipp Reisnerb411b362009-09-25 16:07:19 -07003670 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
Jing Wang063eacf2012-10-25 15:00:56 +08003671 if (!p_uuid) {
3672 dev_err(DEV, "kmalloc of p_uuid failed\n");
3673 return -ENOMEM;
3674 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003675
3676 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3677 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3678
3679 kfree(mdev->p_uuid);
3680 mdev->p_uuid = p_uuid;
3681
3682 if (mdev->state.conn < C_CONNECTED &&
3683 mdev->state.disk < D_INCONSISTENT &&
3684 mdev->state.role == R_PRIMARY &&
3685 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3686 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3687 (unsigned long long)mdev->ed_uuid);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003688 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003689 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003690 }
3691
3692 if (get_ldev(mdev)) {
3693 int skip_initial_sync =
3694 mdev->state.conn == C_CONNECTED &&
Philipp Reisner31890f42011-01-19 14:12:51 +01003695 mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003696 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3697 (p_uuid[UI_FLAGS] & 8);
3698 if (skip_initial_sync) {
3699 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3700 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003701 "clear_n_write from receive_uuids",
3702 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003703 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3704 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3705 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3706 CS_VERBOSE, NULL);
3707 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003708 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003709 }
3710 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003711 } else if (mdev->state.disk < D_INCONSISTENT &&
3712 mdev->state.role == R_PRIMARY) {
3713 /* I am a diskless primary, the peer just created a new current UUID
3714 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003715 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003716 }
3717
3718 /* Before we test for the disk state, we should wait until a possibly
3719 ongoing cluster-wide state change has finished. That is important if
3720 we are primary and are detaching from our disk. We need to see the
3721 new disk state... */
Philipp Reisner8410da82011-02-11 20:11:10 +01003722 mutex_lock(mdev->state_mutex);
3723 mutex_unlock(mdev->state_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003724 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003725 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3726
3727 if (updated_uuids)
3728 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003729
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003730 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003731}
3732
3733/**
3734 * convert_state() - Converts the peer's view of the cluster state to our point of view
3735 * @ps: The state as seen by the peer.
3736 */
3737static union drbd_state convert_state(union drbd_state ps)
3738{
3739 union drbd_state ms;
3740
3741 static enum drbd_conns c_tab[] = {
Philipp Reisner369bea62011-07-06 23:04:44 +02003742 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
Philipp Reisnerb411b362009-09-25 16:07:19 -07003743 [C_CONNECTED] = C_CONNECTED,
3744
3745 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3746 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3747 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3748 [C_VERIFY_S] = C_VERIFY_T,
3749 [C_MASK] = C_MASK,
3750 };
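	/* Role and disk state are simply mirrored below: the peer's own role and
	 * disk become our "peer" and "pdsk" view, and vice versa; only the
	 * connection state needs the explicit translation table above. The
	 * peer's suspend flags are folded into our peer_isp. */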
3751
3752 ms.i = ps.i;
3753
3754 ms.conn = c_tab[ps.conn];
3755 ms.peer = ps.role;
3756 ms.role = ps.peer;
3757 ms.pdsk = ps.disk;
3758 ms.disk = ps.pdsk;
3759 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3760
3761 return ms;
3762}
3763
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003764static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003765{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003766 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003767 struct p_req_state *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003768 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003769 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003770
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003771 mdev = vnr_to_mdev(tconn, pi->vnr);
3772 if (!mdev)
3773 return -EIO;
3774
Philipp Reisnerb411b362009-09-25 16:07:19 -07003775 mask.i = be32_to_cpu(p->mask);
3776 val.i = be32_to_cpu(p->val);
3777
Lars Ellenberg427c0432012-08-01 12:43:01 +02003778 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
Philipp Reisner8410da82011-02-11 20:11:10 +01003779 mutex_is_locked(mdev->state_mutex)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003780 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003781 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003782 }
3783
3784 mask = convert_state(mask);
3785 val = convert_state(val);
3786
3787 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003788 drbd_send_sr_reply(mdev, rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003789
Philipp Reisnerb411b362009-09-25 16:07:19 -07003790 drbd_md_sync(mdev);
3791
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003792 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003793}
3794
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003795static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003796{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003797 struct p_req_state *p = pi->data;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003798 union drbd_state mask, val;
3799 enum drbd_state_rv rv;
3800
3801 mask.i = be32_to_cpu(p->mask);
3802 val.i = be32_to_cpu(p->val);
3803
Lars Ellenberg427c0432012-08-01 12:43:01 +02003804 if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003805 mutex_is_locked(&tconn->cstate_mutex)) {
3806 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003807 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003808 }
3809
3810 mask = convert_state(mask);
3811 val = convert_state(val);
3812
Philipp Reisner778bcf22011-03-28 12:55:03 +02003813 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003814 conn_send_sr_reply(tconn, rv);
3815
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003816 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003817}
3818
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003819static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003820{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003821 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003822 struct p_state *p = pi->data;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003823 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003824 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003825 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003826 int rv;
3827
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003828 mdev = vnr_to_mdev(tconn, pi->vnr);
3829 if (!mdev)
3830 return config_unknown_volume(tconn, pi);
3831
Philipp Reisnerb411b362009-09-25 16:07:19 -07003832 peer_state.i = be32_to_cpu(p->state);
3833
3834 real_peer_disk = peer_state.disk;
3835 if (peer_state.disk == D_NEGOTIATING) {
3836 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3837 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3838 }
3839
Philipp Reisner87eeee42011-01-19 14:16:30 +01003840 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003841 retry:
Philipp Reisner78bae592011-03-28 15:40:12 +02003842 os = ns = drbd_read_state(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003843 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003844
Lars Ellenberg545752d2011-12-05 14:39:25 +01003845 /* If some other part of the code (asender thread, timeout)
3846 * already decided to close the connection again,
3847 * we must not "re-establish" it here. */
3848 if (os.conn <= C_TEAR_DOWN)
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003849 return -ECONNRESET;
Lars Ellenberg545752d2011-12-05 14:39:25 +01003850
Lars Ellenberg40424e42011-09-26 15:24:56 +02003851 /* If this is the "end of sync" confirmation, usually the peer disk
3852 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3853 * set) resync started in PausedSyncT, or if the timing of pause-/
3854 * unpause-sync events has been "just right", the peer disk may
3855 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3856 */
3857 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3858 real_peer_disk == D_UP_TO_DATE &&
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003859 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3860 /* If we are (becoming) SyncSource, but peer is still in sync
3861 * preparation, ignore its uptodate-ness to avoid flapping, it
3862 * will change to inconsistent once the peer reaches active
3863 * syncing states.
3864 * It may have changed syncer-paused flags, however, so we
3865 * cannot ignore this completely. */
3866 if (peer_state.conn > C_CONNECTED &&
3867 peer_state.conn < C_SYNC_SOURCE)
3868 real_peer_disk = D_INCONSISTENT;
3869
3870 /* if peer_state changes to connected at the same time,
3871 * it explicitly notifies us that it finished resync.
3872 * Maybe we should finish it up, too? */
3873 else if (os.conn >= C_SYNC_SOURCE &&
3874 peer_state.conn == C_CONNECTED) {
3875 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3876 drbd_resync_finished(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003877 return 0;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003878 }
3879 }
3880
Lars Ellenberg02b91b52012-06-28 18:26:52 +02003881 /* explicit verify finished notification, stop sector reached. */
3882 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3883 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003884 ov_out_of_sync_print(mdev);
Lars Ellenberg02b91b52012-06-28 18:26:52 +02003885 drbd_resync_finished(mdev);
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003886 return 0;
Lars Ellenberg02b91b52012-06-28 18:26:52 +02003887 }
3888
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003889 /* peer says his disk is inconsistent, while we think it is uptodate,
3890 * and this happens while the peer still thinks we have a sync going on,
3891 * but we think we are already done with the sync.
3892 * We ignore this to avoid flapping pdsk.
3893 * This should not happen, if the peer is a recent version of drbd. */
3894 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3895 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3896 real_peer_disk = D_UP_TO_DATE;
3897
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003898 if (ns.conn == C_WF_REPORT_PARAMS)
3899 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003900
Philipp Reisner67531712010-10-27 12:21:30 +02003901 if (peer_state.conn == C_AHEAD)
3902 ns.conn = C_BEHIND;
3903
Philipp Reisnerb411b362009-09-25 16:07:19 -07003904 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3905 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3906 int cr; /* consider resync */
3907
3908 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003909 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003910 /* if we had an established connection
3911 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003912 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003913 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003914 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003915 /* if we have both been inconsistent, and the peer has been
3916 * forced to be UpToDate with --overwrite-data */
3917 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3918 /* if we had been plain connected, and the admin requested to
3919 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003920 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003921 (peer_state.conn >= C_STARTING_SYNC_S &&
3922 peer_state.conn <= C_WF_BITMAP_T));
3923
3924 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003925 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003926
3927 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003928 if (ns.conn == C_MASK) {
3929 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003930 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003931 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003932 } else if (peer_state.disk == D_NEGOTIATING) {
3933 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3934 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003935 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003936 } else {
Philipp Reisner8169e412011-03-15 18:40:27 +01003937 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003938 return -EIO;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003939 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003940 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003941 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003942 }
3943 }
3944 }
3945
Philipp Reisner87eeee42011-01-19 14:16:30 +01003946 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02003947 if (os.i != drbd_read_state(mdev).i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003948 goto retry;
3949 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003950 ns.peer = peer_state.role;
3951 ns.pdsk = real_peer_disk;
3952 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003953 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003954 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003955 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
Philipp Reisner2aebfab2011-03-28 16:48:11 +02003956 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003957 test_bit(NEW_CUR_UUID, &mdev->flags)) {
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01003958 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
Philipp Reisner481c6f52010-06-22 14:03:27 +02003959 for temporary network outages! */
Philipp Reisner87eeee42011-01-19 14:16:30 +01003960 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003961 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01003962 tl_clear(mdev->tconn);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003963 drbd_uuid_new_current(mdev);
3964 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003965 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003966 return -EIO;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003967 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003968 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisner78bae592011-03-28 15:40:12 +02003969 ns = drbd_read_state(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003970 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003971
3972 if (rv < SS_SUCCESS) {
Philipp Reisner38fa9982011-03-15 18:24:49 +01003973 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003974 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003975 }
3976
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003977 if (os.conn > C_WF_REPORT_PARAMS) {
3978 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003979 peer_state.disk != D_NEGOTIATING ) {
3980 /* we want resync, peer has not yet decided to sync... */
3981 /* Nowadays only used when forcing a node into primary role and
3982 setting its disk to UpToDate with that */
3983 drbd_send_uuids(mdev);
Lars Ellenbergf479ea02011-10-27 16:52:30 +02003984 drbd_send_current_state(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003985 }
3986 }
3987
Philipp Reisner08b165b2011-09-05 16:22:33 +02003988 clear_bit(DISCARD_MY_DATA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003989
Lars Ellenbergcccac982013-03-19 18:16:46 +01003990 drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003991
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003992 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003993}
3994
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003995static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003996{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003997 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003998 struct p_rs_uuid *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003999
4000 mdev = vnr_to_mdev(tconn, pi->vnr);
4001 if (!mdev)
4002 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004003
4004 wait_event(mdev->misc_wait,
4005 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004006 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07004007 mdev->state.conn < C_CONNECTED ||
4008 mdev->state.disk < D_NEGOTIATING);
4009
4010 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4011
Philipp Reisnerb411b362009-09-25 16:07:19 -07004012 /* Here the _drbd_uuid_ functions are right, current should
4013 _not_ be rotated into the history */
4014 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4015 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4016 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4017
Lars Ellenberg62b0da32011-01-20 13:25:21 +01004018 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004019 drbd_start_resync(mdev, C_SYNC_TARGET);
4020
4021 put_ldev(mdev);
4022 } else
4023 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4024
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004025 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004026}
4027
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004028/**
4029 * receive_bitmap_plain() - receive one chunk of an uncompressed bitmap
4030 *
4031 * Return 0 when done, 1 when another iteration is needed, and a negative error
4032 * code upon failure.
4033 */
4034static int
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004035receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004036 unsigned long *p, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004037{
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004038 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4039 drbd_header_size(mdev->tconn);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004040 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004041 c->bm_words - c->word_offset);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004042 unsigned int want = num_words * sizeof(*p);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004043 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004044
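	/* "want" is capped by both the receive buffer and the words still missing
	 * from the bitmap, so the peer must send exactly a full buffer's worth of
	 * bitmap words, or whatever remains of the bitmap at the end. */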
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004045 if (want != size) {
4046 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004047 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004048 }
4049 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004050 return 0;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004051 err = drbd_recv_all(mdev->tconn, p, want);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004052 if (err)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004053 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004054
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004055 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004056
4057 c->word_offset += num_words;
4058 c->bit_offset = c->word_offset * BITS_PER_LONG;
4059 if (c->bit_offset > c->bm_bits)
4060 c->bit_offset = c->bm_bits;
4061
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004062 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004063}
4064
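/* The accessors below imply the layout of p_compressed_bm->encoding:
 * bits 0-3 carry the bitmap encoding (drbd_bitmap_code), bits 4-6 the number
 * of pad bits in the last byte of the bitstream, and bit 7 the value of the
 * first run (set vs. cleared). */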
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004065static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4066{
4067 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4068}
4069
4070static int dcbp_get_start(struct p_compressed_bm *p)
4071{
4072 return (p->encoding & 0x80) != 0;
4073}
4074
4075static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4076{
4077 return (p->encoding >> 4) & 0x7;
4078}
4079
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004080/**
4081 * recv_bm_rle_bits() - decode one RLE/VLI compressed bitmap chunk
4082 *
4083 * Return 0 when done, 1 when another iteration is needed, and a negative error
4084 * code upon failure.
4085 */
4086static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07004087recv_bm_rle_bits(struct drbd_conf *mdev,
4088 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01004089 struct bm_xfer_ctx *c,
4090 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004091{
4092 struct bitstream bs;
4093 u64 look_ahead;
4094 u64 rl;
4095 u64 tmp;
4096 unsigned long s = c->bit_offset;
4097 unsigned long e;
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004098 int toggle = dcbp_get_start(p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004099 int have;
4100 int bits;
4101
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004102 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004103
4104 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4105 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004106 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004107
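	/* The payload is a sequence of VLI-encoded run lengths describing
	 * alternating runs of cleared and set bits; "toggle" tracks which kind
	 * the current run is, and only the set runs are written to the bitmap. */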
4108 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4109 bits = vli_decode_bits(&rl, look_ahead);
4110 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004111 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004112
4113 if (toggle) {
4114 e = s + rl -1;
4115 if (e >= c->bm_bits) {
4116 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004117 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004118 }
4119 _drbd_bm_set_bits(mdev, s, e);
4120 }
4121
4122 if (have < bits) {
4123 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4124 have, bits, look_ahead,
4125 (unsigned int)(bs.cur.b - p->code),
4126 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004127 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004128 }
Lars Ellenbergd2da5b02013-10-23 10:59:18 +02004129 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4130 if (likely(bits < 64))
4131 look_ahead >>= bits;
4132 else
4133 look_ahead = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004134 have -= bits;
4135
4136 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4137 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004138 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004139 look_ahead |= tmp << have;
4140 have += bits;
4141 }
4142
4143 c->bit_offset = s;
4144 bm_xfer_ctx_bit_to_word_offset(c);
4145
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004146 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004147}
4148
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004149/**
4150 * decode_bitmap_c() - decode one compressed bitmap chunk, by encoding type
4151 *
4152 * Return 0 when done, 1 when another iteration is needed, and a negative error
4153 * code upon failure.
4154 */
4155static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07004156decode_bitmap_c(struct drbd_conf *mdev,
4157 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01004158 struct bm_xfer_ctx *c,
4159 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004160{
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004161 if (dcbp_get_code(p) == RLE_VLI_Bits)
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004162 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004163
4164 /* other variants had been implemented for evaluation,
4165 * but have been dropped as this one turned out to be "best"
4166 * during all our tests. */
4167
4168 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
Philipp Reisner38fa9982011-03-15 18:24:49 +01004169 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004170 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004171}
4172
4173void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4174 const char *direction, struct bm_xfer_ctx *c)
4175{
4176 /* what would it take to transfer it "plaintext" */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004177 unsigned int header_size = drbd_header_size(mdev->tconn);
4178 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4179 unsigned int plain =
4180 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4181 c->bm_words * sizeof(unsigned long);
4182 unsigned int total = c->bytes[0] + c->bytes[1];
4183 unsigned int r;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004184
4185 /* total cannot be zero, but just in case: */
4186 if (total == 0)
4187 return;
4188
4189 /* don't report if not compressed */
4190 if (total >= plain)
4191 return;
4192
4193 /* total < plain. check for overflow, still */
4194 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4195 : (1000 * total / plain);
4196
4197 if (r > 1000)
4198 r = 1000;
4199
4200 r = 1000 - r;
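	/* r is now the savings in permille; e.g. plain = 1000, total = 250
	 * yields r = 750, reported below as "compression: 75.0%". */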
4201 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4202 "total %u; compression: %u.%u%%\n",
4203 direction,
4204 c->bytes[1], c->packets[1],
4205 c->bytes[0], c->packets[0],
4206 total, r/10, r % 10);
4207}
4208
4209/* Since we are processing the bitfield from lower addresses to higher,
4210 it does not matter if we process it in 32 bit chunks or 64 bit
4211 chunks as long as it is little endian. (Understand it as byte stream,
4212 beginning with the lowest byte...) If we used big endian,
4213 we would need to process it from the highest address to the lowest,
4214 in order to be agnostic to the 32 vs 64 bits issue.
4215
4216 returns 0 on success, and a negative error code on failure. */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004217static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004218{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004219 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004220 struct bm_xfer_ctx c;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004221 int err;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004222
4223 mdev = vnr_to_mdev(tconn, pi->vnr);
4224 if (!mdev)
4225 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004226
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004227 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4228 /* you are supposed to send additional out-of-sync information
4229 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07004230
Philipp Reisnerb411b362009-09-25 16:07:19 -07004231 c = (struct bm_xfer_ctx) {
4232 .bm_bits = drbd_bm_bits(mdev),
4233 .bm_words = drbd_bm_words(mdev),
4234 };
4235
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004236 for(;;) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004237 if (pi->cmd == P_BITMAP)
4238 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4239 else if (pi->cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004240 /* MAYBE: sanity check that we speak proto >= 90,
4241 * and the feature is enabled! */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004242 struct p_compressed_bm *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004243
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004244 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004245 dev_err(DEV, "ReportCBitmap packet too large\n");
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004246 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004247 goto out;
4248 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004249 if (pi->size <= sizeof(*p)) {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004250 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004251 err = -EIO;
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01004252 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004253 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004254 err = drbd_recv_all(mdev->tconn, p, pi->size);
4255 if (err)
4256 goto out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004257 err = decode_bitmap_c(mdev, p, &c, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004258 } else {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004259 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004260 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004261 goto out;
4262 }
4263
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004264 c.packets[pi->cmd == P_BITMAP]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004265 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004266
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004267 if (err <= 0) {
4268 if (err < 0)
4269 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004270 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004271 }
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004272 err = drbd_recv_header(mdev->tconn, pi);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004273 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004274 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004275 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004276
4277 INFO_bm_xfer_stats(mdev, "receive", &c);
4278
4279 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01004280 enum drbd_state_rv rv;
4281
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004282 err = drbd_send_bitmap(mdev);
4283 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004284 goto out;
4285 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01004286 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4287 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004288 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4289 /* admin may have requested C_DISCONNECTING,
4290 * other threads may have noticed network errors */
4291 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4292 drbd_conn_str(mdev->state.conn));
4293 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004294 err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004295
Philipp Reisnerb411b362009-09-25 16:07:19 -07004296 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004297 drbd_bm_unlock(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004298 if (!err && mdev->state.conn == C_WF_BITMAP_S)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004299 drbd_start_resync(mdev, C_SYNC_SOURCE);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004300 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004301}
4302
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004303static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004304{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004305 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004306 pi->cmd, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004307
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004308 return ignore_remaining_packet(tconn, pi);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004309}
4310
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004311static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004312{
Philipp Reisnerb411b362009-09-25 16:07:19 -07004313 /* Make sure we've acked all the TCP data associated
4314 * with the data requests being unplugged */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004315 drbd_tcp_quickack(tconn->data.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004316
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004317 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004318}
4319
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004320static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner73a01a12010-10-27 14:33:00 +02004321{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004322 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004323 struct p_block_desc *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004324
4325 mdev = vnr_to_mdev(tconn, pi->vnr);
4326 if (!mdev)
4327 return -EIO;
Philipp Reisner73a01a12010-10-27 14:33:00 +02004328
Lars Ellenbergf735e3632010-12-17 21:06:18 +01004329 switch (mdev->state.conn) {
4330 case C_WF_SYNC_UUID:
4331 case C_WF_BITMAP_T:
4332 case C_BEHIND:
4333 break;
4334 default:
4335 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4336 drbd_conn_str(mdev->state.conn));
4337 }
4338
Philipp Reisner73a01a12010-10-27 14:33:00 +02004339 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4340
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004341 return 0;
Philipp Reisner73a01a12010-10-27 14:33:00 +02004342}
4343
Philipp Reisner02918be2010-08-20 14:35:10 +02004344struct data_cmd {
4345 int expect_payload;
4346 size_t pkt_size;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004347 int (*fn)(struct drbd_tconn *, struct packet_info *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004348};
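/* pkt_size is the fixed part of each packet that drbdd() reads into pi.data
 * before calling fn(); only handlers marked with expect_payload may be sent
 * additional payload beyond that fixed part. */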
4349
Philipp Reisner02918be2010-08-20 14:35:10 +02004350static struct data_cmd drbd_cmd_handler[] = {
4351 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4352 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4353 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4354 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004355 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4356 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4357 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
Philipp Reisner02918be2010-08-20 14:35:10 +02004358 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4359 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004360 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4361 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
Philipp Reisner02918be2010-08-20 14:35:10 +02004362 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4363 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4364 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4365 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4366 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4367 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4368 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4369 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4370 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4371 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02004372 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004373 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
Philipp Reisner036b17e2011-05-16 17:38:11 +02004374 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
Philipp Reisner02918be2010-08-20 14:35:10 +02004375};
4376
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004377static void drbdd(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004378{
Philipp Reisner77351055b2011-02-07 17:24:26 +01004379 struct packet_info pi;
Philipp Reisner02918be2010-08-20 14:35:10 +02004380 size_t shs; /* sub header size */
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004381 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004382
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004383 while (get_t_state(&tconn->receiver) == RUNNING) {
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004384 struct data_cmd *cmd;
4385
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004386 drbd_thread_current_set_cpu(&tconn->receiver);
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004387 if (drbd_recv_header(tconn, &pi))
Philipp Reisner02918be2010-08-20 14:35:10 +02004388 goto err_out;
4389
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004390 cmd = &drbd_cmd_handler[pi.cmd];
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004391 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004392 conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4393 cmdname(pi.cmd), pi.cmd);
Philipp Reisner02918be2010-08-20 14:35:10 +02004394 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01004395 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004396
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004397 shs = cmd->pkt_size;
4398 if (pi.size > shs && !cmd->expect_payload) {
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004399 conn_err(tconn, "No payload expected %s l:%d\n",
4400 cmdname(pi.cmd), pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004401 goto err_out;
4402 }
4403
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004404 if (shs) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004405 err = drbd_recv_all_warn(tconn, pi.data, shs);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004406 if (err)
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004407 goto err_out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004408 pi.size -= shs;
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004409 }
4410
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004411 err = cmd->fn(tconn, &pi);
4412 if (err) {
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004413 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4414 cmdname(pi.cmd), err, pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004415 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004416 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004417 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004418 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004419
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004420 err_out:
4421 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004422}
4423
Philipp Reisner0e29d162011-02-18 14:23:11 +01004424void conn_flush_workqueue(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004425{
4426 struct drbd_wq_barrier barr;
4427
4428 barr.w.cb = w_prev_work_done;
Philipp Reisner0e29d162011-02-18 14:23:11 +01004429 barr.w.tconn = tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004430 init_completion(&barr.done);
Lars Ellenbergd5b27b02011-11-14 15:42:37 +01004431 drbd_queue_work(&tconn->sender_work, &barr.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004432 wait_for_completion(&barr.done);
4433}
4434
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004435static void conn_disconnect(struct drbd_tconn *tconn)
Philipp Reisnerf70b35112010-06-24 14:34:40 +02004436{
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02004437 struct drbd_conf *mdev;
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004438 enum drbd_conns oc;
Philipp Reisner376694a2011-11-07 10:54:28 +01004439 int vnr;
Philipp Reisnerf70b35112010-06-24 14:34:40 +02004440
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004441 if (tconn->cstate == C_STANDALONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004442 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004443
Lars Ellenberg545752d2011-12-05 14:39:25 +01004444 /* We are about to start the cleanup after connection loss.
4445 * Make sure drbd_make_request knows about that.
4446 * Usually we should be in some network failure state already,
4447 * but just in case we are not, we fix it up here.
4448 */
Philipp Reisnerb8853db2011-12-13 11:09:16 +01004449 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
Lars Ellenberg545752d2011-12-05 14:39:25 +01004450
Philipp Reisnerb411b362009-09-25 16:07:19 -07004451 /* asender does not clean up anything. it must not interfere, either */
Philipp Reisner360cc742011-02-08 14:29:53 +01004452 drbd_thread_stop(&tconn->asender);
4453 drbd_free_sock(tconn);
4454
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02004455 rcu_read_lock();
4456 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4457 kref_get(&mdev->kref);
4458 rcu_read_unlock();
4459 drbd_disconnected(mdev);
4460 kref_put(&mdev->kref, &drbd_minor_destroy);
4461 rcu_read_lock();
4462 }
4463 rcu_read_unlock();
4464
Philipp Reisner12038a32011-11-09 19:18:00 +01004465 if (!list_empty(&tconn->current_epoch->list))
4466 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4467 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4468 atomic_set(&tconn->current_epoch->epoch_size, 0);
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01004469 tconn->send.seen_any_write_yet = false;
Philipp Reisner12038a32011-11-09 19:18:00 +01004470
Philipp Reisner360cc742011-02-08 14:29:53 +01004471 conn_info(tconn, "Connection closed\n");
4472
Philipp Reisnercb703452011-03-24 11:03:07 +01004473 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4474 conn_try_outdate_peer_async(tconn);
4475
Philipp Reisner360cc742011-02-08 14:29:53 +01004476 spin_lock_irq(&tconn->req_lock);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004477 oc = tconn->cstate;
4478 if (oc >= C_UNCONNECTED)
Philipp Reisner376694a2011-11-07 10:54:28 +01004479 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004480
Philipp Reisner360cc742011-02-08 14:29:53 +01004481 spin_unlock_irq(&tconn->req_lock);
4482
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02004483 if (oc == C_DISCONNECTING)
Lars Ellenbergd9cc6e22011-04-27 10:25:28 +02004484 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
Philipp Reisner360cc742011-02-08 14:29:53 +01004485}
4486
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02004487static int drbd_disconnected(struct drbd_conf *mdev)
Philipp Reisner360cc742011-02-08 14:29:53 +01004488{
Philipp Reisner360cc742011-02-08 14:29:53 +01004489 unsigned int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004490
Philipp Reisner85719572010-07-21 10:20:17 +02004491 /* wait for current activity to cease. */
Philipp Reisner87eeee42011-01-19 14:16:30 +01004492 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004493 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4494 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4495 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004496 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004497
4498 /* We do not have data structures that would allow us to
4499 * get the rs_pending_cnt down to 0 again.
4500 * * On C_SYNC_TARGET we do not have any data structures describing
4501 * the pending RSDataRequest's we have sent.
4502 * * On C_SYNC_SOURCE there is no data structure that tracks
4503 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4504 * And no, it is not the sum of the reference counts in the
4505 * resync_LRU. The resync_LRU tracks the whole operation including
4506 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4507 * on the fly. */
4508 drbd_rs_cancel_all(mdev);
4509 mdev->rs_total = 0;
4510 mdev->rs_failed = 0;
4511 atomic_set(&mdev->rs_pending_cnt, 0);
4512 wake_up(&mdev->misc_wait);
4513
Philipp Reisnerb411b362009-09-25 16:07:19 -07004514 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004515 resync_timer_fn((unsigned long)mdev);
4516
Philipp Reisnerb411b362009-09-25 16:07:19 -07004517 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4518 * w_make_resync_request etc. which may still be on the worker queue
4519 * to be "canceled" */
4520 drbd_flush_workqueue(mdev);
4521
Andreas Gruenbachera990be42011-04-06 17:56:48 +02004522 drbd_finish_peer_reqs(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004523
Philipp Reisnerd10b4ea2011-11-30 23:25:36 +01004524 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4525 might have queued work again. The one before drbd_finish_peer_reqs() is
4526 necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
4527 drbd_flush_workqueue(mdev);
4528
Lars Ellenberg08332d72012-08-17 15:09:13 +02004529 /* need to do it again, drbd_finish_peer_reqs() may have populated it
4530 * again via drbd_try_clear_on_disk_bm(). */
4531 drbd_rs_cancel_all(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004532
4533 kfree(mdev->p_uuid);
4534 mdev->p_uuid = NULL;
4535
Philipp Reisner2aebfab2011-03-28 16:48:11 +02004536 if (!drbd_suspended(mdev))
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01004537 tl_clear(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004538
4539 drbd_md_sync(mdev);
4540
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004541 /* serialize with bitmap writeout triggered by the state change,
4542 * if any. */
4543 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4544
Philipp Reisnerb411b362009-09-25 16:07:19 -07004545 /* tcp_close and release of sendpage pages can be deferred. I don't
4546 * want to use SO_LINGER, because apparently it can be deferred for
4547 * more than 20 seconds (longest time I checked).
4548 *
4549 * Actually we don't care for exactly when the network stack does its
4550 * put_page(), but release our reference on these pages right here.
4551 */
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02004552 i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004553 if (i)
4554 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02004555 i = atomic_read(&mdev->pp_in_use_by_net);
4556 if (i)
4557 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004558 i = atomic_read(&mdev->pp_in_use);
4559 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02004560 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004561
4562 D_ASSERT(list_empty(&mdev->read_ee));
4563 D_ASSERT(list_empty(&mdev->active_ee));
4564 D_ASSERT(list_empty(&mdev->sync_ee));
4565 D_ASSERT(list_empty(&mdev->done_ee));
4566
Philipp Reisner360cc742011-02-08 14:29:53 +01004567 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004568}
4569
4570/*
4571 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4572 * we can agree on is stored in agreed_pro_version.
4573 *
4574 * feature flags and the reserved array should be enough room for future
4575 * enhancements of the handshake protocol, and possible plugins...
4576 *
4577 * for now, they are expected to be zero, but ignored.
4578 */
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004579static int drbd_send_features(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004580{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004581 struct drbd_socket *sock;
4582 struct p_connection_features *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004583
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004584 sock = &tconn->data;
4585 p = conn_prepare_command(tconn, sock);
4586 if (!p)
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004587 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004588 memset(p, 0, sizeof(*p));
4589 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4590 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004591 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004592}
4593
4594/*
4595 * return values:
4596 * 1 yes, we have a valid connection
4597 * 0 oops, did not work out, please try again
4598 * -1 peer talks different language,
4599 * no point in trying again, please go standalone.
4600 */
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004601static int drbd_do_features(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004602{
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004603 /* ASSERT current == tconn->receiver ... */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004604 struct p_connection_features *p;
4605 const int expect = sizeof(struct p_connection_features);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004606 struct packet_info pi;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004607 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004608
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004609 err = drbd_send_features(tconn);
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004610 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004611 return 0;
4612
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004613 err = drbd_recv_header(tconn, &pi);
4614 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004615 return 0;
4616
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004617 if (pi.cmd != P_CONNECTION_FEATURES) {
4618 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004619 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004620 return -1;
4621 }
4622
Philipp Reisner77351055b2011-02-07 17:24:26 +01004623 if (pi.size != expect) {
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004624 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004625 expect, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004626 return -1;
4627 }
4628
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004629 p = pi.data;
4630 err = drbd_recv_all_warn(tconn, p, expect);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004631 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004632 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004633
Philipp Reisnerb411b362009-09-25 16:07:19 -07004634 p->protocol_min = be32_to_cpu(p->protocol_min);
4635 p->protocol_max = be32_to_cpu(p->protocol_max);
4636 if (p->protocol_max == 0)
4637 p->protocol_max = p->protocol_min;
4638
4639 if (PRO_VERSION_MAX < p->protocol_min ||
4640 PRO_VERSION_MIN > p->protocol_max)
4641 goto incompat;
4642
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004643 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004644
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004645 conn_info(tconn, "Handshake successful: "
4646 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004647
4648 return 1;
4649
4650 incompat:
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004651 conn_err(tconn, "incompatible DRBD dialects: "
Philipp Reisnerb411b362009-09-25 16:07:19 -07004652 "I support %d-%d, peer supports %d-%d\n",
4653 PRO_VERSION_MIN, PRO_VERSION_MAX,
4654 p->protocol_min, p->protocol_max);
4655 return -1;
4656}
4657
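/*
 * For illustration only: the version agreement done by drbd_do_features()
 * above boils down to intersecting our [PRO_VERSION_MIN, PRO_VERSION_MAX]
 * window with the peer's advertised window and picking the highest common
 * version.  A hypothetical standalone helper (not used by the driver) would
 * look like this:
 */
static inline int drbd_features_example_agree(int peer_min, int peer_max)
{
	if (peer_max == 0)		/* very old peers report max == 0 */
		peer_max = peer_min;
	if (PRO_VERSION_MAX < peer_min || PRO_VERSION_MIN > peer_max)
		return -1;		/* windows do not overlap: incompatible */
	return min_t(int, PRO_VERSION_MAX, peer_max);
}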
4658#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
Philipp Reisner13e60372011-02-08 09:54:40 +01004659static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004660{
Philipp Reisneref57f9e2013-03-27 14:08:44 +01004661 conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4662 conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004663 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004664}
4665#else
4666#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004667
4668/* Return value:
4669 1 - auth succeeded,
4670 0 - failed, try again (network error),
4671 -1 - auth failed, don't try again.
4672*/
4673
Philipp Reisner13e60372011-02-08 09:54:40 +01004674static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004675{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004676 struct drbd_socket *sock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004677 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4678 struct scatterlist sg;
4679 char *response = NULL;
4680 char *right_response = NULL;
4681 char *peers_ch = NULL;
Philipp Reisner44ed1672011-04-19 17:10:19 +02004682 unsigned int key_len;
4683 char secret[SHARED_SECRET_MAX]; /* 64 byte */
Philipp Reisnerb411b362009-09-25 16:07:19 -07004684 unsigned int resp_size;
4685 struct hash_desc desc;
Philipp Reisner77351055b2011-02-07 17:24:26 +01004686 struct packet_info pi;
Philipp Reisner44ed1672011-04-19 17:10:19 +02004687 struct net_conf *nc;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004688 int err, rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004689
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004690 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
4691
Philipp Reisner44ed1672011-04-19 17:10:19 +02004692 rcu_read_lock();
4693 nc = rcu_dereference(tconn->net_conf);
4694 key_len = strlen(nc->shared_secret);
4695 memcpy(secret, nc->shared_secret, key_len);
4696 rcu_read_unlock();
4697
Philipp Reisner13e60372011-02-08 09:54:40 +01004698 desc.tfm = tconn->cram_hmac_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004699 desc.flags = 0;
4700
Philipp Reisner44ed1672011-04-19 17:10:19 +02004701 rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004702 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004703 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004704 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004705 goto fail;
4706 }
4707
4708 get_random_bytes(my_challenge, CHALLENGE_LEN);
4709
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004710 sock = &tconn->data;
4711 if (!conn_prepare_command(tconn, sock)) {
4712 rv = 0;
4713 goto fail;
4714 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004715 rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004716 my_challenge, CHALLENGE_LEN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004717 if (!rv)
4718 goto fail;
4719
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004720 err = drbd_recv_header(tconn, &pi);
4721 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004722 rv = 0;
4723 goto fail;
4724 }
4725
Philipp Reisner77351055b2011-02-07 17:24:26 +01004726 if (pi.cmd != P_AUTH_CHALLENGE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004727 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004728 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004729 rv = 0;
4730 goto fail;
4731 }
4732
Philipp Reisner77351055b2011-02-07 17:24:26 +01004733 if (pi.size > CHALLENGE_LEN * 2) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004734 conn_err(tconn, "AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004735 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004736 goto fail;
4737 }
4738
Philipp Reisner77351055b2011-02-07 17:24:26 +01004739 peers_ch = kmalloc(pi.size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004740 if (peers_ch == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004741 conn_err(tconn, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004742 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004743 goto fail;
4744 }
4745
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004746 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4747 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004748 rv = 0;
4749 goto fail;
4750 }
4751
Philipp Reisner13e60372011-02-08 09:54:40 +01004752 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004753 response = kmalloc(resp_size, GFP_NOIO);
4754 if (response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004755 conn_err(tconn, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004756 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004757 goto fail;
4758 }
4759
4760 sg_init_table(&sg, 1);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004761 sg_set_buf(&sg, peers_ch, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004762
4763 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4764 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004765 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004766 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004767 goto fail;
4768 }
4769
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004770 if (!conn_prepare_command(tconn, sock)) {
4771 rv = 0;
4772 goto fail;
4773 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004774 rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004775 response, resp_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004776 if (!rv)
4777 goto fail;
4778
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004779 err = drbd_recv_header(tconn, &pi);
4780 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004781 rv = 0;
4782 goto fail;
4783 }
4784
Philipp Reisner77351055b2011-02-07 17:24:26 +01004785 if (pi.cmd != P_AUTH_RESPONSE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004786 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004787 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004788 rv = 0;
4789 goto fail;
4790 }
4791
Philipp Reisner77351055b2011-02-07 17:24:26 +01004792 if (pi.size != resp_size) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004793 conn_err(tconn, "AuthResponse payload has wrong size\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004794 rv = 0;
4795 goto fail;
4796 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004797
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004798 err = drbd_recv_all_warn(tconn, response, resp_size);
4799 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004800 rv = 0;
4801 goto fail;
4802 }
4803
4804 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004805 if (right_response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004806 conn_err(tconn, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004807 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004808 goto fail;
4809 }
4810
4811 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4812
4813 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4814 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004815 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004816 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004817 goto fail;
4818 }
4819
4820 rv = !memcmp(response, right_response, resp_size);
4821
4822 if (rv)
Philipp Reisner44ed1672011-04-19 17:10:19 +02004823 conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
4824 resp_size);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004825 else
4826 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004827
4828 fail:
4829 kfree(peers_ch);
4830 kfree(response);
4831 kfree(right_response);
4832
4833 return rv;
4834}
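
/*
 * Summary of the CRAM-HMAC exchange implemented above; both peers run the
 * same sequence symmetrically over the data socket:
 *
 *	send P_AUTH_CHALLENGE(my_challenge)
 *	recv P_AUTH_CHALLENGE(peers_ch)
 *	send P_AUTH_RESPONSE(HMAC(shared_secret, peers_ch))
 *	recv P_AUTH_RESPONSE(peer's response)
 *	compare it against HMAC(shared_secret, my_challenge)
 *
 * A mismatch yields rv == -1 (give up), transient errors yield rv == 0
 * (retry the connection attempt).
 */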
4835#endif
4836
4837int drbdd_init(struct drbd_thread *thi)
4838{
Philipp Reisner392c8802011-02-09 10:33:31 +01004839 struct drbd_tconn *tconn = thi->tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004840 int h;
4841
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004842 conn_info(tconn, "receiver (re)started\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004843
4844 do {
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004845 h = conn_connect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004846 if (h == 0) {
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004847 conn_disconnect(tconn);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004848 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004849 }
4850 if (h == -1) {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004851 conn_warn(tconn, "Discarding network configuration.\n");
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004852 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004853 }
4854 } while (h == 0);
4855
Philipp Reisner91fd4da2011-04-20 17:47:29 +02004856 if (h > 0)
4857 drbdd(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004858
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004859 conn_disconnect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004860
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004861 conn_info(tconn, "receiver terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004862 return 0;
4863}
4864
4865/* ********* acknowledge sender ******** */
4866
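/*
 * All got_*() handlers below share one convention: return 0 when the packet
 * was handled (even if it merely reported an error from the peer), and a
 * negative value for problems that must tear down the connection;
 * drbd_asender() treats any nonzero return as a reason to reconnect.
 */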
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004867static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004868{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004869 struct p_req_state_reply *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004870 int retcode = be32_to_cpu(p->retcode);
4871
4872 if (retcode >= SS_SUCCESS) {
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004873 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004874 } else {
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004875 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4876 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4877 drbd_set_st_err_str(retcode), retcode);
4878 }
4879 wake_up(&tconn->ping_wait);
4880
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004881 return 0;
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004882}
4883
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004884static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004885{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004886 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004887 struct p_req_state_reply *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004888 int retcode = be32_to_cpu(p->retcode);
4889
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004890 mdev = vnr_to_mdev(tconn, pi->vnr);
4891 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004892 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004893
Philipp Reisner4d0fc3f2012-01-20 13:52:27 +01004894 if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4895 D_ASSERT(tconn->agreed_pro_version < 100);
4896 return got_conn_RqSReply(tconn, pi);
4897 }
4898
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004899 if (retcode >= SS_SUCCESS) {
4900 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4901 } else {
4902 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004903 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004904 drbd_set_st_err_str(retcode), retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004905 }
4906 wake_up(&mdev->state_wait);
4907
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004908 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004909}
4910
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004911static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004912{
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004913 return drbd_send_ping_ack(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004914
4915}
4916
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004917static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004918{
4919 /* restore idle timeout */
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01004920 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4921 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4922 wake_up(&tconn->ping_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004923
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004924 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004925}
4926
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004927static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004928{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004929 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004930 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004931 sector_t sector = be64_to_cpu(p->sector);
4932 int blksize = be32_to_cpu(p->blksize);
4933
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004934 mdev = vnr_to_mdev(tconn, pi->vnr);
4935 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004936 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004937
Philipp Reisner31890f42011-01-19 14:12:51 +01004938 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004939
4940 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4941
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004942 if (get_ldev(mdev)) {
4943 drbd_rs_complete_io(mdev, sector);
4944 drbd_set_in_sync(mdev, sector, blksize);
4945 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4946 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4947 put_ldev(mdev);
4948 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004949 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004950 atomic_add(blksize >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004951
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004952 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004953}
4954
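/* Look up the request identified by (id, sector) in the given tree, apply
 * the request state transition 'what' to it, and complete the master bio if
 * that transition finished the request.  Returns -EIO if no matching request
 * is found. */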
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004955static int
4956validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4957 struct rb_root *root, const char *func,
4958 enum drbd_req_event what, bool missing_ok)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004959{
4960 struct drbd_request *req;
4961 struct bio_and_error m;
4962
Philipp Reisner87eeee42011-01-19 14:16:30 +01004963 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004964 req = find_request(mdev, root, id, sector, missing_ok, func);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004965 if (unlikely(!req)) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01004966 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004967 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004968 }
4969 __req_mod(req, what, &m);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004970 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004971
4972 if (m.bio)
4973 complete_master_bio(mdev, &m);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004974 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004975}
4976
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004977static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004978{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004979 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004980 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004981 sector_t sector = be64_to_cpu(p->sector);
4982 int blksize = be32_to_cpu(p->blksize);
4983 enum drbd_req_event what;
4984
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004985 mdev = vnr_to_mdev(tconn, pi->vnr);
4986 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004987 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004988
Philipp Reisnerb411b362009-09-25 16:07:19 -07004989 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4990
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01004991 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004992 drbd_set_in_sync(mdev, sector, blksize);
4993 dec_rs_pending(mdev);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004994 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004995 }
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004996 switch (pi->cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004997 case P_RS_WRITE_ACK:
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01004998 what = WRITE_ACKED_BY_PEER_AND_SIS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004999 break;
5000 case P_WRITE_ACK:
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01005001 what = WRITE_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005002 break;
5003 case P_RECV_ACK:
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01005004 what = RECV_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005005 break;
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02005006 case P_SUPERSEDED:
5007 what = CONFLICT_RESOLVED;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01005008 break;
5009 case P_RETRY_WRITE:
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01005010 what = POSTPONE_WRITE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005011 break;
5012 default:
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005013 BUG();
Philipp Reisnerb411b362009-09-25 16:07:19 -07005014 }
5015
5016 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005017 &mdev->write_requests, __func__,
5018 what, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005019}
5020
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005021static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005022{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005023 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005024 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005025 sector_t sector = be64_to_cpu(p->sector);
Philipp Reisner2deb8332011-01-17 18:39:18 +01005026 int size = be32_to_cpu(p->blksize);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02005027 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005028
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005029 mdev = vnr_to_mdev(tconn, pi->vnr);
5030 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005031 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005032
5033 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5034
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01005035 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07005036 dec_rs_pending(mdev);
5037 drbd_rs_failed_io(mdev, sector, size);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005038 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005039 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01005040
Andreas Gruenbacher85997672011-04-04 13:09:15 +02005041 err = validate_req_change_req_state(mdev, p->block_id, sector,
5042 &mdev->write_requests, __func__,
Philipp Reisner303d1442011-04-13 16:24:47 -07005043 NEG_ACKED, true);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02005044 if (err) {
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01005045 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5046 The master bio might already be completed, therefore the
5047 request is no longer in the collision hash. */
5048 /* In Protocol B we might already have got a P_RECV_ACK
5049 but then get a P_NEG_ACK afterwards. */
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01005050 drbd_set_out_of_sync(mdev, sector, size);
Philipp Reisner2deb8332011-01-17 18:39:18 +01005051 }
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005052 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005053}
5054
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005055static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005056{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005057 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005058 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005059 sector_t sector = be64_to_cpu(p->sector);
5060
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005061 mdev = vnr_to_mdev(tconn, pi->vnr);
5062 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005063 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005064
Philipp Reisnerb411b362009-09-25 16:07:19 -07005065 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01005066
Philipp Reisner380207d2011-11-11 12:31:20 +01005067 dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07005068 (unsigned long long)sector, be32_to_cpu(p->blksize));
5069
5070 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005071 &mdev->read_requests, __func__,
5072 NEG_ACKED, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005073}
5074
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005075static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005076{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005077 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005078 sector_t sector;
5079 int size;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005080 struct p_block_ack *p = pi->data;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005081
5082 mdev = vnr_to_mdev(tconn, pi->vnr);
5083 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005084 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005085
5086 sector = be64_to_cpu(p->sector);
5087 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005088
5089 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5090
5091 dec_rs_pending(mdev);
5092
5093 if (get_ldev_if_state(mdev, D_FAILED)) {
5094 drbd_rs_complete_io(mdev, sector);
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01005095 switch (pi->cmd) {
Philipp Reisnerd612d302010-12-27 10:53:28 +01005096 case P_NEG_RS_DREPLY:
5097 drbd_rs_failed_io(mdev, sector, size);
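		/* fall through - P_RS_CANCEL needs no extra handling */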
5098 case P_RS_CANCEL:
5099 break;
5100 default:
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005101 BUG();
Philipp Reisnerd612d302010-12-27 10:53:28 +01005102 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005103 put_ldev(mdev);
5104 }
5105
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005106 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005107}
5108
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005109static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005110{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005111 struct p_barrier_ack *p = pi->data;
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005112 struct drbd_conf *mdev;
5113 int vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005114
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005115 tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
Philipp Reisnerb411b362009-09-25 16:07:19 -07005116
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005117 rcu_read_lock();
5118 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5119 if (mdev->state.conn == C_AHEAD &&
5120 atomic_read(&mdev->ap_in_flight) == 0 &&
5121 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5122 mdev->start_resync_timer.expires = jiffies + HZ;
5123 add_timer(&mdev->start_resync_timer);
5124 }
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02005125 }
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005126 rcu_read_unlock();
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02005127
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005128 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005129}
5130
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005131static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005132{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005133 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005134 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005135 struct drbd_work *w;
5136 sector_t sector;
5137 int size;
5138
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005139 mdev = vnr_to_mdev(tconn, pi->vnr);
5140 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005141 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005142
Philipp Reisnerb411b362009-09-25 16:07:19 -07005143 sector = be64_to_cpu(p->sector);
5144 size = be32_to_cpu(p->blksize);
5145
5146 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5147
5148 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01005149 drbd_ov_out_of_sync_found(mdev, sector, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005150 else
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01005151 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005152
Lars Ellenberg1d53f092010-09-05 01:13:24 +02005153 if (!get_ldev(mdev))
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005154 return 0;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02005155
Philipp Reisnerb411b362009-09-25 16:07:19 -07005156 drbd_rs_complete_io(mdev, sector);
5157 dec_rs_pending(mdev);
5158
Lars Ellenbergea5442a2010-11-05 09:48:01 +01005159 --mdev->ov_left;
5160
5161 /* let's advance progress step marks only for every other megabyte */
5162 if ((mdev->ov_left & 0x200) == 0x200)
5163 drbd_advance_rs_marks(mdev, mdev->ov_left);
5164
5165 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07005166 w = kmalloc(sizeof(*w), GFP_NOIO);
5167 if (w) {
5168 w->cb = w_ov_finished;
Philipp Reisnera21e9292011-02-08 15:08:49 +01005169 w->mdev = mdev;
Lars Ellenbergd5b27b02011-11-14 15:42:37 +01005170 drbd_queue_work(&mdev->tconn->sender_work, w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005171 } else {
5172 dev_err(DEV, "kmalloc(w) failed.\n");
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01005173 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005174 drbd_resync_finished(mdev);
5175 }
5176 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02005177 put_ldev(mdev);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005178 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005179}
5180
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005181static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02005182{
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005183 return 0;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02005184}
5185
Andreas Gruenbachera990be42011-04-06 17:56:48 +02005186static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
Philipp Reisner32862ec2011-02-08 16:41:01 +01005187{
Philipp Reisner082a3432011-03-15 16:05:42 +01005188 struct drbd_conf *mdev;
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005189 int vnr, not_empty = 0;
Philipp Reisner32862ec2011-02-08 16:41:01 +01005190
5191 do {
5192 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5193 flush_signals(current);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005194
5195 rcu_read_lock();
5196 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5197 kref_get(&mdev->kref);
5198 rcu_read_unlock();
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07005199 if (drbd_finish_peer_reqs(mdev)) {
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005200 kref_put(&mdev->kref, &drbd_minor_destroy);
5201 return 1;
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07005202 }
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005203 kref_put(&mdev->kref, &drbd_minor_destroy);
5204 rcu_read_lock();
Philipp Reisner082a3432011-03-15 16:05:42 +01005205 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005206 set_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisner082a3432011-03-15 16:05:42 +01005207
5208 spin_lock_irq(&tconn->req_lock);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005209 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
Philipp Reisner082a3432011-03-15 16:05:42 +01005210 not_empty = !list_empty(&mdev->done_ee);
5211 if (not_empty)
5212 break;
5213 }
5214 spin_unlock_irq(&tconn->req_lock);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005215 rcu_read_unlock();
Philipp Reisner32862ec2011-02-08 16:41:01 +01005216 } while (not_empty);
5217
5218 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005219}
5220
5221struct asender_cmd {
5222 size_t pkt_size;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005223 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005224};
5225
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01005226static struct asender_cmd asender_tbl[] = {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005227 [P_PING] = { 0, got_Ping },
5228 [P_PING_ACK] = { 0, got_PingAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07005229 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5230 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5231 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02005232 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07005233 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5234 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005235 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
Philipp Reisnerb411b362009-09-25 16:07:19 -07005236 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5237 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5238 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5239 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
Philipp Reisner02918be2010-08-20 14:35:10 +02005240 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005241 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5242 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5243 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01005244};
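
/*
 * Sketch of how the table above is consulted (this mirrors the inline checks
 * in drbd_asender() below; a helper like this does not exist in the driver):
 */
static inline struct asender_cmd *asender_cmd_lookup(enum drbd_packet cmd)
{
	if (cmd >= ARRAY_SIZE(asender_tbl) || !asender_tbl[cmd].fn)
		return NULL;	/* not a packet we expect on the meta socket */
	return &asender_tbl[cmd];
}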
Philipp Reisnerb411b362009-09-25 16:07:19 -07005245
5246int drbd_asender(struct drbd_thread *thi)
5247{
Philipp Reisner392c8802011-02-09 10:33:31 +01005248 struct drbd_tconn *tconn = thi->tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005249 struct asender_cmd *cmd = NULL;
Philipp Reisner77351055b2011-02-07 17:24:26 +01005250 struct packet_info pi;
Philipp Reisner257d0af2011-01-26 12:15:29 +01005251 int rv;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005252 void *buf = tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005253 int received = 0;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02005254 unsigned int header_size = drbd_header_size(tconn);
5255 int expect = header_size;
Philipp Reisner44ed1672011-04-19 17:10:19 +02005256 bool ping_timeout_active = false;
5257 struct net_conf *nc;
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005258 int ping_timeo, tcp_cork, ping_int;
Philipp Reisner3990e042013-03-27 14:08:48 +01005259 struct sched_param param = { .sched_priority = 2 };
Philipp Reisnerb411b362009-09-25 16:07:19 -07005260
Philipp Reisner3990e042013-03-27 14:08:48 +01005261 rv = sched_setscheduler(current, SCHED_RR, &param);
5262 if (rv < 0)
5263 conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005264
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01005265 while (get_t_state(thi) == RUNNING) {
Philipp Reisner80822282011-02-08 12:46:30 +01005266 drbd_thread_current_set_cpu(thi);
Philipp Reisner44ed1672011-04-19 17:10:19 +02005267
5268 rcu_read_lock();
5269 nc = rcu_dereference(tconn->net_conf);
5270 ping_timeo = nc->ping_timeo;
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005271 tcp_cork = nc->tcp_cork;
Philipp Reisner44ed1672011-04-19 17:10:19 +02005272 ping_int = nc->ping_int;
5273 rcu_read_unlock();
5274
Philipp Reisner32862ec2011-02-08 16:41:01 +01005275 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
Andreas Gruenbachera17647a2011-04-01 12:49:42 +02005276 if (drbd_send_ping(tconn)) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005277 conn_err(tconn, "drbd_send_ping has failed\n");
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01005278 goto reconnect;
5279 }
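			/* ping_timeo is configured in tenths of a second, hence "* HZ / 10" */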
Philipp Reisner44ed1672011-04-19 17:10:19 +02005280 tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5281 ping_timeout_active = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005282 }
5283
Philipp Reisner32862ec2011-02-08 16:41:01 +01005284 /* TODO: conditionally cork; it may hurt latency if we cork without
5285 much to send */
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005286 if (tcp_cork)
Philipp Reisner32862ec2011-02-08 16:41:01 +01005287 drbd_tcp_cork(tconn->meta.socket);
Andreas Gruenbachera990be42011-04-06 17:56:48 +02005288 if (tconn_finish_peer_reqs(tconn)) {
5289 conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
Philipp Reisner32862ec2011-02-08 16:41:01 +01005290 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005291 }
5292 /* but unconditionally uncork unless disabled */
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005293 if (tcp_cork)
Philipp Reisner32862ec2011-02-08 16:41:01 +01005294 drbd_tcp_uncork(tconn->meta.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005295
5296 /* short circuit, recv_msg would return EINTR anyways. */
5297 if (signal_pending(current))
5298 continue;
5299
Philipp Reisner32862ec2011-02-08 16:41:01 +01005300 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
5301 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005302
5303 flush_signals(current);
5304
5305 /* Note:
5306 * -EINTR (on meta) we got a signal
5307 * -EAGAIN (on meta) rcvtimeo expired
5308 * -ECONNRESET other side closed the connection
5309 * -ERESTARTSYS (on data) we got a signal
5310 * rv < 0 other than above: unexpected error!
5311 * rv == expected: full header or command
5312 * rv < expected: "woken" by signal during receive
5313 * rv == 0 : "connection shut down by peer"
5314 */
5315 if (likely(rv > 0)) {
5316 received += rv;
5317 buf += rv;
5318 } else if (rv == 0) {
Philipp Reisnerb66623e2012-08-08 21:19:09 +02005319 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5320 long t;
5321 rcu_read_lock();
5322 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
5323 rcu_read_unlock();
5324
5325 t = wait_event_timeout(tconn->ping_wait,
5326 tconn->cstate < C_WF_REPORT_PARAMS,
5327 t);
Philipp Reisner599377a2012-08-17 14:50:22 +02005328 if (t)
5329 break;
5330 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005331 conn_err(tconn, "meta connection shut down by peer.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07005332 goto reconnect;
5333 } else if (rv == -EAGAIN) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02005334 /* If the data socket received something meanwhile,
5335 * that is good enough: peer is still alive. */
Philipp Reisner32862ec2011-02-08 16:41:01 +01005336 if (time_after(tconn->last_received,
5337 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02005338 continue;
Lars Ellenbergf36af182011-03-09 22:44:55 +01005339 if (ping_timeout_active) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005340 conn_err(tconn, "PingAck did not arrive in time.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07005341 goto reconnect;
5342 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005343 set_bit(SEND_PING, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005344 continue;
5345 } else if (rv == -EINTR) {
5346 continue;
5347 } else {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005348 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005349 goto reconnect;
5350 }
5351
5352 if (received == expect && cmd == NULL) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005353 if (decode_header(tconn, tconn->meta.rbuf, &pi))
Philipp Reisnerb411b362009-09-25 16:07:19 -07005354 goto reconnect;
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01005355 cmd = &asender_tbl[pi.cmd];
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005356 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02005357 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5358 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005359 goto disconnect;
5360 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005361 expect = header_size + cmd->pkt_size;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02005362 if (pi.size != expect - header_size) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005363 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01005364 pi.cmd, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005365 goto reconnect;
Philipp Reisner257d0af2011-01-26 12:15:29 +01005366 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005367 }
5368 if (received == expect) {
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005369 bool err;
Philipp Reisnera4fbda82011-03-16 11:13:17 +01005370
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005371 err = cmd->fn(tconn, &pi);
5372 if (err) {
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005373 conn_err(tconn, "%pf failed\n", cmd->fn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005374 goto reconnect;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005375 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005376
Philipp Reisnera4fbda82011-03-16 11:13:17 +01005377 tconn->last_received = jiffies;
Lars Ellenbergf36af182011-03-09 22:44:55 +01005378
Philipp Reisner44ed1672011-04-19 17:10:19 +02005379 if (cmd == &asender_tbl[P_PING_ACK]) {
5380 /* restore idle timeout */
5381 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5382 ping_timeout_active = false;
5383 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005384
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005385 buf = tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005386 received = 0;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02005387 expect = header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005388 cmd = NULL;
5389 }
5390 }
5391
5392 if (0) {
5393reconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01005394 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
Philipp Reisner19fffd72012-08-28 16:48:03 +02005395 conn_md_sync(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005396 }
5397 if (0) {
5398disconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01005399 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005400 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005401 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005402
Philipp Reisner32862ec2011-02-08 16:41:01 +01005403 conn_info(tconn, "asender terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07005404
5405 return 0;
5406}