blob: 7af0cc77aa60205a98a5ec6970d177a5bed2a16b [file] [log] [blame]
Philipp Reisnerb411b362009-09-25 16:07:19 -07001/*
2 drbd_receiver.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25
Philipp Reisnerb411b362009-09-25 16:07:19 -070026#include <linux/module.h>
27
28#include <asm/uaccess.h>
29#include <net/sock.h>
30
Philipp Reisnerb411b362009-09-25 16:07:19 -070031#include <linux/drbd.h>
32#include <linux/fs.h>
33#include <linux/file.h>
34#include <linux/in.h>
35#include <linux/mm.h>
36#include <linux/memcontrol.h>
37#include <linux/mm_inline.h>
38#include <linux/slab.h>
Philipp Reisnerb411b362009-09-25 16:07:19 -070039#include <linux/pkt_sched.h>
40#define __KERNEL_SYSCALLS__
41#include <linux/unistd.h>
42#include <linux/vmalloc.h>
43#include <linux/random.h>
Philipp Reisnerb411b362009-09-25 16:07:19 -070044#include <linux/string.h>
45#include <linux/scatterlist.h>
46#include "drbd_int.h"
Philipp Reisnerb411b362009-09-25 16:07:19 -070047#include "drbd_req.h"
48
49#include "drbd_vli.h"
50
Philipp Reisner77351055b2011-02-07 17:24:26 +010051struct packet_info {
52 enum drbd_packet cmd;
Andreas Gruenbachere2857212011-03-25 00:57:38 +010053 unsigned int size;
54 unsigned int vnr;
Andreas Gruenbachere6589832011-03-30 12:54:42 +020055 void *data;
Philipp Reisner77351055b2011-02-07 17:24:26 +010056};
57
Philipp Reisnerb411b362009-09-25 16:07:19 -070058enum finish_epoch {
59 FE_STILL_LIVE,
60 FE_DESTROYED,
61 FE_RECYCLED,
62};
63
Andreas Gruenbacher60381782011-03-28 17:05:50 +020064static int drbd_do_features(struct drbd_tconn *tconn);
Philipp Reisner13e60372011-02-08 09:54:40 +010065static int drbd_do_auth(struct drbd_tconn *tconn);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +020066static int drbd_disconnected(struct drbd_conf *mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -070067
Philipp Reisner1e9dd292011-11-10 15:14:53 +010068static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +010069static int e_end_block(struct drbd_work *, int);
Philipp Reisnerb411b362009-09-25 16:07:19 -070070
Philipp Reisnerb411b362009-09-25 16:07:19 -070071
72#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
73
Lars Ellenberg45bb9122010-05-14 17:10:48 +020074/*
75 * some helper functions to deal with single linked page lists,
76 * page->private being our "next" pointer.
77 */
78
79/* If at least n pages are linked at head, get n pages off.
80 * Otherwise, don't modify head, and return NULL.
81 * Locking is the responsibility of the caller.
82 */
83static struct page *page_chain_del(struct page **head, int n)
84{
85 struct page *page;
86 struct page *tmp;
87
88 BUG_ON(!n);
89 BUG_ON(!head);
90
91 page = *head;
Philipp Reisner23ce4222010-05-20 13:35:31 +020092
93 if (!page)
94 return NULL;
95
Lars Ellenberg45bb9122010-05-14 17:10:48 +020096 while (page) {
97 tmp = page_chain_next(page);
98 if (--n == 0)
99 break; /* found sufficient pages */
100 if (tmp == NULL)
101 /* insufficient pages, don't use any of them. */
102 return NULL;
103 page = tmp;
104 }
105
106 /* add end of list marker for the returned list */
107 set_page_private(page, 0);
108 /* actual return value, and adjustment of head */
109 page = *head;
110 *head = tmp;
111 return page;
112}
113
114/* may be used outside of locks to find the tail of a (usually short)
115 * "private" page chain, before adding it back to a global chain head
116 * with page_chain_add() under a spinlock. */
117static struct page *page_chain_tail(struct page *page, int *len)
118{
119 struct page *tmp;
120 int i = 1;
121 while ((tmp = page_chain_next(page)))
122 ++i, page = tmp;
123 if (len)
124 *len = i;
125 return page;
126}
127
128static int page_chain_free(struct page *page)
129{
130 struct page *tmp;
131 int i = 0;
132 page_chain_for_each_safe(page, tmp) {
133 put_page(page);
134 ++i;
135 }
136 return i;
137}
138
139static void page_chain_add(struct page **head,
140 struct page *chain_first, struct page *chain_last)
141{
142#if 1
143 struct page *tmp;
144 tmp = page_chain_tail(chain_first, NULL);
145 BUG_ON(tmp != chain_last);
146#endif
147
148 /* add chain to head */
149 set_page_private(chain_last, (unsigned long)*head);
150 *head = chain_first;
151}
152
Andreas Gruenbacher18c2d522011-04-07 21:08:50 +0200153static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
154 unsigned int number)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700155{
156 struct page *page = NULL;
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200157 struct page *tmp = NULL;
Andreas Gruenbacher18c2d522011-04-07 21:08:50 +0200158 unsigned int i = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700159
160 /* Yes, testing drbd_pp_vacant outside the lock is racy.
161 * So what. It saves a spin_lock. */
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200162 if (drbd_pp_vacant >= number) {
Philipp Reisnerb411b362009-09-25 16:07:19 -0700163 spin_lock(&drbd_pp_lock);
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200164 page = page_chain_del(&drbd_pp_pool, number);
165 if (page)
166 drbd_pp_vacant -= number;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700167 spin_unlock(&drbd_pp_lock);
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200168 if (page)
169 return page;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700170 }
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200171
Philipp Reisnerb411b362009-09-25 16:07:19 -0700172 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
173 * "criss-cross" setup, that might cause write-out on some other DRBD,
174 * which in turn might block on the other node at this very place. */
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200175 for (i = 0; i < number; i++) {
176 tmp = alloc_page(GFP_TRY);
177 if (!tmp)
178 break;
179 set_page_private(tmp, (unsigned long)page);
180 page = tmp;
181 }
182
183 if (i == number)
184 return page;
185
186 /* Not enough pages immediately available this time.
Andreas Gruenbacherc37c8ec2011-04-07 21:02:09 +0200187 * No need to jump around here, drbd_alloc_pages will retry this
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200188 * function "soon". */
189 if (page) {
190 tmp = page_chain_tail(page, NULL);
191 spin_lock(&drbd_pp_lock);
192 page_chain_add(&drbd_pp_pool, page, tmp);
193 drbd_pp_vacant += i;
194 spin_unlock(&drbd_pp_lock);
195 }
196 return NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700197}
198
Andreas Gruenbachera990be42011-04-06 17:56:48 +0200199static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
200 struct list_head *to_be_freed)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700201{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100202 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700203 struct list_head *le, *tle;
204
205 /* The EEs are always appended to the end of the list. Since
206 they are sent in order over the wire, they have to finish
207 in order. As soon as we see the first not finished we can
208 stop to examine the list... */
209
210 list_for_each_safe(le, tle, &mdev->net_ee) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100211 peer_req = list_entry(le, struct drbd_peer_request, w.list);
Andreas Gruenbacher045417f2011-04-07 21:34:24 +0200212 if (drbd_peer_req_has_active_page(peer_req))
Philipp Reisnerb411b362009-09-25 16:07:19 -0700213 break;
214 list_move(le, to_be_freed);
215 }
216}
217
218static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
219{
220 LIST_HEAD(reclaimed);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100221 struct drbd_peer_request *peer_req, *t;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700222
Philipp Reisner87eeee42011-01-19 14:16:30 +0100223 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbachera990be42011-04-06 17:56:48 +0200224 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
Philipp Reisner87eeee42011-01-19 14:16:30 +0100225 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700226
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100227 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +0200228 drbd_free_net_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700229}
230
231/**
Andreas Gruenbacherc37c8ec2011-04-07 21:02:09 +0200232 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700233 * @mdev: DRBD device.
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200234 * @number: number of pages requested
235 * @retry: whether to retry, if not enough pages are available right now
Philipp Reisnerb411b362009-09-25 16:07:19 -0700236 *
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200237 * Tries to allocate number pages, first from our own page pool, then from
238 * the kernel, unless this allocation would exceed the max_buffers setting.
239 * Possibly retry until DRBD frees sufficient pages somewhere else.
240 *
241 * Returns a page chain linked via page->private.
Philipp Reisnerb411b362009-09-25 16:07:19 -0700242 */
Andreas Gruenbacherc37c8ec2011-04-07 21:02:09 +0200243struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
244 bool retry)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700245{
246 struct page *page = NULL;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200247 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700248 DEFINE_WAIT(wait);
Philipp Reisner44ed1672011-04-19 17:10:19 +0200249 int mxb;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700250
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200251 /* Yes, we may run up to @number over max_buffers. If we
252 * follow it strictly, the admin will get it wrong anyways. */
Philipp Reisner44ed1672011-04-19 17:10:19 +0200253 rcu_read_lock();
254 nc = rcu_dereference(mdev->tconn->net_conf);
255 mxb = nc ? nc->max_buffers : 1000000;
256 rcu_read_unlock();
257
258 if (atomic_read(&mdev->pp_in_use) < mxb)
Andreas Gruenbacher18c2d522011-04-07 21:08:50 +0200259 page = __drbd_alloc_pages(mdev, number);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700260
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200261 while (page == NULL) {
Philipp Reisnerb411b362009-09-25 16:07:19 -0700262 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
263
264 drbd_kick_lo_and_reclaim_net(mdev);
265
Philipp Reisner44ed1672011-04-19 17:10:19 +0200266 if (atomic_read(&mdev->pp_in_use) < mxb) {
Andreas Gruenbacher18c2d522011-04-07 21:08:50 +0200267 page = __drbd_alloc_pages(mdev, number);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700268 if (page)
269 break;
270 }
271
272 if (!retry)
273 break;
274
275 if (signal_pending(current)) {
Andreas Gruenbacherc37c8ec2011-04-07 21:02:09 +0200276 dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -0700277 break;
278 }
279
280 schedule();
281 }
282 finish_wait(&drbd_pp_wait, &wait);
283
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200284 if (page)
285 atomic_add(number, &mdev->pp_in_use);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700286 return page;
287}
288
Andreas Gruenbacherc37c8ec2011-04-07 21:02:09 +0200289/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
Philipp Reisner87eeee42011-01-19 14:16:30 +0100290 * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200291 * Either links the page chain back to the global pool,
292 * or returns all pages to the system. */
Andreas Gruenbacher5cc287e2011-04-07 21:02:59 +0200293static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700294{
Lars Ellenberg435f0742010-09-06 12:30:25 +0200295 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700296 int i;
Lars Ellenberg435f0742010-09-06 12:30:25 +0200297
Lars Ellenberga73ff322012-06-25 19:15:38 +0200298 if (page == NULL)
299 return;
300
Philipp Reisner81a5d602011-02-22 19:53:16 -0500301 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200302 i = page_chain_free(page);
303 else {
304 struct page *tmp;
305 tmp = page_chain_tail(page, &i);
306 spin_lock(&drbd_pp_lock);
307 page_chain_add(&drbd_pp_pool, page, tmp);
308 drbd_pp_vacant += i;
309 spin_unlock(&drbd_pp_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700310 }
Lars Ellenberg435f0742010-09-06 12:30:25 +0200311 i = atomic_sub_return(i, a);
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200312 if (i < 0)
Lars Ellenberg435f0742010-09-06 12:30:25 +0200313 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
314 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700315 wake_up(&drbd_pp_wait);
316}
317
318/*
319You need to hold the req_lock:
320 _drbd_wait_ee_list_empty()
321
322You must not have the req_lock:
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +0200323 drbd_free_peer_req()
Andreas Gruenbacher0db55362011-04-06 16:09:15 +0200324 drbd_alloc_peer_req()
Andreas Gruenbacher7721f562011-04-06 17:14:02 +0200325 drbd_free_peer_reqs()
Philipp Reisnerb411b362009-09-25 16:07:19 -0700326 drbd_ee_fix_bhs()
Andreas Gruenbachera990be42011-04-06 17:56:48 +0200327 drbd_finish_peer_reqs()
Philipp Reisnerb411b362009-09-25 16:07:19 -0700328 drbd_clear_done_ee()
329 drbd_wait_ee_list_empty()
330*/
331
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +0100332struct drbd_peer_request *
Andreas Gruenbacher0db55362011-04-06 16:09:15 +0200333drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
334 unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700335{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100336 struct drbd_peer_request *peer_req;
Lars Ellenberga73ff322012-06-25 19:15:38 +0200337 struct page *page = NULL;
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200338 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700339
Andreas Gruenbacher0cf9d272010-12-07 10:43:29 +0100340 if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
Philipp Reisnerb411b362009-09-25 16:07:19 -0700341 return NULL;
342
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100343 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
344 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -0700345 if (!(gfp_mask & __GFP_NOWARN))
Andreas Gruenbacher0db55362011-04-06 16:09:15 +0200346 dev_err(DEV, "%s: allocation failed\n", __func__);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700347 return NULL;
348 }
349
Lars Ellenberga73ff322012-06-25 19:15:38 +0200350 if (data_size) {
Lars Ellenberg81a35372012-07-30 09:00:54 +0200351 page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
Lars Ellenberga73ff322012-06-25 19:15:38 +0200352 if (!page)
353 goto fail;
354 }
Philipp Reisnerb411b362009-09-25 16:07:19 -0700355
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100356 drbd_clear_interval(&peer_req->i);
357 peer_req->i.size = data_size;
358 peer_req->i.sector = sector;
359 peer_req->i.local = false;
360 peer_req->i.waiting = false;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700361
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100362 peer_req->epoch = NULL;
Philipp Reisnera21e9292011-02-08 15:08:49 +0100363 peer_req->w.mdev = mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100364 peer_req->pages = page;
365 atomic_set(&peer_req->pending_bios, 0);
366 peer_req->flags = 0;
Andreas Gruenbacher9a8e7752011-01-11 14:04:09 +0100367 /*
368 * The block_id is opaque to the receiver. It is not endianness
369 * converted, and sent back to the sender unchanged.
370 */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100371 peer_req->block_id = id;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700372
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100373 return peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700374
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200375 fail:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100376 mempool_free(peer_req, drbd_ee_mempool);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700377 return NULL;
378}
379
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +0200380void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +0100381 int is_net)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700382{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100383 if (peer_req->flags & EE_HAS_DIGEST)
384 kfree(peer_req->digest);
Andreas Gruenbacher5cc287e2011-04-07 21:02:59 +0200385 drbd_free_pages(mdev, peer_req->pages, is_net);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100386 D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
387 D_ASSERT(drbd_interval_empty(&peer_req->i));
388 mempool_free(peer_req, drbd_ee_mempool);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700389}
390
Andreas Gruenbacher7721f562011-04-06 17:14:02 +0200391int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700392{
393 LIST_HEAD(work_list);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100394 struct drbd_peer_request *peer_req, *t;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700395 int count = 0;
Lars Ellenberg435f0742010-09-06 12:30:25 +0200396 int is_net = list == &mdev->net_ee;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700397
Philipp Reisner87eeee42011-01-19 14:16:30 +0100398 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700399 list_splice_init(list, &work_list);
Philipp Reisner87eeee42011-01-19 14:16:30 +0100400 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700401
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100402 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +0200403 __drbd_free_peer_req(mdev, peer_req, is_net);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700404 count++;
405 }
406 return count;
407}
408
Philipp Reisnerb411b362009-09-25 16:07:19 -0700409/*
Andreas Gruenbachera990be42011-04-06 17:56:48 +0200410 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
Philipp Reisnerb411b362009-09-25 16:07:19 -0700411 */
Andreas Gruenbachera990be42011-04-06 17:56:48 +0200412static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700413{
414 LIST_HEAD(work_list);
415 LIST_HEAD(reclaimed);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100416 struct drbd_peer_request *peer_req, *t;
Andreas Gruenbachere2b30322011-03-16 17:16:12 +0100417 int err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700418
Philipp Reisner87eeee42011-01-19 14:16:30 +0100419 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbachera990be42011-04-06 17:56:48 +0200420 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700421 list_splice_init(&mdev->done_ee, &work_list);
Philipp Reisner87eeee42011-01-19 14:16:30 +0100422 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700423
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100424 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +0200425 drbd_free_net_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700426
427 /* possible callbacks here:
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +0200428 * e_end_block, and e_end_resync_block, e_send_superseded.
Philipp Reisnerb411b362009-09-25 16:07:19 -0700429 * all ignore the last argument.
430 */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +0100431 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
Andreas Gruenbachere2b30322011-03-16 17:16:12 +0100432 int err2;
433
Philipp Reisnerb411b362009-09-25 16:07:19 -0700434 /* list_del not necessary, next/prev members not touched */
Andreas Gruenbachere2b30322011-03-16 17:16:12 +0100435 err2 = peer_req->w.cb(&peer_req->w, !!err);
436 if (!err)
437 err = err2;
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +0200438 drbd_free_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700439 }
440 wake_up(&mdev->ee_wait);
441
Andreas Gruenbachere2b30322011-03-16 17:16:12 +0100442 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700443}
444
Andreas Gruenbacherd4da1532011-04-07 00:06:56 +0200445static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
446 struct list_head *head)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700447{
448 DEFINE_WAIT(wait);
449
450 /* avoids spin_lock/unlock
451 * and calling prepare_to_wait in the fast path */
452 while (!list_empty(head)) {
453 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
Philipp Reisner87eeee42011-01-19 14:16:30 +0100454 spin_unlock_irq(&mdev->tconn->req_lock);
Jens Axboe7eaceac2011-03-10 08:52:07 +0100455 io_schedule();
Philipp Reisnerb411b362009-09-25 16:07:19 -0700456 finish_wait(&mdev->ee_wait, &wait);
Philipp Reisner87eeee42011-01-19 14:16:30 +0100457 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700458 }
459}
460
Andreas Gruenbacherd4da1532011-04-07 00:06:56 +0200461static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
462 struct list_head *head)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700463{
Philipp Reisner87eeee42011-01-19 14:16:30 +0100464 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700465 _drbd_wait_ee_list_empty(mdev, head);
Philipp Reisner87eeee42011-01-19 14:16:30 +0100466 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700467}
468
Philipp Reisnerdbd9eea2011-02-07 15:34:16 +0100469static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700470{
471 mm_segment_t oldfs;
472 struct kvec iov = {
473 .iov_base = buf,
474 .iov_len = size,
475 };
476 struct msghdr msg = {
477 .msg_iovlen = 1,
478 .msg_iov = (struct iovec *)&iov,
479 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
480 };
481 int rv;
482
483 oldfs = get_fs();
484 set_fs(KERNEL_DS);
485 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
486 set_fs(oldfs);
487
488 return rv;
489}
490
Philipp Reisnerde0ff332011-02-07 16:56:20 +0100491static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700492{
Philipp Reisnerb411b362009-09-25 16:07:19 -0700493 int rv;
494
Philipp Reisner1393b592012-09-03 14:04:23 +0200495 rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700496
Philipp Reisnerdbd08202012-08-17 16:55:47 +0200497 if (rv < 0) {
498 if (rv == -ECONNRESET)
Philipp Reisner155522d2012-08-08 21:19:09 +0200499 conn_info(tconn, "sock was reset by peer\n");
Philipp Reisnerdbd08202012-08-17 16:55:47 +0200500 else if (rv != -ERESTARTSYS)
Philipp Reisner155522d2012-08-08 21:19:09 +0200501 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
Philipp Reisnerdbd08202012-08-17 16:55:47 +0200502 } else if (rv == 0) {
Philipp Reisnerb66623e2012-08-08 21:19:09 +0200503 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
504 long t;
505 rcu_read_lock();
506 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
507 rcu_read_unlock();
508
509 t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
510
Philipp Reisner599377a2012-08-17 14:50:22 +0200511 if (t)
512 goto out;
513 }
Philipp Reisnerb66623e2012-08-08 21:19:09 +0200514 conn_info(tconn, "sock was shut down by peer\n");
Philipp Reisner599377a2012-08-17 14:50:22 +0200515 }
516
Philipp Reisnerb411b362009-09-25 16:07:19 -0700517 if (rv != size)
Philipp Reisnerbbeb6412011-02-10 13:45:46 +0100518 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700519
Philipp Reisner599377a2012-08-17 14:50:22 +0200520out:
Philipp Reisnerb411b362009-09-25 16:07:19 -0700521 return rv;
522}
523
Andreas Gruenbacherc6967742011-03-17 17:15:20 +0100524static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
525{
526 int err;
527
528 err = drbd_recv(tconn, buf, size);
529 if (err != size) {
530 if (err >= 0)
531 err = -EIO;
532 } else
533 err = 0;
534 return err;
535}
536
Andreas Gruenbachera5c31902011-03-24 03:28:04 +0100537static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
538{
539 int err;
540
541 err = drbd_recv_all(tconn, buf, size);
542 if (err && !signal_pending(current))
543 conn_warn(tconn, "short read (expected size %d)\n", (int)size);
544 return err;
545}
546
Lars Ellenberg5dbf1672010-05-25 16:18:01 +0200547/* quoting tcp(7):
548 * On individual connections, the socket buffer size must be set prior to the
549 * listen(2) or connect(2) calls in order to have it take effect.
550 * This is our wrapper to do so.
551 */
552static void drbd_setbufsize(struct socket *sock, unsigned int snd,
553 unsigned int rcv)
554{
555 /* open coded SO_SNDBUF, SO_RCVBUF */
556 if (snd) {
557 sock->sk->sk_sndbuf = snd;
558 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
559 }
560 if (rcv) {
561 sock->sk->sk_rcvbuf = rcv;
562 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
563 }
564}
565
Philipp Reisnereac3e992011-02-07 14:05:07 +0100566static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700567{
568 const char *what;
569 struct socket *sock;
570 struct sockaddr_in6 src_in6;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200571 struct sockaddr_in6 peer_in6;
572 struct net_conf *nc;
573 int err, peer_addr_len, my_addr_len;
Andreas Gruenbacher69ef82d2011-05-11 14:34:35 +0200574 int sndbuf_size, rcvbuf_size, connect_int;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700575 int disconnect_on_error = 1;
576
Philipp Reisner44ed1672011-04-19 17:10:19 +0200577 rcu_read_lock();
578 nc = rcu_dereference(tconn->net_conf);
579 if (!nc) {
580 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -0700581 return NULL;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200582 }
Philipp Reisner44ed1672011-04-19 17:10:19 +0200583 sndbuf_size = nc->sndbuf_size;
584 rcvbuf_size = nc->rcvbuf_size;
Andreas Gruenbacher69ef82d2011-05-11 14:34:35 +0200585 connect_int = nc->connect_int;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +0200586 rcu_read_unlock();
Philipp Reisner44ed1672011-04-19 17:10:19 +0200587
Andreas Gruenbacher089c0752011-06-14 18:28:09 +0200588 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
589 memcpy(&src_in6, &tconn->my_addr, my_addr_len);
Philipp Reisner44ed1672011-04-19 17:10:19 +0200590
Andreas Gruenbacher089c0752011-06-14 18:28:09 +0200591 if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
Philipp Reisner44ed1672011-04-19 17:10:19 +0200592 src_in6.sin6_port = 0;
593 else
594 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
595
Andreas Gruenbacher089c0752011-06-14 18:28:09 +0200596 peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
597 memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700598
599 what = "sock_create_kern";
Philipp Reisner44ed1672011-04-19 17:10:19 +0200600 err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
601 SOCK_STREAM, IPPROTO_TCP, &sock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700602 if (err < 0) {
603 sock = NULL;
604 goto out;
605 }
606
607 sock->sk->sk_rcvtimeo =
Andreas Gruenbacher69ef82d2011-05-11 14:34:35 +0200608 sock->sk->sk_sndtimeo = connect_int * HZ;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200609 drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700610
611 /* explicitly bind to the configured IP as source IP
612 * for the outgoing connections.
613 * This is needed for multihomed hosts and to be
614 * able to use lo: interfaces for drbd.
615 * Make sure to use 0 as port number, so linux selects
616 * a free one dynamically.
617 */
Philipp Reisnerb411b362009-09-25 16:07:19 -0700618 what = "bind before connect";
Philipp Reisner44ed1672011-04-19 17:10:19 +0200619 err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700620 if (err < 0)
621 goto out;
622
623 /* connect may fail, peer not yet available.
624 * stay C_WF_CONNECTION, don't go Disconnecting! */
625 disconnect_on_error = 0;
626 what = "connect";
Philipp Reisner44ed1672011-04-19 17:10:19 +0200627 err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700628
629out:
630 if (err < 0) {
631 if (sock) {
632 sock_release(sock);
633 sock = NULL;
634 }
635 switch (-err) {
636 /* timeout, busy, signal pending */
637 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
638 case EINTR: case ERESTARTSYS:
639 /* peer not (yet) available, network problem */
640 case ECONNREFUSED: case ENETUNREACH:
641 case EHOSTDOWN: case EHOSTUNREACH:
642 disconnect_on_error = 0;
643 break;
644 default:
Philipp Reisnereac3e992011-02-07 14:05:07 +0100645 conn_err(tconn, "%s failed, err = %d\n", what, err);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700646 }
647 if (disconnect_on_error)
Philipp Reisnerbbeb6412011-02-10 13:45:46 +0100648 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700649 }
Philipp Reisner44ed1672011-04-19 17:10:19 +0200650
Philipp Reisnerb411b362009-09-25 16:07:19 -0700651 return sock;
652}
653
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200654struct accept_wait_data {
655 struct drbd_tconn *tconn;
656 struct socket *s_listen;
657 struct completion door_bell;
658 void (*original_sk_state_change)(struct sock *sk);
659
660};
661
Andreas Gruenbacher715306f2012-08-10 17:00:30 +0200662static void drbd_incoming_connection(struct sock *sk)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700663{
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200664 struct accept_wait_data *ad = sk->sk_user_data;
Andreas Gruenbacher715306f2012-08-10 17:00:30 +0200665 void (*state_change)(struct sock *sk);
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200666
Andreas Gruenbacher715306f2012-08-10 17:00:30 +0200667 state_change = ad->original_sk_state_change;
668 if (sk->sk_state == TCP_ESTABLISHED)
669 complete(&ad->door_bell);
670 state_change(sk);
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200671}
672
673static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700674{
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200675 int err, sndbuf_size, rcvbuf_size, my_addr_len;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200676 struct sockaddr_in6 my_addr;
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200677 struct socket *s_listen;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200678 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700679 const char *what;
680
Philipp Reisner44ed1672011-04-19 17:10:19 +0200681 rcu_read_lock();
682 nc = rcu_dereference(tconn->net_conf);
683 if (!nc) {
684 rcu_read_unlock();
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200685 return -EIO;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200686 }
Philipp Reisner44ed1672011-04-19 17:10:19 +0200687 sndbuf_size = nc->sndbuf_size;
688 rcvbuf_size = nc->rcvbuf_size;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200689 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -0700690
Andreas Gruenbacher089c0752011-06-14 18:28:09 +0200691 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
692 memcpy(&my_addr, &tconn->my_addr, my_addr_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700693
694 what = "sock_create_kern";
Philipp Reisner44ed1672011-04-19 17:10:19 +0200695 err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200696 SOCK_STREAM, IPPROTO_TCP, &s_listen);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700697 if (err) {
698 s_listen = NULL;
699 goto out;
700 }
701
Philipp Reisner98683652012-11-09 14:18:43 +0100702 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
Philipp Reisner44ed1672011-04-19 17:10:19 +0200703 drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700704
705 what = "bind before listen";
Philipp Reisner44ed1672011-04-19 17:10:19 +0200706 err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700707 if (err < 0)
708 goto out;
709
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200710 ad->s_listen = s_listen;
711 write_lock_bh(&s_listen->sk->sk_callback_lock);
712 ad->original_sk_state_change = s_listen->sk->sk_state_change;
Andreas Gruenbacher715306f2012-08-10 17:00:30 +0200713 s_listen->sk->sk_state_change = drbd_incoming_connection;
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200714 s_listen->sk->sk_user_data = ad;
715 write_unlock_bh(&s_listen->sk->sk_callback_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700716
Philipp Reisner2820fd32012-07-12 10:22:48 +0200717 what = "listen";
718 err = s_listen->ops->listen(s_listen, 5);
719 if (err < 0)
720 goto out;
721
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200722 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700723out:
724 if (s_listen)
725 sock_release(s_listen);
726 if (err < 0) {
727 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200728 conn_err(tconn, "%s failed, err = %d\n", what, err);
729 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700730 }
731 }
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200732
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200733 return -EIO;
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200734}
735
Andreas Gruenbacher715306f2012-08-10 17:00:30 +0200736static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
737{
738 write_lock_bh(&sk->sk_callback_lock);
739 sk->sk_state_change = ad->original_sk_state_change;
740 sk->sk_user_data = NULL;
741 write_unlock_bh(&sk->sk_callback_lock);
742}
743
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200744static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200745{
746 int timeo, connect_int, err = 0;
747 struct socket *s_estab = NULL;
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200748 struct net_conf *nc;
749
750 rcu_read_lock();
751 nc = rcu_dereference(tconn->net_conf);
752 if (!nc) {
753 rcu_read_unlock();
754 return NULL;
755 }
756 connect_int = nc->connect_int;
757 rcu_read_unlock();
758
759 timeo = connect_int * HZ;
760 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
761
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200762 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
763 if (err <= 0)
764 return NULL;
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200765
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200766 err = kernel_accept(ad->s_listen, &s_estab, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700767 if (err < 0) {
768 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
Philipp Reisner1f3e5092012-07-12 11:08:34 +0200769 conn_err(tconn, "accept failed, err = %d\n", err);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +0100770 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700771 }
772 }
Philipp Reisnerb411b362009-09-25 16:07:19 -0700773
Andreas Gruenbacher715306f2012-08-10 17:00:30 +0200774 if (s_estab)
775 unregister_state_change(s_estab->sk, ad);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700776
777 return s_estab;
778}
779
Andreas Gruenbachere6589832011-03-30 12:54:42 +0200780static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700781
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +0200782static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
783 enum drbd_packet cmd)
784{
785 if (!conn_prepare_command(tconn, sock))
786 return -EIO;
Andreas Gruenbachere6589832011-03-30 12:54:42 +0200787 return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700788}
789
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +0200790static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700791{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +0200792 unsigned int header_size = drbd_header_size(tconn);
793 struct packet_info pi;
794 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700795
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +0200796 err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
797 if (err != header_size) {
798 if (err >= 0)
799 err = -EIO;
800 return err;
801 }
802 err = decode_header(tconn, tconn->data.rbuf, &pi);
803 if (err)
804 return err;
805 return pi.cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700806}
807
808/**
809 * drbd_socket_okay() - Free the socket if its connection is not okay
Philipp Reisnerb411b362009-09-25 16:07:19 -0700810 * @sock: pointer to the pointer to the socket.
811 */
Philipp Reisnerdbd9eea2011-02-07 15:34:16 +0100812static int drbd_socket_okay(struct socket **sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700813{
814 int rr;
815 char tb[4];
816
817 if (!*sock)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100818 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700819
Philipp Reisnerdbd9eea2011-02-07 15:34:16 +0100820 rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700821
822 if (rr > 0 || rr == -EAGAIN) {
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100823 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700824 } else {
825 sock_release(*sock);
826 *sock = NULL;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100827 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700828 }
829}
Philipp Reisner2325eb62011-03-15 16:56:18 +0100830/* Gets called if a connection is established, or if a new minor gets created
831 in a connection */
Philipp Reisnerc141ebd2011-05-05 16:13:10 +0200832int drbd_connected(struct drbd_conf *mdev)
Philipp Reisner907599e2011-02-08 11:25:37 +0100833{
Andreas Gruenbacher0829f5e2011-03-24 14:31:22 +0100834 int err;
Philipp Reisner907599e2011-02-08 11:25:37 +0100835
836 atomic_set(&mdev->packet_seq, 0);
837 mdev->peer_seq = 0;
838
Philipp Reisner8410da82011-02-11 20:11:10 +0100839 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
840 &mdev->tconn->cstate_mutex :
841 &mdev->own_state_mutex;
842
Andreas Gruenbacher0829f5e2011-03-24 14:31:22 +0100843 err = drbd_send_sync_param(mdev);
844 if (!err)
845 err = drbd_send_sizes(mdev, 0, 0);
846 if (!err)
847 err = drbd_send_uuids(mdev);
848 if (!err)
Philipp Reisner43de7c82011-11-10 13:16:13 +0100849 err = drbd_send_current_state(mdev);
Philipp Reisner907599e2011-02-08 11:25:37 +0100850 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
851 clear_bit(RESIZE_PENDING, &mdev->flags);
Philipp Reisner2d56a972013-03-27 14:08:34 +0100852 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisner8b924f12011-03-01 11:08:28 +0100853 mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
Andreas Gruenbacher0829f5e2011-03-24 14:31:22 +0100854 return err;
Philipp Reisner907599e2011-02-08 11:25:37 +0100855}
Philipp Reisnerb411b362009-09-25 16:07:19 -0700856
857/*
858 * return values:
859 * 1 yes, we have a valid connection
860 * 0 oops, did not work out, please try again
861 * -1 peer talks different language,
862 * no point in trying again, please go standalone.
863 * -2 We do not have a network config...
864 */
Philipp Reisner81fa2e62011-05-04 15:10:30 +0200865static int conn_connect(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700866{
Philipp Reisner7da35862011-12-19 22:42:56 +0100867 struct drbd_socket sock, msock;
Philipp Reisnerc141ebd2011-05-05 16:13:10 +0200868 struct drbd_conf *mdev;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200869 struct net_conf *nc;
Philipp Reisner92f14952012-08-01 11:41:01 +0200870 int vnr, timeout, h, ok;
Philipp Reisner08b165b2011-09-05 16:22:33 +0200871 bool discard_my_data;
Philipp Reisner197296f2012-03-26 16:47:11 +0200872 enum drbd_state_rv rv;
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200873 struct accept_wait_data ad = {
874 .tconn = tconn,
875 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
876 };
Philipp Reisnerb411b362009-09-25 16:07:19 -0700877
Philipp Reisnerb66623e2012-08-08 21:19:09 +0200878 clear_bit(DISCONNECT_SENT, &tconn->flags);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +0100879 if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700880 return -2;
881
Philipp Reisner7da35862011-12-19 22:42:56 +0100882 mutex_init(&sock.mutex);
883 sock.sbuf = tconn->data.sbuf;
884 sock.rbuf = tconn->data.rbuf;
885 sock.socket = NULL;
886 mutex_init(&msock.mutex);
887 msock.sbuf = tconn->meta.sbuf;
888 msock.rbuf = tconn->meta.rbuf;
889 msock.socket = NULL;
890
Andreas Gruenbacher0916e0e2011-03-21 14:10:15 +0100891 /* Assume that the peer only understands protocol 80 until we know better. */
892 tconn->agreed_pro_version = 80;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700893
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200894 if (prepare_listen_socket(tconn, &ad))
895 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700896
897 do {
Andreas Gruenbacher2bf89622011-03-28 16:33:12 +0200898 struct socket *s;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700899
Philipp Reisner92f14952012-08-01 11:41:01 +0200900 s = drbd_try_connect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700901 if (s) {
Philipp Reisner7da35862011-12-19 22:42:56 +0100902 if (!sock.socket) {
903 sock.socket = s;
904 send_first_packet(tconn, &sock, P_INITIAL_DATA);
905 } else if (!msock.socket) {
Lars Ellenberg427c0432012-08-01 12:43:01 +0200906 clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
Philipp Reisner7da35862011-12-19 22:42:56 +0100907 msock.socket = s;
908 send_first_packet(tconn, &msock, P_INITIAL_META);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700909 } else {
Philipp Reisner81fa2e62011-05-04 15:10:30 +0200910 conn_err(tconn, "Logic error in conn_connect()\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -0700911 goto out_release_sockets;
912 }
913 }
914
Philipp Reisner7da35862011-12-19 22:42:56 +0100915 if (sock.socket && msock.socket) {
916 rcu_read_lock();
917 nc = rcu_dereference(tconn->net_conf);
918 timeout = nc->ping_timeo * HZ / 10;
919 rcu_read_unlock();
920 schedule_timeout_interruptible(timeout);
921 ok = drbd_socket_okay(&sock.socket);
922 ok = drbd_socket_okay(&msock.socket) && ok;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700923 if (ok)
924 break;
925 }
926
927retry:
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200928 s = drbd_wait_for_connect(tconn, &ad);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700929 if (s) {
Philipp Reisner92f14952012-08-01 11:41:01 +0200930 int fp = receive_first_packet(tconn, s);
Philipp Reisner7da35862011-12-19 22:42:56 +0100931 drbd_socket_okay(&sock.socket);
932 drbd_socket_okay(&msock.socket);
Philipp Reisner92f14952012-08-01 11:41:01 +0200933 switch (fp) {
Andreas Gruenbachere5d6f332011-03-28 16:44:40 +0200934 case P_INITIAL_DATA:
Philipp Reisner7da35862011-12-19 22:42:56 +0100935 if (sock.socket) {
Philipp Reisner907599e2011-02-08 11:25:37 +0100936 conn_warn(tconn, "initial packet S crossed\n");
Philipp Reisner7da35862011-12-19 22:42:56 +0100937 sock_release(sock.socket);
Philipp Reisner80c6eed2012-08-01 14:53:39 +0200938 sock.socket = s;
939 goto randomize;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700940 }
Philipp Reisner7da35862011-12-19 22:42:56 +0100941 sock.socket = s;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700942 break;
Andreas Gruenbachere5d6f332011-03-28 16:44:40 +0200943 case P_INITIAL_META:
Lars Ellenberg427c0432012-08-01 12:43:01 +0200944 set_bit(RESOLVE_CONFLICTS, &tconn->flags);
Philipp Reisner7da35862011-12-19 22:42:56 +0100945 if (msock.socket) {
Philipp Reisner907599e2011-02-08 11:25:37 +0100946 conn_warn(tconn, "initial packet M crossed\n");
Philipp Reisner7da35862011-12-19 22:42:56 +0100947 sock_release(msock.socket);
Philipp Reisner80c6eed2012-08-01 14:53:39 +0200948 msock.socket = s;
949 goto randomize;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700950 }
Philipp Reisner7da35862011-12-19 22:42:56 +0100951 msock.socket = s;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700952 break;
953 default:
Philipp Reisner907599e2011-02-08 11:25:37 +0100954 conn_warn(tconn, "Error receiving initial packet\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -0700955 sock_release(s);
Philipp Reisner80c6eed2012-08-01 14:53:39 +0200956randomize:
Philipp Reisnerb411b362009-09-25 16:07:19 -0700957 if (random32() & 1)
958 goto retry;
959 }
960 }
961
Philipp Reisnerbbeb6412011-02-10 13:45:46 +0100962 if (tconn->cstate <= C_DISCONNECTING)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700963 goto out_release_sockets;
964 if (signal_pending(current)) {
965 flush_signals(current);
966 smp_rmb();
Philipp Reisner907599e2011-02-08 11:25:37 +0100967 if (get_t_state(&tconn->receiver) == EXITING)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700968 goto out_release_sockets;
969 }
970
Philipp Reisnerb666dbf2012-07-26 14:12:59 +0200971 ok = drbd_socket_okay(&sock.socket);
972 ok = drbd_socket_okay(&msock.socket) && ok;
973 } while (!ok);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700974
Philipp Reisner7a426fd2012-07-12 14:22:37 +0200975 if (ad.s_listen)
976 sock_release(ad.s_listen);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700977
Philipp Reisner98683652012-11-09 14:18:43 +0100978 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
979 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
Philipp Reisnerb411b362009-09-25 16:07:19 -0700980
Philipp Reisner7da35862011-12-19 22:42:56 +0100981 sock.socket->sk->sk_allocation = GFP_NOIO;
982 msock.socket->sk->sk_allocation = GFP_NOIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700983
Philipp Reisner7da35862011-12-19 22:42:56 +0100984 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
985 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700986
Philipp Reisnerb411b362009-09-25 16:07:19 -0700987 /* NOT YET ...
Philipp Reisner7da35862011-12-19 22:42:56 +0100988 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
989 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
Andreas Gruenbacher60381782011-03-28 17:05:50 +0200990 * first set it to the P_CONNECTION_FEATURES timeout,
Philipp Reisnerb411b362009-09-25 16:07:19 -0700991 * which we set to 4x the configured ping_timeout. */
Philipp Reisner44ed1672011-04-19 17:10:19 +0200992 rcu_read_lock();
993 nc = rcu_dereference(tconn->net_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700994
Philipp Reisner7da35862011-12-19 22:42:56 +0100995 sock.socket->sk->sk_sndtimeo =
996 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200997
Philipp Reisner7da35862011-12-19 22:42:56 +0100998 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
Philipp Reisner44ed1672011-04-19 17:10:19 +0200999 timeout = nc->timeout * HZ / 10;
Philipp Reisner08b165b2011-09-05 16:22:33 +02001000 discard_my_data = nc->discard_my_data;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001001 rcu_read_unlock();
1002
Philipp Reisner7da35862011-12-19 22:42:56 +01001003 msock.socket->sk->sk_sndtimeo = timeout;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001004
1005 /* we don't want delays.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001006 * we use TCP_CORK where appropriate, though */
Philipp Reisner7da35862011-12-19 22:42:56 +01001007 drbd_tcp_nodelay(sock.socket);
1008 drbd_tcp_nodelay(msock.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001009
Philipp Reisner7da35862011-12-19 22:42:56 +01001010 tconn->data.socket = sock.socket;
1011 tconn->meta.socket = msock.socket;
Philipp Reisner907599e2011-02-08 11:25:37 +01001012 tconn->last_received = jiffies;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001013
Andreas Gruenbacher60381782011-03-28 17:05:50 +02001014 h = drbd_do_features(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001015 if (h <= 0)
1016 return h;
1017
Philipp Reisner907599e2011-02-08 11:25:37 +01001018 if (tconn->cram_hmac_tfm) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001019 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
Philipp Reisner907599e2011-02-08 11:25:37 +01001020 switch (drbd_do_auth(tconn)) {
Johannes Thomab10d96c2010-01-07 16:02:50 +01001021 case -1:
Philipp Reisner907599e2011-02-08 11:25:37 +01001022 conn_err(tconn, "Authentication of peer failed\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001023 return -1;
Johannes Thomab10d96c2010-01-07 16:02:50 +01001024 case 0:
Philipp Reisner907599e2011-02-08 11:25:37 +01001025 conn_err(tconn, "Authentication of peer failed, trying again.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01001026 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001027 }
1028 }
1029
Philipp Reisner7da35862011-12-19 22:42:56 +01001030 tconn->data.socket->sk->sk_sndtimeo = timeout;
1031 tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001032
Andreas Gruenbacher387eb302011-03-16 01:05:37 +01001033 if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
Philipp Reisner7e2455c2010-04-22 14:50:23 +02001034 return -1;
Philipp Reisner1e86ac42011-08-04 10:33:08 +02001035
Philipp Reisnera1096a62012-04-06 12:07:34 +02001036 set_bit(STATE_SENT, &tconn->flags);
Philipp Reisner197296f2012-03-26 16:47:11 +02001037
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02001038 rcu_read_lock();
1039 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1040 kref_get(&mdev->kref);
Philipp Reisner13c76ab2012-11-22 17:06:00 +01001041 /* Prevent a race between resync-handshake and
1042 * being promoted to Primary.
1043 *
1044 * Grab and release the state mutex, so we know that any current
1045 * drbd_set_role() is finished, and any incoming drbd_set_role
1046 * will see the STATE_SENT flag, and wait for it to be cleared.
1047 */
1048 mutex_lock(mdev->state_mutex);
1049 mutex_unlock(mdev->state_mutex);
1050
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02001051 rcu_read_unlock();
Philipp Reisner08b165b2011-09-05 16:22:33 +02001052
1053 if (discard_my_data)
1054 set_bit(DISCARD_MY_DATA, &mdev->flags);
1055 else
1056 clear_bit(DISCARD_MY_DATA, &mdev->flags);
1057
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02001058 drbd_connected(mdev);
1059 kref_put(&mdev->kref, &drbd_minor_destroy);
1060 rcu_read_lock();
1061 }
1062 rcu_read_unlock();
1063
Philipp Reisnera1096a62012-04-06 12:07:34 +02001064 rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
Lars Ellenberged635cb02012-11-05 11:54:30 +01001065 if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
Philipp Reisnera1096a62012-04-06 12:07:34 +02001066 clear_bit(STATE_SENT, &tconn->flags);
Philipp Reisner1e86ac42011-08-04 10:33:08 +02001067 return 0;
Philipp Reisnera1096a62012-04-06 12:07:34 +02001068 }
Philipp Reisner1e86ac42011-08-04 10:33:08 +02001069
Philipp Reisner823bd832012-11-08 15:04:36 +01001070 drbd_thread_start(&tconn->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001071
Philipp Reisner08b165b2011-09-05 16:22:33 +02001072 mutex_lock(&tconn->conf_update);
1073 /* The discard_my_data flag is a single-shot modifier to the next
1074 * connection attempt, the handshake of which is now well underway.
1075 * No need for rcu style copying of the whole struct
1076 * just to clear a single value. */
1077 tconn->net_conf->discard_my_data = 0;
1078 mutex_unlock(&tconn->conf_update);
1079
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07001080 return h;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001081
1082out_release_sockets:
Philipp Reisner7a426fd2012-07-12 14:22:37 +02001083 if (ad.s_listen)
1084 sock_release(ad.s_listen);
Philipp Reisner7da35862011-12-19 22:42:56 +01001085 if (sock.socket)
1086 sock_release(sock.socket);
1087 if (msock.socket)
1088 sock_release(msock.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001089 return -1;
1090}
1091
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001092static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001093{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001094 unsigned int header_size = drbd_header_size(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001095
Andreas Gruenbacher0c8e36d2011-03-30 16:00:17 +02001096 if (header_size == sizeof(struct p_header100) &&
1097 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1098 struct p_header100 *h = header;
1099 if (h->pad != 0) {
1100 conn_err(tconn, "Header padding is not zero\n");
1101 return -EINVAL;
1102 }
1103 pi->vnr = be16_to_cpu(h->volume);
1104 pi->cmd = be16_to_cpu(h->command);
1105 pi->size = be32_to_cpu(h->length);
1106 } else if (header_size == sizeof(struct p_header95) &&
1107 *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001108 struct p_header95 *h = header;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001109 pi->cmd = be16_to_cpu(h->command);
Andreas Gruenbacherb55d84b2011-03-22 13:17:47 +01001110 pi->size = be32_to_cpu(h->length);
1111 pi->vnr = 0;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001112 } else if (header_size == sizeof(struct p_header80) &&
1113 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1114 struct p_header80 *h = header;
1115 pi->cmd = be16_to_cpu(h->command);
1116 pi->size = be16_to_cpu(h->length);
Philipp Reisner77351055b2011-02-07 17:24:26 +01001117 pi->vnr = 0;
Philipp Reisner02918be2010-08-20 14:35:10 +02001118 } else {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001119 conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
1120 be32_to_cpu(*(__be32 *)header),
1121 tconn->agreed_pro_version);
Andreas Gruenbacher8172f3e2011-03-16 17:22:39 +01001122 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001123 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001124 pi->data = header + header_size;
Andreas Gruenbacher8172f3e2011-03-16 17:22:39 +01001125 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001126}
1127
Philipp Reisner9ba7aa02011-02-07 17:32:41 +01001128static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner257d0af2011-01-26 12:15:29 +01001129{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001130 void *buffer = tconn->data.rbuf;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01001131 int err;
Philipp Reisner257d0af2011-01-26 12:15:29 +01001132
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001133 err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001134 if (err)
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01001135 return err;
Philipp Reisner257d0af2011-01-26 12:15:29 +01001136
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001137 err = decode_header(tconn, buffer, pi);
Philipp Reisner9ba7aa02011-02-07 17:32:41 +01001138 tconn->last_received = jiffies;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001139
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01001140 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001141}
1142
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001143static void drbd_flush(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001144{
1145 int rv;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001146 struct drbd_conf *mdev;
1147 int vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001148
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001149 if (tconn->write_ordering >= WO_bdev_flush) {
Lars Ellenberg615e0872011-11-17 14:32:12 +01001150 rcu_read_lock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001151 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
Lars Ellenberg615e0872011-11-17 14:32:12 +01001152 if (!get_ldev(mdev))
1153 continue;
1154 kref_get(&mdev->kref);
1155 rcu_read_unlock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001156
Lars Ellenberg615e0872011-11-17 14:32:12 +01001157 rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
1158 GFP_NOIO, NULL);
1159 if (rv) {
1160 dev_info(DEV, "local disk flush failed with status %d\n", rv);
1161 /* would rather check on EOPNOTSUPP, but that is not reliable.
1162 * don't try again for ANY return value != 0
1163 * if (rv == -EOPNOTSUPP) */
1164 drbd_bump_write_ordering(tconn, WO_drain_io);
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001165 }
Lars Ellenberg615e0872011-11-17 14:32:12 +01001166 put_ldev(mdev);
1167 kref_put(&mdev->kref, &drbd_minor_destroy);
1168
1169 rcu_read_lock();
1170 if (rv)
1171 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001172 }
Lars Ellenberg615e0872011-11-17 14:32:12 +01001173 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001174 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001175}
1176
1177/**
1178 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
1179 * @mdev: DRBD device.
1180 * @epoch: Epoch object.
1181 * @ev: Epoch event.
1182 */
Philipp Reisner1e9dd292011-11-10 15:14:53 +01001183static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001184 struct drbd_epoch *epoch,
1185 enum epoch_event ev)
1186{
Philipp Reisner2451fc32010-08-24 13:43:11 +02001187 int epoch_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001188 struct drbd_epoch *next_epoch;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001189 enum finish_epoch rv = FE_STILL_LIVE;
1190
Philipp Reisner12038a32011-11-09 19:18:00 +01001191 spin_lock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001192 do {
1193 next_epoch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001194
1195 epoch_size = atomic_read(&epoch->epoch_size);
1196
1197 switch (ev & ~EV_CLEANUP) {
1198 case EV_PUT:
1199 atomic_dec(&epoch->active);
1200 break;
1201 case EV_GOT_BARRIER_NR:
1202 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001203 break;
1204 case EV_BECAME_LAST:
1205 /* nothing to do*/
1206 break;
1207 }
1208
Philipp Reisnerb411b362009-09-25 16:07:19 -07001209 if (epoch_size != 0 &&
1210 atomic_read(&epoch->active) == 0 &&
Philipp Reisner80f9fd52011-07-18 15:45:15 +02001211 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001212 if (!(ev & EV_CLEANUP)) {
Philipp Reisner12038a32011-11-09 19:18:00 +01001213 spin_unlock(&tconn->epoch_lock);
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001214 drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
Philipp Reisner12038a32011-11-09 19:18:00 +01001215 spin_lock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001216 }
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001217#if 0
1218 /* FIXME: dec unacked on connection, once we have
1219 * something to count pending connection packets in. */
Philipp Reisner80f9fd52011-07-18 15:45:15 +02001220 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001221 dec_unacked(epoch->tconn);
1222#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07001223
Philipp Reisner12038a32011-11-09 19:18:00 +01001224 if (tconn->current_epoch != epoch) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001225 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1226 list_del(&epoch->list);
1227 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
Philipp Reisner12038a32011-11-09 19:18:00 +01001228 tconn->epochs--;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001229 kfree(epoch);
1230
1231 if (rv == FE_STILL_LIVE)
1232 rv = FE_DESTROYED;
1233 } else {
1234 epoch->flags = 0;
1235 atomic_set(&epoch->epoch_size, 0);
Uwe Kleine-König698f9312010-07-02 20:41:51 +02001236 /* atomic_set(&epoch->active, 0); is already zero */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001237 if (rv == FE_STILL_LIVE)
1238 rv = FE_RECYCLED;
1239 }
1240 }
1241
1242 if (!next_epoch)
1243 break;
1244
1245 epoch = next_epoch;
1246 } while (1);
1247
Philipp Reisner12038a32011-11-09 19:18:00 +01001248 spin_unlock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001249
Philipp Reisnerb411b362009-09-25 16:07:19 -07001250 return rv;
1251}
1252
1253/**
1254 * drbd_bump_write_ordering() - Fall back to an other write ordering method
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001255 * @tconn: DRBD connection.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001256 * @wo: Write ordering method to try.
1257 */
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001258void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001259{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001260 struct disk_conf *dc;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001261 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001262 enum write_ordering_e pwo;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001263 int vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001264 static char *write_ordering_str[] = {
1265 [WO_none] = "none",
1266 [WO_drain_io] = "drain",
1267 [WO_bdev_flush] = "flush",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001268 };
1269
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001270 pwo = tconn->write_ordering;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001271 wo = min(pwo, wo);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001272 rcu_read_lock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001273 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
Philipp Reisner27eb13e2012-03-30 14:12:15 +02001274 if (!get_ldev_if_state(mdev, D_ATTACHING))
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001275 continue;
1276 dc = rcu_dereference(mdev->ldev->disk_conf);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001277
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001278 if (wo == WO_bdev_flush && !dc->disk_flushes)
1279 wo = WO_drain_io;
1280 if (wo == WO_drain_io && !dc->disk_drain)
1281 wo = WO_none;
1282 put_ldev(mdev);
1283 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001284 rcu_read_unlock();
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001285 tconn->write_ordering = wo;
1286 if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
1287 conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001288}
1289
1290/**
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001291 * drbd_submit_peer_request()
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001292 * @mdev: DRBD device.
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001293 * @peer_req: peer request
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001294 * @rw: flag field, see bio->bi_rw
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001295 *
1296 * May spread the pages to multiple bios,
1297 * depending on bio_add_page restrictions.
1298 *
1299 * Returns 0 if all bios have been submitted,
1300 * -ENOMEM if we could not allocate enough bios,
1301 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1302 * single page to an empty bio (which should never happen and likely indicates
1303 * that the lower level IO stack is in some way broken). This has been observed
1304 * on certain Xen deployments.
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001305 */
1306/* TODO allocate from our own bio_set. */
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001307int drbd_submit_peer_request(struct drbd_conf *mdev,
1308 struct drbd_peer_request *peer_req,
1309 const unsigned rw, const int fault_type)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001310{
1311 struct bio *bios = NULL;
1312 struct bio *bio;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001313 struct page *page = peer_req->pages;
1314 sector_t sector = peer_req->i.sector;
1315 unsigned ds = peer_req->i.size;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001316 unsigned n_bios = 0;
1317 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001318 int err = -ENOMEM;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001319
1320 /* In most cases, we will only need one bio. But in case the lower
1321 * level restrictions happen to be different at this offset on this
1322 * side than those of the sending peer, we may need to submit the
Lars Ellenberg9476f392011-02-23 17:02:01 +01001323 * request in more than one bio.
1324 *
1325 * Plain bio_alloc is good enough here, this is no DRBD internally
1326 * generated bio, but a bio allocated on behalf of the peer.
1327 */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001328next_bio:
1329 bio = bio_alloc(GFP_NOIO, nr_pages);
1330 if (!bio) {
1331 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1332 goto fail;
1333 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001334 /* > peer_req->i.sector, unless this is the first bio */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001335 bio->bi_sector = sector;
1336 bio->bi_bdev = mdev->ldev->backing_bdev;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001337 bio->bi_rw = rw;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001338 bio->bi_private = peer_req;
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01001339 bio->bi_end_io = drbd_peer_request_endio;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001340
1341 bio->bi_next = bios;
1342 bios = bio;
1343 ++n_bios;
1344
1345 page_chain_for_each(page) {
1346 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1347 if (!bio_add_page(bio, page, len, 0)) {
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001348 /* A single page must always be possible!
1349 * But in case it fails anyways,
1350 * we deal with it, and complain (below). */
1351 if (bio->bi_vcnt == 0) {
1352 dev_err(DEV,
1353 "bio_add_page failed for len=%u, "
1354 "bi_vcnt=0 (bi_sector=%llu)\n",
1355 len, (unsigned long long)bio->bi_sector);
1356 err = -ENOSPC;
1357 goto fail;
1358 }
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001359 goto next_bio;
1360 }
1361 ds -= len;
1362 sector += len >> 9;
1363 --nr_pages;
1364 }
1365 D_ASSERT(page == NULL);
1366 D_ASSERT(ds == 0);
1367
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001368 atomic_set(&peer_req->pending_bios, n_bios);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001369 do {
1370 bio = bios;
1371 bios = bios->bi_next;
1372 bio->bi_next = NULL;
1373
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001374 drbd_generic_make_request(mdev, fault_type, bio);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001375 } while (bios);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001376 return 0;
1377
1378fail:
1379 while (bios) {
1380 bio = bios;
1381 bios = bios->bi_next;
1382 bio_put(bio);
1383 }
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001384 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001385}
1386
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001387static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001388 struct drbd_peer_request *peer_req)
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001389{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001390 struct drbd_interval *i = &peer_req->i;
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001391
1392 drbd_remove_interval(&mdev->write_requests, i);
1393 drbd_clear_interval(i);
1394
Andreas Gruenbacher6c852be2011-02-04 15:38:52 +01001395 /* Wake up any processes waiting for this peer request to complete. */
Andreas Gruenbacher53840642011-01-28 10:31:04 +01001396 if (i->waiting)
1397 wake_up(&mdev->misc_wait);
1398}
1399
Philipp Reisner77fede52011-11-10 21:19:11 +01001400void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1401{
1402 struct drbd_conf *mdev;
1403 int vnr;
1404
1405 rcu_read_lock();
1406 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1407 kref_get(&mdev->kref);
1408 rcu_read_unlock();
1409 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1410 kref_put(&mdev->kref, &drbd_minor_destroy);
1411 rcu_read_lock();
1412 }
1413 rcu_read_unlock();
1414}
1415
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001416static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001417{
Philipp Reisner2451fc32010-08-24 13:43:11 +02001418 int rv;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001419 struct p_barrier *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001420 struct drbd_epoch *epoch;
1421
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001422 /* FIXME these are unacked on connection,
1423 * not a specific (peer)device.
1424 */
Philipp Reisner12038a32011-11-09 19:18:00 +01001425 tconn->current_epoch->barrier_nr = p->barrier;
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001426 tconn->current_epoch->tconn = tconn;
Philipp Reisner1e9dd292011-11-10 15:14:53 +01001427 rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001428
1429 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1430 * the activity log, which means it would not be resynced in case the
1431 * R_PRIMARY crashes now.
1432 * Therefore we must send the barrier_ack after the barrier request was
1433 * completed. */
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001434 switch (tconn->write_ordering) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001435 case WO_none:
1436 if (rv == FE_RECYCLED)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001437 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001438
1439 /* receiver context, in the writeout path of the other node.
1440 * avoid potential distributed deadlock */
1441 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1442 if (epoch)
1443 break;
1444 else
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001445 conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
Philipp Reisner2451fc32010-08-24 13:43:11 +02001446 /* Fall through */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001447
1448 case WO_bdev_flush:
1449 case WO_drain_io:
Philipp Reisner77fede52011-11-10 21:19:11 +01001450 conn_wait_active_ee_empty(tconn);
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001451 drbd_flush(tconn);
Philipp Reisner2451fc32010-08-24 13:43:11 +02001452
Philipp Reisner12038a32011-11-09 19:18:00 +01001453 if (atomic_read(&tconn->current_epoch->epoch_size)) {
Philipp Reisner2451fc32010-08-24 13:43:11 +02001454 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1455 if (epoch)
1456 break;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001457 }
1458
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001459 return 0;
Philipp Reisner2451fc32010-08-24 13:43:11 +02001460 default:
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001461 conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001462 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001463 }
1464
1465 epoch->flags = 0;
1466 atomic_set(&epoch->epoch_size, 0);
1467 atomic_set(&epoch->active, 0);
1468
Philipp Reisner12038a32011-11-09 19:18:00 +01001469 spin_lock(&tconn->epoch_lock);
1470 if (atomic_read(&tconn->current_epoch->epoch_size)) {
1471 list_add(&epoch->list, &tconn->current_epoch->list);
1472 tconn->current_epoch = epoch;
1473 tconn->epochs++;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001474 } else {
1475 /* The current_epoch got recycled while we allocated this one... */
1476 kfree(epoch);
1477 }
Philipp Reisner12038a32011-11-09 19:18:00 +01001478 spin_unlock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001479
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001480 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001481}
1482
1483/* used from receive_RSDataReply (recv_resync_read)
1484 * and from receive_Data */
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01001485static struct drbd_peer_request *
1486read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1487 int data_size) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001488{
Lars Ellenberg66660322010-04-06 12:15:04 +02001489 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001490 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001491 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001492 int dgs, ds, err;
Philipp Reisnera0638452011-01-19 14:31:32 +01001493 void *dig_in = mdev->tconn->int_dig_in;
1494 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001495 unsigned long *data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001496
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001497 dgs = 0;
1498 if (mdev->tconn->peer_integrity_tfm) {
1499 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001500 /*
1501 * FIXME: Receive the incoming digest into the receive buffer
1502 * here, together with its struct p_data?
1503 */
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001504 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1505 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001506 return NULL;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001507 data_size -= dgs;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001508 }
1509
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001510 if (!expect(IS_ALIGNED(data_size, 512)))
1511 return NULL;
1512 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1513 return NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001514
Lars Ellenberg66660322010-04-06 12:15:04 +02001515 /* even though we trust out peer,
1516 * we sometimes have to double check. */
1517 if (sector + (data_size>>9) > capacity) {
Lars Ellenbergfdda6542011-01-24 15:11:01 +01001518 dev_err(DEV, "request from peer beyond end of local disk: "
1519 "capacity: %llus < sector: %llus + size: %u\n",
Lars Ellenberg66660322010-04-06 12:15:04 +02001520 (unsigned long long)capacity,
1521 (unsigned long long)sector, data_size);
1522 return NULL;
1523 }
1524
Philipp Reisnerb411b362009-09-25 16:07:19 -07001525 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1526 * "criss-cross" setup, that might cause write-out on some other DRBD,
1527 * which in turn might block on the other node at this very place. */
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02001528 peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001529 if (!peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001530 return NULL;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001531
Lars Ellenberga73ff322012-06-25 19:15:38 +02001532 if (!data_size)
Lars Ellenberg81a35372012-07-30 09:00:54 +02001533 return peer_req;
Lars Ellenberga73ff322012-06-25 19:15:38 +02001534
Philipp Reisnerb411b362009-09-25 16:07:19 -07001535 ds = data_size;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001536 page = peer_req->pages;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001537 page_chain_for_each(page) {
1538 unsigned len = min_t(int, ds, PAGE_SIZE);
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001539 data = kmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001540 err = drbd_recv_all_warn(mdev->tconn, data, len);
Andreas Gruenbacher0cf9d272010-12-07 10:43:29 +01001541 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
Philipp Reisner6b4388a2010-04-26 14:11:45 +02001542 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1543 data[0] = data[0] ^ (unsigned long)-1;
1544 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001545 kunmap(page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001546 if (err) {
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001547 drbd_free_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001548 return NULL;
1549 }
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001550 ds -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001551 }
1552
1553 if (dgs) {
Andreas Gruenbacher5b614ab2011-04-27 21:00:12 +02001554 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001555 if (memcmp(dig_in, dig_vv, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001556 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1557 (unsigned long long)sector, data_size);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001558 drbd_free_peer_req(mdev, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001559 return NULL;
1560 }
1561 }
1562 mdev->recv_cnt += data_size>>9;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001563 return peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001564}
1565
1566/* drbd_drain_block() just takes a data block
1567 * out of the socket input buffer, and discards it.
1568 */
1569static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1570{
1571 struct page *page;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001572 int err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001573 void *data;
1574
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001575 if (!data_size)
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001576 return 0;
Lars Ellenbergc3470cd2010-04-01 16:57:19 +02001577
Andreas Gruenbacherc37c8ec2011-04-07 21:02:09 +02001578 page = drbd_alloc_pages(mdev, 1, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001579
1580 data = kmap(page);
1581 while (data_size) {
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001582 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1583
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001584 err = drbd_recv_all_warn(mdev->tconn, data, len);
1585 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001586 break;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001587 data_size -= len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001588 }
1589 kunmap(page);
Andreas Gruenbacher5cc287e2011-04-07 21:02:59 +02001590 drbd_free_pages(mdev, page, 0);
Andreas Gruenbacherfc5be832011-03-16 17:50:50 +01001591 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001592}
1593
1594static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1595 sector_t sector, int data_size)
1596{
1597 struct bio_vec *bvec;
1598 struct bio *bio;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001599 int dgs, err, i, expect;
Philipp Reisnera0638452011-01-19 14:31:32 +01001600 void *dig_in = mdev->tconn->int_dig_in;
1601 void *dig_vv = mdev->tconn->int_dig_vv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001602
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001603 dgs = 0;
1604 if (mdev->tconn->peer_integrity_tfm) {
1605 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001606 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1607 if (err)
1608 return err;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001609 data_size -= dgs;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001610 }
1611
Philipp Reisnerb411b362009-09-25 16:07:19 -07001612 /* optimistically update recv_cnt. if receiving fails below,
1613 * we disconnect anyways, and counters will be reset. */
1614 mdev->recv_cnt += data_size>>9;
1615
1616 bio = req->master_bio;
1617 D_ASSERT(sector == bio->bi_sector);
1618
1619 bio_for_each_segment(bvec, bio, i) {
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001620 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001621 expect = min_t(int, data_size, bvec->bv_len);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001622 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001623 kunmap(bvec->bv_page);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01001624 if (err)
1625 return err;
1626 data_size -= expect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001627 }
1628
1629 if (dgs) {
Andreas Gruenbacher5b614ab2011-04-27 21:00:12 +02001630 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001631 if (memcmp(dig_in, dig_vv, dgs)) {
1632 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001633 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001634 }
1635 }
1636
1637 D_ASSERT(data_size == 0);
Andreas Gruenbacher28284ce2011-03-16 17:54:02 +01001638 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001639}
1640
Andreas Gruenbachera990be42011-04-06 17:56:48 +02001641/*
1642 * e_end_resync_block() is called in asender context via
1643 * drbd_finish_peer_reqs().
1644 */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001645static int e_end_resync_block(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001646{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001647 struct drbd_peer_request *peer_req =
1648 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001649 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001650 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001651 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001652
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001653 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001654
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001655 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1656 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001657 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001658 } else {
1659 /* Record failure to sync */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001660 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001661
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001662 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001663 }
1664 dec_unacked(mdev);
1665
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001666 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001667}
1668
1669static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1670{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001671 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001672
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001673 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1674 if (!peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001675 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001676
1677 dec_rs_pending(mdev);
1678
Philipp Reisnerb411b362009-09-25 16:07:19 -07001679 inc_unacked(mdev);
1680 /* corresponding dec_unacked() in e_end_resync_block()
1681 * respective _drbd_clear_done_ee */
1682
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001683 peer_req->w.cb = e_end_resync_block;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001684
Philipp Reisner87eeee42011-01-19 14:16:30 +01001685 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001686 list_add(&peer_req->w.list, &mdev->sync_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001687 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001688
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001689 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001690 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001691 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001692
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001693 /* don't care for the reason here */
1694 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01001695 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001696 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001697 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001698
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001699 drbd_free_peer_req(mdev, peer_req);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001700fail:
1701 put_ldev(mdev);
Andreas Gruenbachere1c1b0f2011-03-16 17:58:27 +01001702 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001703}
1704
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001705static struct drbd_request *
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001706find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1707 sector_t sector, bool missing_ok, const char *func)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001708{
1709 struct drbd_request *req;
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001710
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001711 /* Request object according to our peer */
1712 req = (struct drbd_request *)(unsigned long)id;
Andreas Gruenbacher5e472262011-01-27 14:42:51 +01001713 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001714 return req;
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001715 if (!missing_ok) {
Andreas Gruenbacher5af172e2011-07-15 09:43:23 +02001716 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001717 (unsigned long)id, (unsigned long long)sector);
1718 }
Andreas Gruenbacher668eebc2011-01-20 17:14:26 +01001719 return NULL;
1720}
1721
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001722static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001723{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001724 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001725 struct drbd_request *req;
1726 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001727 int err;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001728 struct p_data *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001729
1730 mdev = vnr_to_mdev(tconn, pi->vnr);
1731 if (!mdev)
1732 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001733
1734 sector = be64_to_cpu(p->sector);
1735
Philipp Reisner87eeee42011-01-19 14:16:30 +01001736 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01001737 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001738 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001739 if (unlikely(!req))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001740 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001741
Bart Van Assche24c48302011-05-21 18:32:29 +02001742 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
Philipp Reisnerb411b362009-09-25 16:07:19 -07001743 * special casing it there for the various failure cases.
1744 * still no race with drbd_fail_pending_reads */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001745 err = recv_dless_read(mdev, req, sector, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001746 if (!err)
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001747 req_mod(req, DATA_RECEIVED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001748 /* else: nothing. handled from drbd_disconnect...
1749 * I don't think we may complete this just yet
1750 * in case we are "on-disconnect: freeze" */
1751
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001752 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001753}
1754
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001755static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001756{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001757 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001758 sector_t sector;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001759 int err;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001760 struct p_data *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01001761
1762 mdev = vnr_to_mdev(tconn, pi->vnr);
1763 if (!mdev)
1764 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001765
1766 sector = be64_to_cpu(p->sector);
1767 D_ASSERT(p->block_id == ID_SYNCER);
1768
1769 if (get_ldev(mdev)) {
1770 /* data is submitted to disk within recv_resync_read.
1771 * corresponding put_ldev done below on error,
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01001772 * or in drbd_peer_request_endio. */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001773 err = recv_resync_read(mdev, sector, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001774 } else {
1775 if (__ratelimit(&drbd_ratelimit_state))
1776 dev_err(DEV, "Can not write resync data to local disk.\n");
1777
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001778 err = drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001779
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001780 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001781 }
1782
Andreas Gruenbachere2857212011-03-25 00:57:38 +01001783 atomic_add(pi->size >> 9, &mdev->rs_sect_in);
Philipp Reisner778f2712010-07-06 11:14:00 +02001784
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01001785 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001786}
1787
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001788static void restart_conflicting_writes(struct drbd_conf *mdev,
1789 sector_t sector, int size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001790{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001791 struct drbd_interval *i;
1792 struct drbd_request *req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001793
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001794 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1795 if (!i->local)
1796 continue;
1797 req = container_of(i, struct drbd_request, i);
1798 if (req->rq_state & RQ_LOCAL_PENDING ||
1799 !(req->rq_state & RQ_POSTPONED))
1800 continue;
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01001801 /* as it is RQ_POSTPONED, this will cause it to
1802 * be queued on the retry workqueue. */
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001803 __req_mod(req, CONFLICT_RESOLVED, NULL);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001804 }
1805}
1806
Andreas Gruenbachera990be42011-04-06 17:56:48 +02001807/*
1808 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
Philipp Reisnerb411b362009-09-25 16:07:19 -07001809 */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001810static int e_end_block(struct drbd_work *w, int cancel)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001811{
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001812 struct drbd_peer_request *peer_req =
1813 container_of(w, struct drbd_peer_request, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01001814 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001815 sector_t sector = peer_req->i.sector;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001816 int err = 0, pcmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001817
Philipp Reisner303d1442011-04-13 16:24:47 -07001818 if (peer_req->flags & EE_SEND_WRITE_ACK) {
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001819 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001820 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1821 mdev->state.conn <= C_PAUSED_SYNC_T &&
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001822 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
Philipp Reisnerb411b362009-09-25 16:07:19 -07001823 P_RS_WRITE_ACK : P_WRITE_ACK;
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001824 err = drbd_send_ack(mdev, pcmd, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001825 if (pcmd == P_RS_WRITE_ACK)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001826 drbd_set_in_sync(mdev, sector, peer_req->i.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001827 } else {
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001828 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001829 /* we expect it to be marked out of sync anyways...
1830 * maybe assert this? */
1831 }
1832 dec_unacked(mdev);
1833 }
1834 /* we delete from the conflict detection hash _after_ we sent out the
1835 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
Philipp Reisner302bdea2011-04-21 11:36:49 +02001836 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01001837 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001838 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1839 drbd_remove_epoch_entry_interval(mdev, peer_req);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001840 if (peer_req->flags & EE_RESTART_REQUESTS)
1841 restart_conflicting_writes(mdev, sector, peer_req->i.size);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001842 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbb3bfe92011-01-21 15:59:23 +01001843 } else
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001844 D_ASSERT(drbd_interval_empty(&peer_req->i));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001845
Philipp Reisner1e9dd292011-11-10 15:14:53 +01001846 drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001847
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001848 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001849}
1850
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001851static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001852{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001853 struct drbd_conf *mdev = w->mdev;
Andreas Gruenbacher8050e6d2011-02-18 16:12:48 +01001854 struct drbd_peer_request *peer_req =
1855 container_of(w, struct drbd_peer_request, w);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001856 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001857
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001858 err = drbd_send_ack(mdev, ack, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001859 dec_unacked(mdev);
1860
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001861 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001862}
1863
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001864static int e_send_superseded(struct drbd_work *w, int unused)
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001865{
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001866 return e_send_ack(w, P_SUPERSEDED);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001867}
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001868
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001869static int e_send_retry_write(struct drbd_work *w, int unused)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001870{
1871 struct drbd_tconn *tconn = w->mdev->tconn;
1872
1873 return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02001874 P_RETRY_WRITE : P_SUPERSEDED);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001875}
1876
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001877static bool seq_greater(u32 a, u32 b)
1878{
1879 /*
1880 * We assume 32-bit wrap-around here.
1881 * For 24-bit wrap-around, we would have to shift:
1882 * a <<= 8; b <<= 8;
1883 */
1884 return (s32)a - (s32)b > 0;
1885}
1886
1887static u32 seq_max(u32 a, u32 b)
1888{
1889 return seq_greater(a, b) ? a : b;
1890}
1891
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001892static bool need_peer_seq(struct drbd_conf *mdev)
1893{
1894 struct drbd_tconn *tconn = mdev->tconn;
Philipp Reisner302bdea2011-04-21 11:36:49 +02001895 int tp;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001896
1897 /*
1898 * We only need to keep track of the last packet_seq number of our peer
Lars Ellenberg427c0432012-08-01 12:43:01 +02001899 * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001900 * handle_write_conflicts().
1901 */
Philipp Reisner302bdea2011-04-21 11:36:49 +02001902
1903 rcu_read_lock();
1904 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1905 rcu_read_unlock();
1906
Lars Ellenberg427c0432012-08-01 12:43:01 +02001907 return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001908}
1909
Andreas Gruenbacher43ae0772011-02-03 18:42:08 +01001910static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001911{
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001912 unsigned int newest_peer_seq;
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001913
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001914 if (need_peer_seq(mdev)) {
1915 spin_lock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001916 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1917 mdev->peer_seq = newest_peer_seq;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001918 spin_unlock(&mdev->peer_seq_lock);
Lars Ellenberg3c13b682011-02-23 16:10:01 +01001919 /* wake up only if we actually changed mdev->peer_seq */
1920 if (peer_seq == newest_peer_seq)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001921 wake_up(&mdev->seq_wait);
1922 }
Andreas Gruenbacher3e394da2011-01-26 18:36:55 +01001923}
1924
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001925static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
1926{
1927 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1928}
1929
1930/* maybe change sync_ee into interval trees as well? */
Philipp Reisner3ea35df2012-04-06 12:13:18 +02001931static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001932{
1933 struct drbd_peer_request *rs_req;
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001934 bool rv = 0;
1935
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001936 spin_lock_irq(&mdev->tconn->req_lock);
1937 list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1938 if (overlaps(peer_req->i.sector, peer_req->i.size,
1939 rs_req->i.sector, rs_req->i.size)) {
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001940 rv = 1;
1941 break;
1942 }
1943 }
Lars Ellenbergd93f6302012-03-26 15:49:13 +02001944 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001945
1946 return rv;
1947}
1948
Philipp Reisnerb411b362009-09-25 16:07:19 -07001949/* Called from receive_Data.
1950 * Synchronize packets on sock with packets on msock.
1951 *
1952 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1953 * packet traveling on msock, they are still processed in the order they have
1954 * been sent.
1955 *
1956 * Note: we don't care for Ack packets overtaking P_DATA packets.
1957 *
1958 * In case packet_seq is larger than mdev->peer_seq number, there are
1959 * outstanding packets on the msock. We wait for them to arrive.
1960 * In case we are the logically next packet, we update mdev->peer_seq
1961 * ourselves. Correctly handles 32bit wrap around.
1962 *
1963 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1964 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1965 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1966 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1967 *
1968 * returns 0 if we may process the packet,
1969 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001970static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001971{
1972 DEFINE_WAIT(wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001973 long timeout;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001974 int ret;
1975
1976 if (!need_peer_seq(mdev))
1977 return 0;
1978
Philipp Reisnerb411b362009-09-25 16:07:19 -07001979 spin_lock(&mdev->peer_seq_lock);
1980 for (;;) {
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001981 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1982 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1983 ret = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001984 break;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001985 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001986 if (signal_pending(current)) {
1987 ret = -ERESTARTSYS;
1988 break;
1989 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001990 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001991 spin_unlock(&mdev->peer_seq_lock);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001992 rcu_read_lock();
1993 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1994 rcu_read_unlock();
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001995 timeout = schedule_timeout(timeout);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001996 spin_lock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01001997 if (!timeout) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001998 ret = -ETIMEDOUT;
Andreas Gruenbacher71b1c1e2011-03-01 15:40:43 +01001999 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002000 break;
2001 }
2002 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002003 spin_unlock(&mdev->peer_seq_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002004 finish_wait(&mdev->seq_wait, &wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002005 return ret;
2006}
2007
Lars Ellenberg688593c2010-11-17 22:25:03 +01002008/* see also bio_flags_to_wire()
2009 * DRBD_REQ_*, because we need to semantically map the flags to data packet
2010 * flags and back. We may replicate to other kernel versions. */
2011static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002012{
Lars Ellenberg688593c2010-11-17 22:25:03 +01002013 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2014 (dpf & DP_FUA ? REQ_FUA : 0) |
2015 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2016 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002017}
2018
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002019static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2020 unsigned int size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002021{
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002022 struct drbd_interval *i;
2023
2024 repeat:
2025 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2026 struct drbd_request *req;
2027 struct bio_and_error m;
2028
2029 if (!i->local)
2030 continue;
2031 req = container_of(i, struct drbd_request, i);
2032 if (!(req->rq_state & RQ_POSTPONED))
2033 continue;
2034 req->rq_state &= ~RQ_POSTPONED;
2035 __req_mod(req, NEG_ACKED, &m);
2036 spin_unlock_irq(&mdev->tconn->req_lock);
2037 if (m.bio)
2038 complete_master_bio(mdev, &m);
2039 spin_lock_irq(&mdev->tconn->req_lock);
2040 goto repeat;
2041 }
2042}
2043
2044static int handle_write_conflicts(struct drbd_conf *mdev,
2045 struct drbd_peer_request *peer_req)
2046{
2047 struct drbd_tconn *tconn = mdev->tconn;
Lars Ellenberg427c0432012-08-01 12:43:01 +02002048 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002049 sector_t sector = peer_req->i.sector;
2050 const unsigned int size = peer_req->i.size;
2051 struct drbd_interval *i;
2052 bool equal;
2053 int err;
2054
2055 /*
2056 * Inserting the peer request into the write_requests tree will prevent
2057 * new conflicting local requests from being added.
2058 */
2059 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2060
2061 repeat:
2062 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2063 if (i == &peer_req->i)
2064 continue;
2065
2066 if (!i->local) {
2067 /*
2068 * Our peer has sent a conflicting remote request; this
2069 * should not happen in a two-node setup. Wait for the
2070 * earlier peer request to complete.
2071 */
2072 err = drbd_wait_misc(mdev, i);
2073 if (err)
2074 goto out;
2075 goto repeat;
2076 }
2077
2078 equal = i->sector == sector && i->size == size;
2079 if (resolve_conflicts) {
2080 /*
2081 * If the peer request is fully contained within the
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002082 * overlapping request, it can be considered overwritten
2083 * and thus superseded; otherwise, it will be retried
2084 * once all overlapping requests have completed.
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002085 */
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002086 bool superseded = i->sector <= sector && i->sector +
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002087 (i->size >> 9) >= sector + (size >> 9);
2088
2089 if (!equal)
2090 dev_alert(DEV, "Concurrent writes detected: "
2091 "local=%llus +%u, remote=%llus +%u, "
2092 "assuming %s came first\n",
2093 (unsigned long long)i->sector, i->size,
2094 (unsigned long long)sector, size,
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002095 superseded ? "local" : "remote");
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002096
2097 inc_unacked(mdev);
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002098 peer_req->w.cb = superseded ? e_send_superseded :
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002099 e_send_retry_write;
2100 list_add_tail(&peer_req->w.list, &mdev->done_ee);
2101 wake_asender(mdev->tconn);
2102
2103 err = -ENOENT;
2104 goto out;
2105 } else {
2106 struct drbd_request *req =
2107 container_of(i, struct drbd_request, i);
2108
2109 if (!equal)
2110 dev_alert(DEV, "Concurrent writes detected: "
2111 "local=%llus +%u, remote=%llus +%u\n",
2112 (unsigned long long)i->sector, i->size,
2113 (unsigned long long)sector, size);
2114
2115 if (req->rq_state & RQ_LOCAL_PENDING ||
2116 !(req->rq_state & RQ_POSTPONED)) {
2117 /*
2118 * Wait for the node with the discard flag to
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02002119 * decide if this request has been superseded
2120 * or needs to be retried.
2121 * Requests that have been superseded will
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002122 * disappear from the write_requests tree.
2123 *
2124 * In addition, wait for the conflicting
2125 * request to finish locally before submitting
2126 * the conflicting peer request.
2127 */
2128 err = drbd_wait_misc(mdev, &req->i);
2129 if (err) {
2130 _conn_request_state(mdev->tconn,
2131 NS(conn, C_TIMEOUT),
2132 CS_HARD);
2133 fail_postponed_requests(mdev, sector, size);
2134 goto out;
2135 }
2136 goto repeat;
2137 }
2138 /*
2139 * Remember to restart the conflicting requests after
2140 * the new peer request has completed.
2141 */
2142 peer_req->flags |= EE_RESTART_REQUESTS;
2143 }
2144 }
2145 err = 0;
2146
2147 out:
2148 if (err)
2149 drbd_remove_epoch_entry_interval(mdev, peer_req);
2150 return err;
2151}
2152
Philipp Reisnerb411b362009-09-25 16:07:19 -07002153/* mirrored write */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002154static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002155{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002156 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002157 sector_t sector;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002158 struct drbd_peer_request *peer_req;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02002159 struct p_data *p = pi->data;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002160 u32 peer_seq = be32_to_cpu(p->seq_num);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002161 int rw = WRITE;
2162 u32 dp_flags;
Philipp Reisner302bdea2011-04-21 11:36:49 +02002163 int err, tp;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002164
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002165 mdev = vnr_to_mdev(tconn, pi->vnr);
2166 if (!mdev)
2167 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002168
Philipp Reisnerb411b362009-09-25 16:07:19 -07002169 if (!get_ldev(mdev)) {
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002170 int err2;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002171
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002172 err = wait_for_and_update_peer_seq(mdev, peer_seq);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002173 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
Philipp Reisner12038a32011-11-09 19:18:00 +01002174 atomic_inc(&tconn->current_epoch->epoch_size);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002175 err2 = drbd_drain_block(mdev, pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002176 if (!err)
2177 err = err2;
2178 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002179 }
2180
Andreas Gruenbacherfcefa622011-02-17 16:46:59 +01002181 /*
2182 * Corresponding put_ldev done either below (on various errors), or in
2183 * drbd_peer_request_endio, if we successfully submit the data at the
2184 * end of this function.
2185 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002186
2187 sector = be64_to_cpu(p->sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002188 peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002189 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002190 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002191 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002192 }
2193
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002194 peer_req->w.cb = e_end_block;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002195
Lars Ellenberg688593c2010-11-17 22:25:03 +01002196 dp_flags = be32_to_cpu(p->dp_flags);
2197 rw |= wire_flags_to_bio(mdev, dp_flags);
Lars Ellenberg81a35372012-07-30 09:00:54 +02002198 if (peer_req->pages == NULL) {
2199 D_ASSERT(peer_req->i.size == 0);
Lars Ellenberga73ff322012-06-25 19:15:38 +02002200 D_ASSERT(dp_flags & DP_FLUSH);
2201 }
Lars Ellenberg688593c2010-11-17 22:25:03 +01002202
2203 if (dp_flags & DP_MAY_SET_IN_SYNC)
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002204 peer_req->flags |= EE_MAY_SET_IN_SYNC;
Lars Ellenberg688593c2010-11-17 22:25:03 +01002205
Philipp Reisner12038a32011-11-09 19:18:00 +01002206 spin_lock(&tconn->epoch_lock);
2207 peer_req->epoch = tconn->current_epoch;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002208 atomic_inc(&peer_req->epoch->epoch_size);
2209 atomic_inc(&peer_req->epoch->active);
Philipp Reisner12038a32011-11-09 19:18:00 +01002210 spin_unlock(&tconn->epoch_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002211
Philipp Reisner302bdea2011-04-21 11:36:49 +02002212 rcu_read_lock();
2213 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2214 rcu_read_unlock();
2215 if (tp) {
2216 peer_req->flags |= EE_IN_INTERVAL_TREE;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002217 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2218 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002219 goto out_interrupted;
Philipp Reisner87eeee42011-01-19 14:16:30 +01002220 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002221 err = handle_write_conflicts(mdev, peer_req);
2222 if (err) {
2223 spin_unlock_irq(&mdev->tconn->req_lock);
2224 if (err == -ENOENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002225 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002226 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002227 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002228 goto out_interrupted;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002229 }
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01002230 } else
2231 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002232 list_add(&peer_req->w.list, &mdev->active_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002233 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002234
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01002235 if (mdev->state.conn == C_SYNC_TARGET)
Philipp Reisner3ea35df2012-04-06 12:13:18 +02002236 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01002237
Philipp Reisner303d1442011-04-13 16:24:47 -07002238 if (mdev->tconn->agreed_pro_version < 100) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02002239 rcu_read_lock();
2240 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
Philipp Reisner303d1442011-04-13 16:24:47 -07002241 case DRBD_PROT_C:
2242 dp_flags |= DP_SEND_WRITE_ACK;
2243 break;
2244 case DRBD_PROT_B:
2245 dp_flags |= DP_SEND_RECEIVE_ACK;
2246 break;
2247 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002248 rcu_read_unlock();
Philipp Reisner303d1442011-04-13 16:24:47 -07002249 }
2250
2251 if (dp_flags & DP_SEND_WRITE_ACK) {
2252 peer_req->flags |= EE_SEND_WRITE_ACK;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002253 inc_unacked(mdev);
2254 /* corresponding dec_unacked() in e_end_block()
2255 * respective _drbd_clear_done_ee */
Philipp Reisner303d1442011-04-13 16:24:47 -07002256 }
2257
2258 if (dp_flags & DP_SEND_RECEIVE_ACK) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002259 /* I really don't like it that the receiver thread
2260 * sends on the msock, but anyways */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002261 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002262 }
2263
Lars Ellenberg6719fb02010-10-18 23:04:07 +02002264 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002265 /* In case we have the only disk of the cluster, */
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002266 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2267 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2268 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
Lars Ellenberg56392d22013-03-19 18:16:48 +01002269 drbd_al_begin_io(mdev, &peer_req->i, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002270 }
2271
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002272 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2273 if (!err)
2274 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002275
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002276 /* don't care for the reason here */
2277 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002278 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002279 list_del(&peer_req->w.list);
2280 drbd_remove_epoch_entry_interval(mdev, peer_req);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002281 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002282 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
Lars Ellenberg181286a2011-03-31 15:18:56 +02002283 drbd_al_complete_io(mdev, &peer_req->i);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002284
Philipp Reisnerb411b362009-09-25 16:07:19 -07002285out_interrupted:
Philipp Reisner1e9dd292011-11-10 15:14:53 +01002286 drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002287 put_ldev(mdev);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02002288 drbd_free_peer_req(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002289 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002290}
2291
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002292/* We may throttle resync, if the lower device seems to be busy,
2293 * and current sync rate is above c_min_rate.
2294 *
2295 * To decide whether or not the lower device is busy, we use a scheme similar
2296 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2297 * (more than 64 sectors) of activity we cannot account for with our own resync
2298 * activity, it obviously is "busy".
2299 *
2300 * The current sync rate used here uses only the most recent two step marks,
2301 * to have a short time average so we can react faster.
2302 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002303int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002304{
2305 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2306 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01002307 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002308 int curr_events;
2309 int throttle = 0;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002310 unsigned int c_min_rate;
2311
2312 rcu_read_lock();
2313 c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2314 rcu_read_unlock();
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002315
2316 /* feature disabled? */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002317 if (c_min_rate == 0)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002318 return 0;
2319
Philipp Reisnere3555d82010-11-07 15:56:29 +01002320 spin_lock_irq(&mdev->al_lock);
2321 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2322 if (tmp) {
2323 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2324 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2325 spin_unlock_irq(&mdev->al_lock);
2326 return 0;
2327 }
2328 /* Do not slow down if app IO is already waiting for this extent */
2329 }
2330 spin_unlock_irq(&mdev->al_lock);
2331
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002332 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2333 (int)part_stat_read(&disk->part0, sectors[1]) -
2334 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01002335
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002336 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2337 unsigned long rs_left;
2338 int i;
2339
2340 mdev->rs_last_events = curr_events;
2341
2342 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2343 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01002344 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2345
2346 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2347 rs_left = mdev->ov_left;
2348 else
2349 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002350
2351 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2352 if (!dt)
2353 dt++;
2354 db = mdev->rs_mark_left[i] - rs_left;
2355 dbdt = Bit2KB(db/dt);
2356
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002357 if (dbdt > c_min_rate)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002358 throttle = 1;
2359 }
2360 return throttle;
2361}
2362
2363
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002364static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002365{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002366 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002367 sector_t sector;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002368 sector_t capacity;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002369 struct drbd_peer_request *peer_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002370 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002371 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002372 unsigned int fault_type;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02002373 struct p_block_req *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01002374
2375 mdev = vnr_to_mdev(tconn, pi->vnr);
2376 if (!mdev)
2377 return -EIO;
2378 capacity = drbd_get_capacity(mdev->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002379
2380 sector = be64_to_cpu(p->sector);
2381 size = be32_to_cpu(p->blksize);
2382
Andreas Gruenbacherc670a392011-02-21 12:41:39 +01002383 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002384 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2385 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002386 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002387 }
2388 if (sector + (size>>9) > capacity) {
2389 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2390 (unsigned long long)sector, size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002391 return -EINVAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002392 }
2393
2394 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002395 verb = 1;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002396 switch (pi->cmd) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002397 case P_DATA_REQUEST:
2398 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2399 break;
2400 case P_RS_DATA_REQUEST:
2401 case P_CSUM_RS_REQUEST:
2402 case P_OV_REQUEST:
2403 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2404 break;
2405 case P_OV_REPLY:
2406 verb = 0;
2407 dec_rs_pending(mdev);
2408 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2409 break;
2410 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002411 BUG();
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002412 }
2413 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002414 dev_err(DEV, "Can not satisfy peer's read request, "
2415 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002416
Lars Ellenberga821cc42010-09-06 12:31:37 +02002417 /* drain possibly payload */
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002418 return drbd_drain_block(mdev, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002419 }
2420
2421 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2422 * "criss-cross" setup, that might cause write-out on some other DRBD,
2423 * which in turn might block on the other node at this very place. */
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02002424 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002425 if (!peer_req) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002426 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002427 return -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002428 }
2429
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002430 switch (pi->cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002431 case P_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002432 peer_req->w.cb = w_e_end_data_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002433 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002434 /* application IO, don't drbd_rs_begin_io */
2435 goto submit;
2436
Philipp Reisnerb411b362009-09-25 16:07:19 -07002437 case P_RS_DATA_REQUEST:
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002438 peer_req->w.cb = w_e_end_rsdata_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002439 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002440 /* used in the sector offset progress display */
2441 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002442 break;
2443
2444 case P_OV_REPLY:
2445 case P_CSUM_RS_REQUEST:
2446 fault_type = DRBD_FAULT_RS_RD;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002447 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002448 if (!di)
2449 goto out_free_e;
2450
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002451 di->digest_size = pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002452 di->digest = (((char *)di)+sizeof(struct digest_info));
2453
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002454 peer_req->digest = di;
2455 peer_req->flags |= EE_HAS_DIGEST;
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002456
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002457 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002458 goto out_free_e;
2459
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002460 if (pi->cmd == P_CSUM_RS_REQUEST) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002461 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002462 peer_req->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002463 /* used in the sector offset progress display */
2464 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01002465 } else if (pi->cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002466 /* track progress, we may need to throttle */
2467 atomic_add(size >> 9, &mdev->rs_sect_in);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002468 peer_req->w.cb = w_e_end_ov_reply;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002469 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002470 /* drbd_rs_begin_io done when we sent this request,
2471 * but accounting still needs to be done. */
2472 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002473 }
2474 break;
2475
2476 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002477 if (mdev->ov_start_sector == ~(sector_t)0 &&
Philipp Reisner31890f42011-01-19 14:12:51 +01002478 mdev->tconn->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002479 unsigned long now = jiffies;
2480 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002481 mdev->ov_start_sector = sector;
2482 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002483 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2484 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002485 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2486 mdev->rs_mark_left[i] = mdev->ov_left;
2487 mdev->rs_mark_time[i] = now;
2488 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002489 dev_info(DEV, "Online Verify start sector: %llu\n",
2490 (unsigned long long)sector);
2491 }
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002492 peer_req->w.cb = w_e_end_ov_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002493 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002494 break;
2495
Philipp Reisnerb411b362009-09-25 16:07:19 -07002496 default:
Andreas Gruenbacher49ba9b12011-03-25 00:35:45 +01002497 BUG();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002498 }
2499
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002500 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2501 * wrt the receiver, but it is not as straightforward as it may seem.
2502 * Various places in the resync start and stop logic assume resync
2503 * requests are processed in order, requeuing this on the worker thread
2504 * introduces a bunch of new code for synchronization between threads.
2505 *
2506 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2507 * "forever", throttling after drbd_rs_begin_io will lock that extent
2508 * for application writes for the same time. For now, just throttle
2509 * here, where the rest of the code expects the receiver to sleep for
2510 * a while, anyways.
2511 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002512
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002513 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2514 * this defers syncer requests for some time, before letting at least
2515 * on request through. The resync controller on the receiving side
2516 * will adapt to the incoming rate accordingly.
2517 *
2518 * We cannot throttle here if remote is Primary/SyncTarget:
2519 * we would also throttle its application reads.
2520 * In that case, throttling is done on the SyncTarget only.
2521 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002522 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2523 schedule_timeout_uninterruptible(HZ/10);
2524 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002525 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002526
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002527submit_for_resync:
2528 atomic_add(size >> 9, &mdev->rs_sect_ev);
2529
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002530submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002531 inc_unacked(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002532 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002533 list_add_tail(&peer_req->w.list, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002534 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002535
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01002536 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002537 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002538
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002539 /* don't care for the reason here */
2540 dev_err(DEV, "submit failed, triggering re-connect\n");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002541 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01002542 list_del(&peer_req->w.list);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002543 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002544 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2545
Philipp Reisnerb411b362009-09-25 16:07:19 -07002546out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002547 put_ldev(mdev);
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02002548 drbd_free_peer_req(mdev, peer_req);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01002549 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002550}
2551
2552static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2553{
2554 int self, peer, rv = -100;
2555 unsigned long ch_self, ch_peer;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002556 enum drbd_after_sb_p after_sb_0p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002557
2558 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2559 peer = mdev->p_uuid[UI_BITMAP] & 1;
2560
2561 ch_peer = mdev->p_uuid[UI_SIZE];
2562 ch_self = mdev->comm_bm_set;
2563
Philipp Reisner44ed1672011-04-19 17:10:19 +02002564 rcu_read_lock();
2565 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2566 rcu_read_unlock();
2567 switch (after_sb_0p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002568 case ASB_CONSENSUS:
2569 case ASB_DISCARD_SECONDARY:
2570 case ASB_CALL_HELPER:
Philipp Reisner44ed1672011-04-19 17:10:19 +02002571 case ASB_VIOLENTLY:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002572 dev_err(DEV, "Configuration error.\n");
2573 break;
2574 case ASB_DISCONNECT:
2575 break;
2576 case ASB_DISCARD_YOUNGER_PRI:
2577 if (self == 0 && peer == 1) {
2578 rv = -1;
2579 break;
2580 }
2581 if (self == 1 && peer == 0) {
2582 rv = 1;
2583 break;
2584 }
2585 /* Else fall through to one of the other strategies... */
2586 case ASB_DISCARD_OLDER_PRI:
2587 if (self == 0 && peer == 1) {
2588 rv = 1;
2589 break;
2590 }
2591 if (self == 1 && peer == 0) {
2592 rv = -1;
2593 break;
2594 }
2595 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002596 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002597 "Using discard-least-changes instead\n");
2598 case ASB_DISCARD_ZERO_CHG:
2599 if (ch_peer == 0 && ch_self == 0) {
Lars Ellenberg427c0432012-08-01 12:43:01 +02002600 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002601 ? -1 : 1;
2602 break;
2603 } else {
2604 if (ch_peer == 0) { rv = 1; break; }
2605 if (ch_self == 0) { rv = -1; break; }
2606 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002607 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002608 break;
2609 case ASB_DISCARD_LEAST_CHG:
2610 if (ch_self < ch_peer)
2611 rv = -1;
2612 else if (ch_self > ch_peer)
2613 rv = 1;
2614 else /* ( ch_self == ch_peer ) */
2615 /* Well, then use something else. */
Lars Ellenberg427c0432012-08-01 12:43:01 +02002616 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002617 ? -1 : 1;
2618 break;
2619 case ASB_DISCARD_LOCAL:
2620 rv = -1;
2621 break;
2622 case ASB_DISCARD_REMOTE:
2623 rv = 1;
2624 }
2625
2626 return rv;
2627}
2628
2629static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2630{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002631 int hg, rv = -100;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002632 enum drbd_after_sb_p after_sb_1p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002633
Philipp Reisner44ed1672011-04-19 17:10:19 +02002634 rcu_read_lock();
2635 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2636 rcu_read_unlock();
2637 switch (after_sb_1p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002638 case ASB_DISCARD_YOUNGER_PRI:
2639 case ASB_DISCARD_OLDER_PRI:
2640 case ASB_DISCARD_LEAST_CHG:
2641 case ASB_DISCARD_LOCAL:
2642 case ASB_DISCARD_REMOTE:
Philipp Reisner44ed1672011-04-19 17:10:19 +02002643 case ASB_DISCARD_ZERO_CHG:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002644 dev_err(DEV, "Configuration error.\n");
2645 break;
2646 case ASB_DISCONNECT:
2647 break;
2648 case ASB_CONSENSUS:
2649 hg = drbd_asb_recover_0p(mdev);
2650 if (hg == -1 && mdev->state.role == R_SECONDARY)
2651 rv = hg;
2652 if (hg == 1 && mdev->state.role == R_PRIMARY)
2653 rv = hg;
2654 break;
2655 case ASB_VIOLENTLY:
2656 rv = drbd_asb_recover_0p(mdev);
2657 break;
2658 case ASB_DISCARD_SECONDARY:
2659 return mdev->state.role == R_PRIMARY ? 1 : -1;
2660 case ASB_CALL_HELPER:
2661 hg = drbd_asb_recover_0p(mdev);
2662 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002663 enum drbd_state_rv rv2;
2664
2665 drbd_set_role(mdev, R_SECONDARY, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002666 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2667 * we might be here in C_WF_REPORT_PARAMS which is transient.
2668 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002669 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2670 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002671 drbd_khelper(mdev, "pri-lost-after-sb");
2672 } else {
2673 dev_warn(DEV, "Successfully gave up primary role.\n");
2674 rv = hg;
2675 }
2676 } else
2677 rv = hg;
2678 }
2679
2680 return rv;
2681}
2682
2683static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2684{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002685 int hg, rv = -100;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002686 enum drbd_after_sb_p after_sb_2p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002687
Philipp Reisner44ed1672011-04-19 17:10:19 +02002688 rcu_read_lock();
2689 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2690 rcu_read_unlock();
2691 switch (after_sb_2p) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002692 case ASB_DISCARD_YOUNGER_PRI:
2693 case ASB_DISCARD_OLDER_PRI:
2694 case ASB_DISCARD_LEAST_CHG:
2695 case ASB_DISCARD_LOCAL:
2696 case ASB_DISCARD_REMOTE:
2697 case ASB_CONSENSUS:
2698 case ASB_DISCARD_SECONDARY:
Philipp Reisner44ed1672011-04-19 17:10:19 +02002699 case ASB_DISCARD_ZERO_CHG:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002700 dev_err(DEV, "Configuration error.\n");
2701 break;
2702 case ASB_VIOLENTLY:
2703 rv = drbd_asb_recover_0p(mdev);
2704 break;
2705 case ASB_DISCONNECT:
2706 break;
2707 case ASB_CALL_HELPER:
2708 hg = drbd_asb_recover_0p(mdev);
2709 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002710 enum drbd_state_rv rv2;
2711
Philipp Reisnerb411b362009-09-25 16:07:19 -07002712 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2713 * we might be here in C_WF_REPORT_PARAMS which is transient.
2714 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002715 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2716 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002717 drbd_khelper(mdev, "pri-lost-after-sb");
2718 } else {
2719 dev_warn(DEV, "Successfully gave up primary role.\n");
2720 rv = hg;
2721 }
2722 } else
2723 rv = hg;
2724 }
2725
2726 return rv;
2727}
2728
2729static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2730 u64 bits, u64 flags)
2731{
2732 if (!uuid) {
2733 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2734 return;
2735 }
2736 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2737 text,
2738 (unsigned long long)uuid[UI_CURRENT],
2739 (unsigned long long)uuid[UI_BITMAP],
2740 (unsigned long long)uuid[UI_HISTORY_START],
2741 (unsigned long long)uuid[UI_HISTORY_END],
2742 (unsigned long long)bits,
2743 (unsigned long long)flags);
2744}
2745
2746/*
2747 100 after split brain try auto recover
2748 2 C_SYNC_SOURCE set BitMap
2749 1 C_SYNC_SOURCE use BitMap
2750 0 no Sync
2751 -1 C_SYNC_TARGET use BitMap
2752 -2 C_SYNC_TARGET set BitMap
2753 -100 after split brain, disconnect
2754-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002755-1091 requires proto 91
2756-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002757 */
2758static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2759{
2760 u64 self, peer;
2761 int i, j;
2762
2763 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2764 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2765
2766 *rule_nr = 10;
2767 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2768 return 0;
2769
2770 *rule_nr = 20;
2771 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2772 peer != UUID_JUST_CREATED)
2773 return -2;
2774
2775 *rule_nr = 30;
2776 if (self != UUID_JUST_CREATED &&
2777 (peer == UUID_JUST_CREATED || peer == (u64)0))
2778 return 2;
2779
2780 if (self == peer) {
2781 int rct, dc; /* roles at crash time */
2782
2783 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2784
Philipp Reisner31890f42011-01-19 14:12:51 +01002785 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002786 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002787
2788 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2789 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2790 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002791 drbd_uuid_move_history(mdev);
2792 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2793 mdev->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002794
2795 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2796 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2797 *rule_nr = 34;
2798 } else {
2799 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2800 *rule_nr = 36;
2801 }
2802
2803 return 1;
2804 }
2805
2806 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2807
Philipp Reisner31890f42011-01-19 14:12:51 +01002808 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002809 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002810
2811 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2812 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2813 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2814
2815 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2816 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2817 mdev->p_uuid[UI_BITMAP] = 0UL;
2818
2819 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2820 *rule_nr = 35;
2821 } else {
2822 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2823 *rule_nr = 37;
2824 }
2825
2826 return -1;
2827 }
2828
2829 /* Common power [off|failure] */
2830 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2831 (mdev->p_uuid[UI_FLAGS] & 2);
2832 /* lowest bit is set when we were primary,
2833 * next bit (weight 2) is set when peer was primary */
2834 *rule_nr = 40;
2835
2836 switch (rct) {
2837 case 0: /* !self_pri && !peer_pri */ return 0;
2838 case 1: /* self_pri && !peer_pri */ return 1;
2839 case 2: /* !self_pri && peer_pri */ return -1;
2840 case 3: /* self_pri && peer_pri */
Lars Ellenberg427c0432012-08-01 12:43:01 +02002841 dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002842 return dc ? -1 : 1;
2843 }
2844 }
2845
2846 *rule_nr = 50;
2847 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2848 if (self == peer)
2849 return -1;
2850
2851 *rule_nr = 51;
2852 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2853 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002854 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002855 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2856 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2857 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002858 /* The last P_SYNC_UUID did not get though. Undo the last start of
2859 resync as sync source modifications of the peer's UUIDs. */
2860
Philipp Reisner31890f42011-01-19 14:12:51 +01002861 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002862 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002863
2864 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2865 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002866
Lars Ellenberg92b4ca22012-04-30 12:53:52 +02002867 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
Philipp Reisner4a23f262011-01-11 17:42:17 +01002868 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2869
Philipp Reisnerb411b362009-09-25 16:07:19 -07002870 return -1;
2871 }
2872 }
2873
2874 *rule_nr = 60;
2875 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2876 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2877 peer = mdev->p_uuid[i] & ~((u64)1);
2878 if (self == peer)
2879 return -2;
2880 }
2881
2882 *rule_nr = 70;
2883 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2884 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2885 if (self == peer)
2886 return 1;
2887
2888 *rule_nr = 71;
2889 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2890 if (self == peer) {
Philipp Reisner31890f42011-01-19 14:12:51 +01002891 if (mdev->tconn->agreed_pro_version < 96 ?
Philipp Reisner4a23f262011-01-11 17:42:17 +01002892 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2893 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2894 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002895 /* The last P_SYNC_UUID did not get though. Undo the last start of
2896 resync as sync source modifications of our UUIDs. */
2897
Philipp Reisner31890f42011-01-19 14:12:51 +01002898 if (mdev->tconn->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002899 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002900
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002901 __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2902 __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002903
Philipp Reisner4a23f262011-01-11 17:42:17 +01002904 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002905 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2906 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2907
2908 return 1;
2909 }
2910 }
2911
2912
2913 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002914 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002915 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2916 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2917 if (self == peer)
2918 return 2;
2919 }
2920
2921 *rule_nr = 90;
2922 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2923 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2924 if (self == peer && self != ((u64)0))
2925 return 100;
2926
2927 *rule_nr = 100;
2928 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2929 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2930 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2931 peer = mdev->p_uuid[j] & ~((u64)1);
2932 if (self == peer)
2933 return -100;
2934 }
2935 }
2936
2937 return -1000;
2938}
2939
2940/* drbd_sync_handshake() returns the new conn state on success, or
2941 CONN_MASK (-1) on failure.
2942 */
2943static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2944 enum drbd_disk_state peer_disk) __must_hold(local)
2945{
Philipp Reisnerb411b362009-09-25 16:07:19 -07002946 enum drbd_conns rv = C_MASK;
2947 enum drbd_disk_state mydisk;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002948 struct net_conf *nc;
Andreas Gruenbacher6dff2902011-06-28 14:18:12 +02002949 int hg, rule_nr, rr_conflict, tentative;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002950
2951 mydisk = mdev->state.disk;
2952 if (mydisk == D_NEGOTIATING)
2953 mydisk = mdev->new_state_tmp.disk;
2954
2955 dev_info(DEV, "drbd_sync_handshake:\n");
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002956
2957 spin_lock_irq(&mdev->ldev->md.uuid_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002958 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2959 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2960 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2961
2962 hg = drbd_uuid_compare(mdev, &rule_nr);
Philipp Reisner9f2247b2012-08-16 14:25:58 +02002963 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002964
2965 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2966
2967 if (hg == -1000) {
2968 dev_alert(DEV, "Unrelated data, aborting!\n");
2969 return C_MASK;
2970 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002971 if (hg < -1000) {
2972 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002973 return C_MASK;
2974 }
2975
2976 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2977 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2978 int f = (hg == -100) || abs(hg) == 2;
2979 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2980 if (f)
2981 hg = hg*2;
2982 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2983 hg > 0 ? "source" : "target");
2984 }
2985
Adam Gandelman3a11a482010-04-08 16:48:23 -07002986 if (abs(hg) == 100)
2987 drbd_khelper(mdev, "initial-split-brain");
2988
Philipp Reisner44ed1672011-04-19 17:10:19 +02002989 rcu_read_lock();
2990 nc = rcu_dereference(mdev->tconn->net_conf);
2991
2992 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002993 int pcount = (mdev->state.role == R_PRIMARY)
2994 + (peer_role == R_PRIMARY);
2995 int forced = (hg == -100);
2996
2997 switch (pcount) {
2998 case 0:
2999 hg = drbd_asb_recover_0p(mdev);
3000 break;
3001 case 1:
3002 hg = drbd_asb_recover_1p(mdev);
3003 break;
3004 case 2:
3005 hg = drbd_asb_recover_2p(mdev);
3006 break;
3007 }
3008 if (abs(hg) < 100) {
3009 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3010 "automatically solved. Sync from %s node\n",
3011 pcount, (hg < 0) ? "peer" : "this");
3012 if (forced) {
3013 dev_warn(DEV, "Doing a full sync, since"
3014 " UUIDs where ambiguous.\n");
3015 hg = hg*2;
3016 }
3017 }
3018 }
3019
3020 if (hg == -100) {
Philipp Reisner08b165b2011-09-05 16:22:33 +02003021 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003022 hg = -1;
Philipp Reisner08b165b2011-09-05 16:22:33 +02003023 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003024 hg = 1;
3025
3026 if (abs(hg) < 100)
3027 dev_warn(DEV, "Split-Brain detected, manually solved. "
3028 "Sync from %s node\n",
3029 (hg < 0) ? "peer" : "this");
3030 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02003031 rr_conflict = nc->rr_conflict;
Andreas Gruenbacher6dff2902011-06-28 14:18:12 +02003032 tentative = nc->tentative;
Philipp Reisner44ed1672011-04-19 17:10:19 +02003033 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003034
3035 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01003036 /* FIXME this log message is not correct if we end up here
3037 * after an attempted attach on a diskless node.
3038 * We just refuse to attach -- well, we drop the "connection"
3039 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07003040 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003041 drbd_khelper(mdev, "split-brain");
3042 return C_MASK;
3043 }
3044
3045 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3046 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3047 return C_MASK;
3048 }
3049
3050 if (hg < 0 && /* by intention we do not use mydisk here. */
3051 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02003052 switch (rr_conflict) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003053 case ASB_CALL_HELPER:
3054 drbd_khelper(mdev, "pri-lost");
3055 /* fall through */
3056 case ASB_DISCONNECT:
3057 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3058 return C_MASK;
3059 case ASB_VIOLENTLY:
3060 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3061 "assumption\n");
3062 }
3063 }
3064
Andreas Gruenbacher6dff2902011-06-28 14:18:12 +02003065 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003066 if (hg == 0)
3067 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3068 else
3069 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3070 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3071 abs(hg) >= 2 ? "full" : "bit-map based");
3072 return C_MASK;
3073 }
3074
Philipp Reisnerb411b362009-09-25 16:07:19 -07003075 if (abs(hg) >= 2) {
3076 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003077 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3078 BM_LOCKED_SET_ALLOWED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003079 return C_MASK;
3080 }
3081
3082 if (hg > 0) { /* become sync source. */
3083 rv = C_WF_BITMAP_S;
3084 } else if (hg < 0) { /* become sync target */
3085 rv = C_WF_BITMAP_T;
3086 } else {
3087 rv = C_CONNECTED;
3088 if (drbd_bm_total_weight(mdev)) {
3089 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3090 drbd_bm_total_weight(mdev));
3091 }
3092 }
3093
3094 return rv;
3095}
3096
Philipp Reisnerf179d762011-05-16 17:31:47 +02003097static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003098{
3099 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
Philipp Reisnerf179d762011-05-16 17:31:47 +02003100 if (peer == ASB_DISCARD_REMOTE)
3101 return ASB_DISCARD_LOCAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003102
3103 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
Philipp Reisnerf179d762011-05-16 17:31:47 +02003104 if (peer == ASB_DISCARD_LOCAL)
3105 return ASB_DISCARD_REMOTE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003106
3107 /* everything else is valid if they are equal on both sides. */
Philipp Reisnerf179d762011-05-16 17:31:47 +02003108 return peer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003109}
3110
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003111static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003112{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003113 struct p_protocol *p = pi->data;
Philipp Reisner036b17e2011-05-16 17:38:11 +02003114 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3115 int p_proto, p_discard_my_data, p_two_primaries, cf;
3116 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3117 char integrity_alg[SHARED_SECRET_MAX] = "";
Andreas Gruenbacheraccdbcc2011-07-15 17:41:09 +02003118 struct crypto_hash *peer_integrity_tfm = NULL;
Philipp Reisner7aca6c72011-05-17 10:12:56 +02003119 void *int_dig_in = NULL, *int_dig_vv = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003120
Philipp Reisnerb411b362009-09-25 16:07:19 -07003121 p_proto = be32_to_cpu(p->protocol);
3122 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3123 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3124 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003125 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003126 cf = be32_to_cpu(p->conn_flags);
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02003127 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003128
Andreas Gruenbacher86db0612011-04-28 15:24:18 +02003129 if (tconn->agreed_pro_version >= 87) {
3130 int err;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003131
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02003132 if (pi->size > sizeof(integrity_alg))
Andreas Gruenbacher86db0612011-04-28 15:24:18 +02003133 return -EIO;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02003134 err = drbd_recv_all(tconn, integrity_alg, pi->size);
Andreas Gruenbacher86db0612011-04-28 15:24:18 +02003135 if (err)
3136 return err;
Philipp Reisner036b17e2011-05-16 17:38:11 +02003137 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003138 }
3139
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003140 if (pi->cmd != P_PROTOCOL_UPDATE) {
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003141 clear_bit(CONN_DRY_RUN, &tconn->flags);
Philipp Reisner036b17e2011-05-16 17:38:11 +02003142
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003143 if (cf & CF_DRY_RUN)
3144 set_bit(CONN_DRY_RUN, &tconn->flags);
3145
3146 rcu_read_lock();
3147 nc = rcu_dereference(tconn->net_conf);
3148
3149 if (p_proto != nc->wire_protocol) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003150 conn_err(tconn, "incompatible %s settings\n", "protocol");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003151 goto disconnect_rcu_unlock;
3152 }
3153
3154 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003155 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003156 goto disconnect_rcu_unlock;
3157 }
3158
3159 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003160 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003161 goto disconnect_rcu_unlock;
3162 }
3163
3164 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003165 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003166 goto disconnect_rcu_unlock;
3167 }
3168
3169 if (p_discard_my_data && nc->discard_my_data) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003170 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003171 goto disconnect_rcu_unlock;
3172 }
3173
3174 if (p_two_primaries != nc->two_primaries) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003175 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003176 goto disconnect_rcu_unlock;
3177 }
3178
3179 if (strcmp(integrity_alg, nc->integrity_alg)) {
Andreas Gruenbacherd505d9b2011-07-15 17:19:18 +02003180 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
Andreas Gruenbacherfbc12f42011-07-15 17:04:26 +02003181 goto disconnect_rcu_unlock;
3182 }
3183
3184 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003185 }
3186
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003187 if (integrity_alg[0]) {
3188 int hash_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003189
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003190 /*
3191 * We can only change the peer data integrity algorithm
3192 * here. Changing our own data integrity algorithm
3193 * requires that we send a P_PROTOCOL_UPDATE packet at
3194 * the same time; otherwise, the peer has no way to
3195 * tell between which packets the algorithm should
3196 * change.
3197 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003198
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003199 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3200 if (!peer_integrity_tfm) {
3201 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3202 integrity_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003203 goto disconnect;
3204 }
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003205
3206 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3207 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3208 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3209 if (!(int_dig_in && int_dig_vv)) {
3210 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3211 goto disconnect;
3212 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003213 }
3214
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003215 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3216 if (!new_net_conf) {
3217 conn_err(tconn, "Allocation of new net_conf failed\n");
3218 goto disconnect;
3219 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003220
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02003221 mutex_lock(&tconn->data.mutex);
3222 mutex_lock(&tconn->conf_update);
3223 old_net_conf = tconn->net_conf;
3224 *new_net_conf = *old_net_conf;
3225
3226 new_net_conf->wire_protocol = p_proto;
3227 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3228 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3229 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3230 new_net_conf->two_primaries = p_two_primaries;
3231
3232 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3233 mutex_unlock(&tconn->conf_update);
3234 mutex_unlock(&tconn->data.mutex);
3235
3236 crypto_free_hash(tconn->peer_integrity_tfm);
3237 kfree(tconn->int_dig_in);
3238 kfree(tconn->int_dig_vv);
3239 tconn->peer_integrity_tfm = peer_integrity_tfm;
3240 tconn->int_dig_in = int_dig_in;
3241 tconn->int_dig_vv = int_dig_vv;
3242
3243 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3244 conn_info(tconn, "peer data-integrity-alg: %s\n",
3245 integrity_alg[0] ? integrity_alg : "(none)");
3246
3247 synchronize_rcu();
3248 kfree(old_net_conf);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003249 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003250
Philipp Reisner44ed1672011-04-19 17:10:19 +02003251disconnect_rcu_unlock:
3252 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003253disconnect:
Andreas Gruenbacherb792c352011-07-15 16:48:49 +02003254 crypto_free_hash(peer_integrity_tfm);
Philipp Reisner036b17e2011-05-16 17:38:11 +02003255 kfree(int_dig_in);
3256 kfree(int_dig_vv);
Philipp Reisner72046242011-03-15 18:51:47 +01003257 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003258 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003259}
3260
3261/* helper function
3262 * input: alg name, feature name
3263 * return: NULL (alg name was "")
3264 * ERR_PTR(error) if something goes wrong
3265 * or the crypto hash ptr, if it worked out ok. */
3266struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3267 const char *alg, const char *name)
3268{
3269 struct crypto_hash *tfm;
3270
3271 if (!alg[0])
3272 return NULL;
3273
3274 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3275 if (IS_ERR(tfm)) {
3276 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3277 alg, name, PTR_ERR(tfm));
3278 return tfm;
3279 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003280 return tfm;
3281}
3282
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003283static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003284{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003285 void *buffer = tconn->data.rbuf;
3286 int size = pi->size;
3287
3288 while (size) {
3289 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3290 s = drbd_recv(tconn, buffer, s);
3291 if (s <= 0) {
3292 if (s < 0)
3293 return s;
3294 break;
3295 }
3296 size -= s;
3297 }
3298 if (size)
3299 return -EIO;
3300 return 0;
3301}
3302
3303/*
3304 * config_unknown_volume - device configuration command for unknown volume
3305 *
3306 * When a device is added to an existing connection, the node on which the
3307 * device is added first will send configuration commands to its peer but the
3308 * peer will not know about the device yet. It will warn and ignore these
3309 * commands. Once the device is added on the second node, the second node will
3310 * send the same device configuration commands, but in the other direction.
3311 *
3312 * (We can also end up here if drbd is misconfigured.)
3313 */
3314static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3315{
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02003316 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3317 cmdname(pi->cmd), pi->vnr);
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003318 return ignore_remaining_packet(tconn, pi);
3319}
3320
3321static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3322{
3323 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003324 struct p_rs_param_95 *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003325 unsigned int header_size, data_size, exp_max_sz;
3326 struct crypto_hash *verify_tfm = NULL;
3327 struct crypto_hash *csums_tfm = NULL;
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003328 struct net_conf *old_net_conf, *new_net_conf = NULL;
Philipp Reisner813472c2011-05-03 16:47:02 +02003329 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003330 const int apv = tconn->agreed_pro_version;
Philipp Reisner813472c2011-05-03 16:47:02 +02003331 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
Philipp Reisner778f2712010-07-06 11:14:00 +02003332 int fifo_size = 0;
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003333 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003334
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003335 mdev = vnr_to_mdev(tconn, pi->vnr);
3336 if (!mdev)
3337 return config_unknown_volume(tconn, pi);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003338
3339 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3340 : apv == 88 ? sizeof(struct p_rs_param)
3341 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003342 : apv <= 94 ? sizeof(struct p_rs_param_89)
3343 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003344
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003345 if (pi->size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003346 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003347 pi->size, exp_max_sz);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003348 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003349 }
3350
3351 if (apv <= 88) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003352 header_size = sizeof(struct p_rs_param);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003353 data_size = pi->size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003354 } else if (apv <= 94) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003355 header_size = sizeof(struct p_rs_param_89);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003356 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003357 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003358 } else {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003359 header_size = sizeof(struct p_rs_param_95);
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003360 data_size = pi->size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003361 D_ASSERT(data_size == 0);
3362 }
3363
3364 /* initialize verify_alg and csums_alg */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003365 p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003366 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3367
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003368 err = drbd_recv_all(mdev->tconn, p, header_size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003369 if (err)
3370 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003371
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003372 mutex_lock(&mdev->tconn->conf_update);
3373 old_net_conf = mdev->tconn->net_conf;
Philipp Reisner813472c2011-05-03 16:47:02 +02003374 if (get_ldev(mdev)) {
3375 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3376 if (!new_disk_conf) {
3377 put_ldev(mdev);
3378 mutex_unlock(&mdev->tconn->conf_update);
3379 dev_err(DEV, "Allocation of new disk_conf failed\n");
3380 return -ENOMEM;
3381 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003382
Philipp Reisner813472c2011-05-03 16:47:02 +02003383 old_disk_conf = mdev->ldev->disk_conf;
3384 *new_disk_conf = *old_disk_conf;
3385
Andreas Gruenbacher6394b932011-05-11 14:29:52 +02003386 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
Philipp Reisner813472c2011-05-03 16:47:02 +02003387 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003388
3389 if (apv >= 88) {
3390 if (apv == 88) {
Philipp Reisner5de73822012-03-28 10:17:32 +02003391 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3392 dev_err(DEV, "verify-alg of wrong size, "
3393 "peer wants %u, accepting only up to %u byte\n",
3394 data_size, SHARED_SECRET_MAX);
Philipp Reisner813472c2011-05-03 16:47:02 +02003395 err = -EIO;
3396 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003397 }
3398
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003399 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
Philipp Reisner813472c2011-05-03 16:47:02 +02003400 if (err)
3401 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003402 /* we expect NUL terminated string */
3403 /* but just in case someone tries to be evil */
3404 D_ASSERT(p->verify_alg[data_size-1] == 0);
3405 p->verify_alg[data_size-1] = 0;
3406
3407 } else /* apv >= 89 */ {
3408 /* we still expect NUL terminated strings */
3409 /* but just in case someone tries to be evil */
3410 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3411 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3412 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3413 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3414 }
3415
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003416 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003417 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3418 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003419 old_net_conf->verify_alg, p->verify_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003420 goto disconnect;
3421 }
3422 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3423 p->verify_alg, "verify-alg");
3424 if (IS_ERR(verify_tfm)) {
3425 verify_tfm = NULL;
3426 goto disconnect;
3427 }
3428 }
3429
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003430 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003431 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3432 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003433 old_net_conf->csums_alg, p->csums_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003434 goto disconnect;
3435 }
3436 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3437 p->csums_alg, "csums-alg");
3438 if (IS_ERR(csums_tfm)) {
3439 csums_tfm = NULL;
3440 goto disconnect;
3441 }
3442 }
3443
Philipp Reisner813472c2011-05-03 16:47:02 +02003444 if (apv > 94 && new_disk_conf) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003445 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3446 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3447 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3448 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02003449
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003450 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
Philipp Reisner9958c852011-05-03 16:19:31 +02003451 if (fifo_size != mdev->rs_plan_s->size) {
Philipp Reisner813472c2011-05-03 16:47:02 +02003452 new_plan = fifo_alloc(fifo_size);
3453 if (!new_plan) {
Philipp Reisner778f2712010-07-06 11:14:00 +02003454 dev_err(DEV, "kmalloc of fifo_buffer failed");
Lars Ellenbergf3990022011-03-23 14:31:09 +01003455 put_ldev(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02003456 goto disconnect;
3457 }
3458 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02003459 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003460
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003461 if (verify_tfm || csums_tfm) {
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003462 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3463 if (!new_net_conf) {
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003464 dev_err(DEV, "Allocation of new net_conf failed\n");
3465 goto disconnect;
3466 }
3467
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003468 *new_net_conf = *old_net_conf;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003469
3470 if (verify_tfm) {
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003471 strcpy(new_net_conf->verify_alg, p->verify_alg);
3472 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003473 crypto_free_hash(mdev->tconn->verify_tfm);
3474 mdev->tconn->verify_tfm = verify_tfm;
3475 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3476 }
3477 if (csums_tfm) {
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003478 strcpy(new_net_conf->csums_alg, p->csums_alg);
3479 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02003480 crypto_free_hash(mdev->tconn->csums_tfm);
3481 mdev->tconn->csums_tfm = csums_tfm;
3482 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3483 }
Philipp Reisner2ec91e02011-05-03 14:58:00 +02003484 rcu_assign_pointer(tconn->net_conf, new_net_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003485 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003486 }
3487
Philipp Reisner813472c2011-05-03 16:47:02 +02003488 if (new_disk_conf) {
3489 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3490 put_ldev(mdev);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003491 }
Philipp Reisner813472c2011-05-03 16:47:02 +02003492
3493 if (new_plan) {
3494 old_plan = mdev->rs_plan_s;
3495 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3496 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003497
3498 mutex_unlock(&mdev->tconn->conf_update);
3499 synchronize_rcu();
3500 if (new_net_conf)
3501 kfree(old_net_conf);
3502 kfree(old_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02003503 kfree(old_plan);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003504
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003505 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003506
Philipp Reisner813472c2011-05-03 16:47:02 +02003507reconnect:
3508 if (new_disk_conf) {
3509 put_ldev(mdev);
3510 kfree(new_disk_conf);
3511 }
3512 mutex_unlock(&mdev->tconn->conf_update);
3513 return -EIO;
3514
Philipp Reisnerb411b362009-09-25 16:07:19 -07003515disconnect:
Philipp Reisner813472c2011-05-03 16:47:02 +02003516 kfree(new_plan);
3517 if (new_disk_conf) {
3518 put_ldev(mdev);
3519 kfree(new_disk_conf);
3520 }
Philipp Reisnera0095502011-05-03 13:14:15 +02003521 mutex_unlock(&mdev->tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003522 /* just for completeness: actually not needed,
3523 * as this is not reached if csums_tfm was ok. */
3524 crypto_free_hash(csums_tfm);
3525 /* but free the verify_tfm again, if csums_tfm did not work out */
3526 crypto_free_hash(verify_tfm);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003527 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003528 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003529}
3530
Philipp Reisnerb411b362009-09-25 16:07:19 -07003531/* warn if the arguments differ by more than 12.5% */
3532static void warn_if_differ_considerably(struct drbd_conf *mdev,
3533 const char *s, sector_t a, sector_t b)
3534{
3535 sector_t d;
3536 if (a == 0 || b == 0)
3537 return;
3538 d = (a > b) ? (a - b) : (b - a);
3539 if (d > (a>>3) || d > (b>>3))
3540 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3541 (unsigned long long)a, (unsigned long long)b);
3542}
3543
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003544static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003545{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003546 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003547 struct p_sizes *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003548 enum determine_dev_size dd = unchanged;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003549 sector_t p_size, p_usize, my_usize;
3550 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003551 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003552
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003553 mdev = vnr_to_mdev(tconn, pi->vnr);
3554 if (!mdev)
3555 return config_unknown_volume(tconn, pi);
3556
Philipp Reisnerb411b362009-09-25 16:07:19 -07003557 p_size = be64_to_cpu(p->d_size);
3558 p_usize = be64_to_cpu(p->u_size);
3559
Philipp Reisnerb411b362009-09-25 16:07:19 -07003560 /* just store the peer's disk size for now.
3561 * we still need to figure out whether we accept that. */
3562 mdev->p_size = p_size;
3563
Philipp Reisnerb411b362009-09-25 16:07:19 -07003564 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003565 rcu_read_lock();
3566 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3567 rcu_read_unlock();
3568
Philipp Reisnerb411b362009-09-25 16:07:19 -07003569 warn_if_differ_considerably(mdev, "lower level device sizes",
3570 p_size, drbd_get_max_capacity(mdev->ldev));
3571 warn_if_differ_considerably(mdev, "user requested size",
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003572 p_usize, my_usize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003573
3574 /* if this is the first connect, or an otherwise expected
3575 * param exchange, choose the minimum */
3576 if (mdev->state.conn == C_WF_REPORT_PARAMS)
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003577 p_usize = min_not_zero(my_usize, p_usize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003578
3579 /* Never shrink a device with usable data during connect.
3580 But allow online shrinking if we are connected. */
Philipp Reisneref5e44a2011-05-03 13:27:43 +02003581 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003582 drbd_get_capacity(mdev->this_bdev) &&
3583 mdev->state.disk >= D_OUTDATED &&
3584 mdev->state.conn < C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003585 dev_err(DEV, "The peer's disk size is too small!\n");
Philipp Reisner38fa9982011-03-15 18:24:49 +01003586 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003587 put_ldev(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003588 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003589 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02003590
3591 if (my_usize != p_usize) {
3592 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3593
3594 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3595 if (!new_disk_conf) {
3596 dev_err(DEV, "Allocation of new disk_conf failed\n");
3597 put_ldev(mdev);
3598 return -ENOMEM;
3599 }
3600
3601 mutex_lock(&mdev->tconn->conf_update);
3602 old_disk_conf = mdev->ldev->disk_conf;
3603 *new_disk_conf = *old_disk_conf;
3604 new_disk_conf->disk_size = p_usize;
3605
3606 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3607 mutex_unlock(&mdev->tconn->conf_update);
3608 synchronize_rcu();
3609 kfree(old_disk_conf);
3610
3611 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3612 (unsigned long)my_usize);
3613 }
3614
Philipp Reisnerb411b362009-09-25 16:07:19 -07003615 put_ldev(mdev);
3616 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003617
Philipp Reisnere89b5912010-03-24 17:11:33 +01003618 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003619 if (get_ldev(mdev)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02003620 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003621 put_ldev(mdev);
3622 if (dd == dev_size_error)
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003623 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003624 drbd_md_sync(mdev);
3625 } else {
3626 /* I am diskless, need to accept the peer's size. */
3627 drbd_set_my_capacity(mdev, p_size);
3628 }
3629
Philipp Reisner99432fc2011-05-20 16:39:13 +02003630 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3631 drbd_reconsider_max_bio_size(mdev);
3632
Philipp Reisnerb411b362009-09-25 16:07:19 -07003633 if (get_ldev(mdev)) {
3634 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3635 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3636 ldsc = 1;
3637 }
3638
Philipp Reisnerb411b362009-09-25 16:07:19 -07003639 put_ldev(mdev);
3640 }
3641
3642 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3643 if (be64_to_cpu(p->c_size) !=
3644 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3645 /* we have different sizes, probably peer
3646 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003647 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003648 }
3649 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3650 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3651 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003652 mdev->state.disk >= D_INCONSISTENT) {
3653 if (ddsf & DDSF_NO_RESYNC)
3654 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3655 else
3656 resync_after_online_grow(mdev);
3657 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003658 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3659 }
3660 }
3661
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003662 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003663}
3664
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003665static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003666{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003667 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003668 struct p_uuids *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003669 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003670 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003671
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003672 mdev = vnr_to_mdev(tconn, pi->vnr);
3673 if (!mdev)
3674 return config_unknown_volume(tconn, pi);
3675
Philipp Reisnerb411b362009-09-25 16:07:19 -07003676 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
Jing Wang063eacf2012-10-25 15:00:56 +08003677 if (!p_uuid) {
3678 dev_err(DEV, "kmalloc of p_uuid failed\n");
3679 return false;
3680 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003681
3682 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3683 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3684
3685 kfree(mdev->p_uuid);
3686 mdev->p_uuid = p_uuid;
3687
3688 if (mdev->state.conn < C_CONNECTED &&
3689 mdev->state.disk < D_INCONSISTENT &&
3690 mdev->state.role == R_PRIMARY &&
3691 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3692 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3693 (unsigned long long)mdev->ed_uuid);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003694 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003695 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003696 }
3697
3698 if (get_ldev(mdev)) {
3699 int skip_initial_sync =
3700 mdev->state.conn == C_CONNECTED &&
Philipp Reisner31890f42011-01-19 14:12:51 +01003701 mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003702 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3703 (p_uuid[UI_FLAGS] & 8);
3704 if (skip_initial_sync) {
3705 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3706 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003707 "clear_n_write from receive_uuids",
3708 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003709 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3710 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3711 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3712 CS_VERBOSE, NULL);
3713 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003714 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003715 }
3716 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003717 } else if (mdev->state.disk < D_INCONSISTENT &&
3718 mdev->state.role == R_PRIMARY) {
3719 /* I am a diskless primary, the peer just created a new current UUID
3720 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003721 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003722 }
3723
3724 /* Before we test for the disk state, we should wait until an eventually
3725 ongoing cluster wide state change is finished. That is important if
3726 we are primary and are detaching from our disk. We need to see the
3727 new disk state... */
Philipp Reisner8410da82011-02-11 20:11:10 +01003728 mutex_lock(mdev->state_mutex);
3729 mutex_unlock(mdev->state_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003730 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003731 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3732
3733 if (updated_uuids)
3734 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003735
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003736 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003737}
3738
3739/**
3740 * convert_state() - Converts the peer's view of the cluster state to our point of view
3741 * @ps: The state as seen by the peer.
3742 */
3743static union drbd_state convert_state(union drbd_state ps)
3744{
3745 union drbd_state ms;
3746
3747 static enum drbd_conns c_tab[] = {
Philipp Reisner369bea62011-07-06 23:04:44 +02003748 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
Philipp Reisnerb411b362009-09-25 16:07:19 -07003749 [C_CONNECTED] = C_CONNECTED,
3750
3751 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3752 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3753 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3754 [C_VERIFY_S] = C_VERIFY_T,
3755 [C_MASK] = C_MASK,
3756 };
3757
3758 ms.i = ps.i;
3759
3760 ms.conn = c_tab[ps.conn];
3761 ms.peer = ps.role;
3762 ms.role = ps.peer;
3763 ms.pdsk = ps.disk;
3764 ms.disk = ps.pdsk;
3765 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3766
3767 return ms;
3768}
3769
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003770static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003771{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003772 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003773 struct p_req_state *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003774 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003775 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003776
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003777 mdev = vnr_to_mdev(tconn, pi->vnr);
3778 if (!mdev)
3779 return -EIO;
3780
Philipp Reisnerb411b362009-09-25 16:07:19 -07003781 mask.i = be32_to_cpu(p->mask);
3782 val.i = be32_to_cpu(p->val);
3783
Lars Ellenberg427c0432012-08-01 12:43:01 +02003784 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
Philipp Reisner8410da82011-02-11 20:11:10 +01003785 mutex_is_locked(mdev->state_mutex)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003786 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003787 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003788 }
3789
3790 mask = convert_state(mask);
3791 val = convert_state(val);
3792
3793 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003794 drbd_send_sr_reply(mdev, rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003795
Philipp Reisnerb411b362009-09-25 16:07:19 -07003796 drbd_md_sync(mdev);
3797
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003798 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003799}
3800
Andreas Gruenbachere2857212011-03-25 00:57:38 +01003801static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003802{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003803 struct p_req_state *p = pi->data;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003804 union drbd_state mask, val;
3805 enum drbd_state_rv rv;
3806
3807 mask.i = be32_to_cpu(p->mask);
3808 val.i = be32_to_cpu(p->val);
3809
Lars Ellenberg427c0432012-08-01 12:43:01 +02003810 if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003811 mutex_is_locked(&tconn->cstate_mutex)) {
3812 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003813 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003814 }
3815
3816 mask = convert_state(mask);
3817 val = convert_state(val);
3818
Philipp Reisner778bcf22011-03-28 12:55:03 +02003819 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003820 conn_send_sr_reply(tconn, rv);
3821
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003822 return 0;
Philipp Reisnerdfafcc82011-03-16 10:55:07 +01003823}
3824
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003825static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003826{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003827 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02003828 struct p_state *p = pi->data;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003829 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003830 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003831 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003832 int rv;
3833
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01003834 mdev = vnr_to_mdev(tconn, pi->vnr);
3835 if (!mdev)
3836 return config_unknown_volume(tconn, pi);
3837
Philipp Reisnerb411b362009-09-25 16:07:19 -07003838 peer_state.i = be32_to_cpu(p->state);
3839
3840 real_peer_disk = peer_state.disk;
3841 if (peer_state.disk == D_NEGOTIATING) {
3842 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3843 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3844 }
3845
Philipp Reisner87eeee42011-01-19 14:16:30 +01003846 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003847 retry:
Philipp Reisner78bae592011-03-28 15:40:12 +02003848 os = ns = drbd_read_state(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003849 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003850
Lars Ellenberg545752d2011-12-05 14:39:25 +01003851 /* If some other part of the code (asender thread, timeout)
3852 * already decided to close the connection again,
3853 * we must not "re-establish" it here. */
3854 if (os.conn <= C_TEAR_DOWN)
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003855 return -ECONNRESET;
Lars Ellenberg545752d2011-12-05 14:39:25 +01003856
Lars Ellenberg40424e42011-09-26 15:24:56 +02003857 /* If this is the "end of sync" confirmation, usually the peer disk
3858 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3859 * set) resync started in PausedSyncT, or if the timing of pause-/
3860 * unpause-sync events has been "just right", the peer disk may
3861 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3862 */
3863 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3864 real_peer_disk == D_UP_TO_DATE &&
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003865 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3866 /* If we are (becoming) SyncSource, but peer is still in sync
3867 * preparation, ignore its uptodate-ness to avoid flapping, it
3868 * will change to inconsistent once the peer reaches active
3869 * syncing states.
3870 * It may have changed syncer-paused flags, however, so we
3871 * cannot ignore this completely. */
3872 if (peer_state.conn > C_CONNECTED &&
3873 peer_state.conn < C_SYNC_SOURCE)
3874 real_peer_disk = D_INCONSISTENT;
3875
3876 /* if peer_state changes to connected at the same time,
3877 * it explicitly notifies us that it finished resync.
3878 * Maybe we should finish it up, too? */
3879 else if (os.conn >= C_SYNC_SOURCE &&
3880 peer_state.conn == C_CONNECTED) {
3881 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3882 drbd_resync_finished(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003883 return 0;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003884 }
3885 }
3886
Lars Ellenberg02b91b52012-06-28 18:26:52 +02003887 /* explicit verify finished notification, stop sector reached. */
3888 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3889 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003890 ov_out_of_sync_print(mdev);
Lars Ellenberg02b91b52012-06-28 18:26:52 +02003891 drbd_resync_finished(mdev);
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003892 return 0;
Lars Ellenberg02b91b52012-06-28 18:26:52 +02003893 }
3894
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003895 /* peer says his disk is inconsistent, while we think it is uptodate,
3896 * and this happens while the peer still thinks we have a sync going on,
3897 * but we think we are already done with the sync.
3898 * We ignore this to avoid flapping pdsk.
3899 * This should not happen, if the peer is a recent version of drbd. */
3900 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3901 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3902 real_peer_disk = D_UP_TO_DATE;
3903
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003904 if (ns.conn == C_WF_REPORT_PARAMS)
3905 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003906
Philipp Reisner67531712010-10-27 12:21:30 +02003907 if (peer_state.conn == C_AHEAD)
3908 ns.conn = C_BEHIND;
3909
Philipp Reisnerb411b362009-09-25 16:07:19 -07003910 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3911 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3912 int cr; /* consider resync */
3913
3914 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003915 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003916 /* if we had an established connection
3917 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003918 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003919 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003920 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003921 /* if we have both been inconsistent, and the peer has been
3922 * forced to be UpToDate with --overwrite-data */
3923 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3924 /* if we had been plain connected, and the admin requested to
3925 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003926 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003927 (peer_state.conn >= C_STARTING_SYNC_S &&
3928 peer_state.conn <= C_WF_BITMAP_T));
3929
3930 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003931 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003932
3933 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003934 if (ns.conn == C_MASK) {
3935 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003936 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003937 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003938 } else if (peer_state.disk == D_NEGOTIATING) {
3939 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3940 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003941 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003942 } else {
Philipp Reisner8169e412011-03-15 18:40:27 +01003943 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003944 return -EIO;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003945 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003946 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003947 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003948 }
3949 }
3950 }
3951
Philipp Reisner87eeee42011-01-19 14:16:30 +01003952 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02003953 if (os.i != drbd_read_state(mdev).i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003954 goto retry;
3955 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003956 ns.peer = peer_state.role;
3957 ns.pdsk = real_peer_disk;
3958 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003959 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003960 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003961 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
Philipp Reisner2aebfab2011-03-28 16:48:11 +02003962 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003963 test_bit(NEW_CUR_UUID, &mdev->flags)) {
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01003964 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
Philipp Reisner481c6f52010-06-22 14:03:27 +02003965 for temporal network outages! */
Philipp Reisner87eeee42011-01-19 14:16:30 +01003966 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003967 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01003968 tl_clear(mdev->tconn);
Philipp Reisner481c6f52010-06-22 14:03:27 +02003969 drbd_uuid_new_current(mdev);
3970 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner38fa9982011-03-15 18:24:49 +01003971 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003972 return -EIO;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003973 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003974 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisner78bae592011-03-28 15:40:12 +02003975 ns = drbd_read_state(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003976 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003977
3978 if (rv < SS_SUCCESS) {
Philipp Reisner38fa9982011-03-15 18:24:49 +01003979 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003980 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003981 }
3982
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003983 if (os.conn > C_WF_REPORT_PARAMS) {
3984 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003985 peer_state.disk != D_NEGOTIATING ) {
3986 /* we want resync, peer has not yet decided to sync... */
3987 /* Nowadays only used when forcing a node into primary role and
3988 setting its disk to UpToDate with that */
3989 drbd_send_uuids(mdev);
Lars Ellenbergf479ea02011-10-27 16:52:30 +02003990 drbd_send_current_state(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003991 }
3992 }
3993
Philipp Reisner08b165b2011-09-05 16:22:33 +02003994 clear_bit(DISCARD_MY_DATA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003995
Lars Ellenbergcccac982013-03-19 18:16:46 +01003996 drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003997
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01003998 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003999}
4000
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004001static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004002{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004003 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004004 struct p_rs_uuid *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004005
4006 mdev = vnr_to_mdev(tconn, pi->vnr);
4007 if (!mdev)
4008 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004009
4010 wait_event(mdev->misc_wait,
4011 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004012 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07004013 mdev->state.conn < C_CONNECTED ||
4014 mdev->state.disk < D_NEGOTIATING);
4015
4016 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4017
Philipp Reisnerb411b362009-09-25 16:07:19 -07004018 /* Here the _drbd_uuid_ functions are right, current should
4019 _not_ be rotated into the history */
4020 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4021 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4022 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4023
Lars Ellenberg62b0da32011-01-20 13:25:21 +01004024 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004025 drbd_start_resync(mdev, C_SYNC_TARGET);
4026
4027 put_ldev(mdev);
4028 } else
4029 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4030
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004031 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004032}
4033
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004034/**
4035 * receive_bitmap_plain
4036 *
4037 * Return 0 when done, 1 when another iteration is needed, and a negative error
4038 * code upon failure.
4039 */
4040static int
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004041receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004042 unsigned long *p, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004043{
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004044 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4045 drbd_header_size(mdev->tconn);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004046 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004047 c->bm_words - c->word_offset);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004048 unsigned int want = num_words * sizeof(*p);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004049 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004050
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004051 if (want != size) {
4052 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004053 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004054 }
4055 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004056 return 0;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004057 err = drbd_recv_all(mdev->tconn, p, want);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004058 if (err)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004059 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004060
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004061 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004062
4063 c->word_offset += num_words;
4064 c->bit_offset = c->word_offset * BITS_PER_LONG;
4065 if (c->bit_offset > c->bm_bits)
4066 c->bit_offset = c->bm_bits;
4067
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004068 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004069}
4070
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004071static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4072{
4073 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4074}
4075
4076static int dcbp_get_start(struct p_compressed_bm *p)
4077{
4078 return (p->encoding & 0x80) != 0;
4079}
4080
4081static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4082{
4083 return (p->encoding >> 4) & 0x7;
4084}
4085
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004086/**
4087 * recv_bm_rle_bits
4088 *
4089 * Return 0 when done, 1 when another iteration is needed, and a negative error
4090 * code upon failure.
4091 */
4092static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07004093recv_bm_rle_bits(struct drbd_conf *mdev,
4094 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01004095 struct bm_xfer_ctx *c,
4096 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004097{
4098 struct bitstream bs;
4099 u64 look_ahead;
4100 u64 rl;
4101 u64 tmp;
4102 unsigned long s = c->bit_offset;
4103 unsigned long e;
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004104 int toggle = dcbp_get_start(p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004105 int have;
4106 int bits;
4107
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004108 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004109
4110 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4111 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004112 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004113
4114 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4115 bits = vli_decode_bits(&rl, look_ahead);
4116 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004117 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004118
4119 if (toggle) {
4120 e = s + rl -1;
4121 if (e >= c->bm_bits) {
4122 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004123 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004124 }
4125 _drbd_bm_set_bits(mdev, s, e);
4126 }
4127
4128 if (have < bits) {
4129 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4130 have, bits, look_ahead,
4131 (unsigned int)(bs.cur.b - p->code),
4132 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004133 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004134 }
4135 look_ahead >>= bits;
4136 have -= bits;
4137
4138 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4139 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004140 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004141 look_ahead |= tmp << have;
4142 have += bits;
4143 }
4144
4145 c->bit_offset = s;
4146 bm_xfer_ctx_bit_to_word_offset(c);
4147
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004148 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004149}
4150
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004151/**
4152 * decode_bitmap_c
4153 *
4154 * Return 0 when done, 1 when another iteration is needed, and a negative error
4155 * code upon failure.
4156 */
4157static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07004158decode_bitmap_c(struct drbd_conf *mdev,
4159 struct p_compressed_bm *p,
Philipp Reisnerc6d25cf2011-01-19 16:13:06 +01004160 struct bm_xfer_ctx *c,
4161 unsigned int len)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004162{
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01004163 if (dcbp_get_code(p) == RLE_VLI_Bits)
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004164 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004165
4166 /* other variants had been implemented for evaluation,
4167 * but have been dropped as this one turned out to be "best"
4168 * during all our tests. */
4169
4170 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
Philipp Reisner38fa9982011-03-15 18:24:49 +01004171 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004172 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004173}
4174
4175void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4176 const char *direction, struct bm_xfer_ctx *c)
4177{
4178 /* what would it take to transfer it "plaintext" */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004179 unsigned int header_size = drbd_header_size(mdev->tconn);
4180 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4181 unsigned int plain =
4182 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4183 c->bm_words * sizeof(unsigned long);
4184 unsigned int total = c->bytes[0] + c->bytes[1];
4185 unsigned int r;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004186
4187 /* total can not be zero. but just in case: */
4188 if (total == 0)
4189 return;
4190
4191 /* don't report if not compressed */
4192 if (total >= plain)
4193 return;
4194
4195 /* total < plain. check for overflow, still */
4196 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4197 : (1000 * total / plain);
4198
4199 if (r > 1000)
4200 r = 1000;
4201
4202 r = 1000 - r;
4203 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4204 "total %u; compression: %u.%u%%\n",
4205 direction,
4206 c->bytes[1], c->packets[1],
4207 c->bytes[0], c->packets[0],
4208 total, r/10, r % 10);
4209}
4210
4211/* Since we are processing the bitfield from lower addresses to higher,
4212 it does not matter if the process it in 32 bit chunks or 64 bit
4213 chunks as long as it is little endian. (Understand it as byte stream,
4214 beginning with the lowest byte...) If we would use big endian
4215 we would need to process it from the highest address to the lowest,
4216 in order to be agnostic to the 32 vs 64 bits issue.
4217
4218 returns 0 on failure, 1 if we successfully received it. */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004219static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004220{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004221 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004222 struct bm_xfer_ctx c;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004223 int err;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004224
4225 mdev = vnr_to_mdev(tconn, pi->vnr);
4226 if (!mdev)
4227 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004228
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004229 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4230 /* you are supposed to send additional out-of-sync information
4231 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07004232
Philipp Reisnerb411b362009-09-25 16:07:19 -07004233 c = (struct bm_xfer_ctx) {
4234 .bm_bits = drbd_bm_bits(mdev),
4235 .bm_words = drbd_bm_words(mdev),
4236 };
4237
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004238 for(;;) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004239 if (pi->cmd == P_BITMAP)
4240 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4241 else if (pi->cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004242 /* MAYBE: sanity check that we speak proto >= 90,
4243 * and the feature is enabled! */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004244 struct p_compressed_bm *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004245
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004246 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004247 dev_err(DEV, "ReportCBitmap packet too large\n");
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004248 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004249 goto out;
4250 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004251 if (pi->size <= sizeof(*p)) {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004252 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004253 err = -EIO;
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01004254 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004255 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004256 err = drbd_recv_all(mdev->tconn, p, pi->size);
4257 if (err)
4258 goto out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004259 err = decode_bitmap_c(mdev, p, &c, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004260 } else {
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004261 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004262 err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004263 goto out;
4264 }
4265
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004266 c.packets[pi->cmd == P_BITMAP]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02004267 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004268
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004269 if (err <= 0) {
4270 if (err < 0)
4271 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004272 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004273 }
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004274 err = drbd_recv_header(mdev->tconn, pi);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004275 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004276 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01004277 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004278
4279 INFO_bm_xfer_stats(mdev, "receive", &c);
4280
4281 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01004282 enum drbd_state_rv rv;
4283
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004284 err = drbd_send_bitmap(mdev);
4285 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004286 goto out;
4287 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01004288 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4289 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004290 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4291 /* admin may have requested C_DISCONNECTING,
4292 * other threads may have noticed network errors */
4293 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4294 drbd_conn_str(mdev->state.conn));
4295 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004296 err = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004297
Philipp Reisnerb411b362009-09-25 16:07:19 -07004298 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004299 drbd_bm_unlock(mdev);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004300 if (!err && mdev->state.conn == C_WF_BITMAP_S)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004301 drbd_start_resync(mdev, C_SYNC_SOURCE);
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004302 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004303}
4304
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004305static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004306{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004307 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004308 pi->cmd, pi->size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004309
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004310 return ignore_remaining_packet(tconn, pi);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004311}
4312
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004313static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004314{
Philipp Reisnerb411b362009-09-25 16:07:19 -07004315 /* Make sure we've acked all the TCP data associated
4316 * with the data requests being unplugged */
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004317 drbd_tcp_quickack(tconn->data.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004318
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004319 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004320}
4321
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004322static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner73a01a12010-10-27 14:33:00 +02004323{
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004324 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004325 struct p_block_desc *p = pi->data;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004326
4327 mdev = vnr_to_mdev(tconn, pi->vnr);
4328 if (!mdev)
4329 return -EIO;
Philipp Reisner73a01a12010-10-27 14:33:00 +02004330
Lars Ellenbergf735e3632010-12-17 21:06:18 +01004331 switch (mdev->state.conn) {
4332 case C_WF_SYNC_UUID:
4333 case C_WF_BITMAP_T:
4334 case C_BEHIND:
4335 break;
4336 default:
4337 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4338 drbd_conn_str(mdev->state.conn));
4339 }
4340
Philipp Reisner73a01a12010-10-27 14:33:00 +02004341 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4342
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004343 return 0;
Philipp Reisner73a01a12010-10-27 14:33:00 +02004344}
4345
Philipp Reisner02918be2010-08-20 14:35:10 +02004346struct data_cmd {
4347 int expect_payload;
4348 size_t pkt_size;
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004349 int (*fn)(struct drbd_tconn *, struct packet_info *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004350};
4351
Philipp Reisner02918be2010-08-20 14:35:10 +02004352static struct data_cmd drbd_cmd_handler[] = {
4353 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4354 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4355 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4356 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004357 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4358 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4359 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
Philipp Reisner02918be2010-08-20 14:35:10 +02004360 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4361 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004362 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4363 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
Philipp Reisner02918be2010-08-20 14:35:10 +02004364 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4365 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4366 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4367 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4368 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4369 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4370 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4371 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4372 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4373 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02004374 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004375 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
Philipp Reisner036b17e2011-05-16 17:38:11 +02004376 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
Philipp Reisner02918be2010-08-20 14:35:10 +02004377};
4378
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004379static void drbdd(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004380{
Philipp Reisner77351055b2011-02-07 17:24:26 +01004381 struct packet_info pi;
Philipp Reisner02918be2010-08-20 14:35:10 +02004382 size_t shs; /* sub header size */
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004383 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004384
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004385 while (get_t_state(&tconn->receiver) == RUNNING) {
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004386 struct data_cmd *cmd;
4387
Philipp Reisnereefc2f72011-02-08 12:55:24 +01004388 drbd_thread_current_set_cpu(&tconn->receiver);
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004389 if (drbd_recv_header(tconn, &pi))
Philipp Reisner02918be2010-08-20 14:35:10 +02004390 goto err_out;
4391
Andreas Gruenbacherdeebe192011-03-25 00:01:04 +01004392 cmd = &drbd_cmd_handler[pi.cmd];
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004393 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004394 conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4395 cmdname(pi.cmd), pi.cmd);
Philipp Reisner02918be2010-08-20 14:35:10 +02004396 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01004397 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004398
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004399 shs = cmd->pkt_size;
4400 if (pi.size > shs && !cmd->expect_payload) {
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004401 conn_err(tconn, "No payload expected %s l:%d\n",
4402 cmdname(pi.cmd), pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004403 goto err_out;
4404 }
4405
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004406 if (shs) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004407 err = drbd_recv_all_warn(tconn, pi.data, shs);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004408 if (err)
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004409 goto err_out;
Andreas Gruenbachere2857212011-03-25 00:57:38 +01004410 pi.size -= shs;
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02004411 }
4412
Andreas Gruenbacher4a76b162011-03-25 02:43:51 +01004413 err = cmd->fn(tconn, &pi);
4414 if (err) {
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004415 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4416 cmdname(pi.cmd), err, pi.size);
Philipp Reisner02918be2010-08-20 14:35:10 +02004417 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004418 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004419 }
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004420 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004421
Andreas Gruenbacher82bc0192011-03-17 12:10:19 +01004422 err_out:
4423 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004424}
4425
Philipp Reisner0e29d162011-02-18 14:23:11 +01004426void conn_flush_workqueue(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004427{
4428 struct drbd_wq_barrier barr;
4429
4430 barr.w.cb = w_prev_work_done;
Philipp Reisner0e29d162011-02-18 14:23:11 +01004431 barr.w.tconn = tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004432 init_completion(&barr.done);
Lars Ellenbergd5b27b02011-11-14 15:42:37 +01004433 drbd_queue_work(&tconn->sender_work, &barr.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004434 wait_for_completion(&barr.done);
4435}
4436
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004437static void conn_disconnect(struct drbd_tconn *tconn)
Philipp Reisnerf70b35112010-06-24 14:34:40 +02004438{
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02004439 struct drbd_conf *mdev;
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004440 enum drbd_conns oc;
Philipp Reisner376694a2011-11-07 10:54:28 +01004441 int vnr;
Philipp Reisnerf70b35112010-06-24 14:34:40 +02004442
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004443 if (tconn->cstate == C_STANDALONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004444 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004445
Lars Ellenberg545752d2011-12-05 14:39:25 +01004446 /* We are about to start the cleanup after connection loss.
4447 * Make sure drbd_make_request knows about that.
4448 * Usually we should be in some network failure state already,
4449 * but just in case we are not, we fix it up here.
4450 */
Philipp Reisnerb8853db2011-12-13 11:09:16 +01004451 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
Lars Ellenberg545752d2011-12-05 14:39:25 +01004452
Philipp Reisnerb411b362009-09-25 16:07:19 -07004453 /* asender does not clean up anything. it must not interfere, either */
Philipp Reisner360cc742011-02-08 14:29:53 +01004454 drbd_thread_stop(&tconn->asender);
4455 drbd_free_sock(tconn);
4456
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02004457 rcu_read_lock();
4458 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4459 kref_get(&mdev->kref);
4460 rcu_read_unlock();
4461 drbd_disconnected(mdev);
4462 kref_put(&mdev->kref, &drbd_minor_destroy);
4463 rcu_read_lock();
4464 }
4465 rcu_read_unlock();
4466
Philipp Reisner12038a32011-11-09 19:18:00 +01004467 if (!list_empty(&tconn->current_epoch->list))
4468 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4469 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4470 atomic_set(&tconn->current_epoch->epoch_size, 0);
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01004471 tconn->send.seen_any_write_yet = false;
Philipp Reisner12038a32011-11-09 19:18:00 +01004472
Philipp Reisner360cc742011-02-08 14:29:53 +01004473 conn_info(tconn, "Connection closed\n");
4474
Philipp Reisnercb703452011-03-24 11:03:07 +01004475 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4476 conn_try_outdate_peer_async(tconn);
4477
Philipp Reisner360cc742011-02-08 14:29:53 +01004478 spin_lock_irq(&tconn->req_lock);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004479 oc = tconn->cstate;
4480 if (oc >= C_UNCONNECTED)
Philipp Reisner376694a2011-11-07 10:54:28 +01004481 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004482
Philipp Reisner360cc742011-02-08 14:29:53 +01004483 spin_unlock_irq(&tconn->req_lock);
4484
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02004485 if (oc == C_DISCONNECTING)
Lars Ellenbergd9cc6e22011-04-27 10:25:28 +02004486 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
Philipp Reisner360cc742011-02-08 14:29:53 +01004487}
4488
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02004489static int drbd_disconnected(struct drbd_conf *mdev)
Philipp Reisner360cc742011-02-08 14:29:53 +01004490{
Philipp Reisner360cc742011-02-08 14:29:53 +01004491 unsigned int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004492
Philipp Reisner85719572010-07-21 10:20:17 +02004493 /* wait for current activity to cease. */
Philipp Reisner87eeee42011-01-19 14:16:30 +01004494 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004495 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4496 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4497 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004498 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004499
4500 /* We do not have data structures that would allow us to
4501 * get the rs_pending_cnt down to 0 again.
4502 * * On C_SYNC_TARGET we do not have any data structures describing
4503 * the pending RSDataRequest's we have sent.
4504 * * On C_SYNC_SOURCE there is no data structure that tracks
4505 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4506 * And no, it is not the sum of the reference counts in the
4507 * resync_LRU. The resync_LRU tracks the whole operation including
4508 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4509 * on the fly. */
4510 drbd_rs_cancel_all(mdev);
4511 mdev->rs_total = 0;
4512 mdev->rs_failed = 0;
4513 atomic_set(&mdev->rs_pending_cnt, 0);
4514 wake_up(&mdev->misc_wait);
4515
Philipp Reisnerb411b362009-09-25 16:07:19 -07004516 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004517 resync_timer_fn((unsigned long)mdev);
4518
Philipp Reisnerb411b362009-09-25 16:07:19 -07004519 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4520 * w_make_resync_request etc. which may still be on the worker queue
4521 * to be "canceled" */
4522 drbd_flush_workqueue(mdev);
4523
Andreas Gruenbachera990be42011-04-06 17:56:48 +02004524 drbd_finish_peer_reqs(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004525
Philipp Reisnerd10b4ea2011-11-30 23:25:36 +01004526 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4527 might have issued a work again. The one before drbd_finish_peer_reqs() is
4528 necessary to reclain net_ee in drbd_finish_peer_reqs(). */
4529 drbd_flush_workqueue(mdev);
4530
Lars Ellenberg08332d72012-08-17 15:09:13 +02004531 /* need to do it again, drbd_finish_peer_reqs() may have populated it
4532 * again via drbd_try_clear_on_disk_bm(). */
4533 drbd_rs_cancel_all(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004534
4535 kfree(mdev->p_uuid);
4536 mdev->p_uuid = NULL;
4537
Philipp Reisner2aebfab2011-03-28 16:48:11 +02004538 if (!drbd_suspended(mdev))
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01004539 tl_clear(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004540
4541 drbd_md_sync(mdev);
4542
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004543 /* serialize with bitmap writeout triggered by the state change,
4544 * if any. */
4545 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4546
Philipp Reisnerb411b362009-09-25 16:07:19 -07004547 /* tcp_close and release of sendpage pages can be deferred. I don't
4548 * want to use SO_LINGER, because apparently it can be deferred for
4549 * more than 20 seconds (longest time I checked).
4550 *
4551 * Actually we don't care for exactly when the network stack does its
4552 * put_page(), but release our reference on these pages right here.
4553 */
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02004554 i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004555 if (i)
4556 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02004557 i = atomic_read(&mdev->pp_in_use_by_net);
4558 if (i)
4559 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004560 i = atomic_read(&mdev->pp_in_use);
4561 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02004562 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004563
4564 D_ASSERT(list_empty(&mdev->read_ee));
4565 D_ASSERT(list_empty(&mdev->active_ee));
4566 D_ASSERT(list_empty(&mdev->sync_ee));
4567 D_ASSERT(list_empty(&mdev->done_ee));
4568
Philipp Reisner360cc742011-02-08 14:29:53 +01004569 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004570}
4571
4572/*
4573 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4574 * we can agree on is stored in agreed_pro_version.
4575 *
4576 * feature flags and the reserved array should be enough room for future
4577 * enhancements of the handshake protocol, and possible plugins...
4578 *
4579 * for now, they are expected to be zero, but ignored.
4580 */
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004581static int drbd_send_features(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004582{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004583 struct drbd_socket *sock;
4584 struct p_connection_features *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004585
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004586 sock = &tconn->data;
4587 p = conn_prepare_command(tconn, sock);
4588 if (!p)
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004589 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004590 memset(p, 0, sizeof(*p));
4591 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4592 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004593 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004594}
4595
4596/*
4597 * return values:
4598 * 1 yes, we have a valid connection
4599 * 0 oops, did not work out, please try again
4600 * -1 peer talks different language,
4601 * no point in trying again, please go standalone.
4602 */
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004603static int drbd_do_features(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004604{
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004605 /* ASSERT current == tconn->receiver ... */
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004606 struct p_connection_features *p;
4607 const int expect = sizeof(struct p_connection_features);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004608 struct packet_info pi;
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004609 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004610
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004611 err = drbd_send_features(tconn);
Andreas Gruenbachere8d17b02011-03-16 00:54:19 +01004612 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004613 return 0;
4614
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004615 err = drbd_recv_header(tconn, &pi);
4616 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004617 return 0;
4618
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004619 if (pi.cmd != P_CONNECTION_FEATURES) {
4620 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004621 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004622 return -1;
4623 }
4624
Philipp Reisner77351055b2011-02-07 17:24:26 +01004625 if (pi.size != expect) {
Andreas Gruenbacher60381782011-03-28 17:05:50 +02004626 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01004627 expect, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004628 return -1;
4629 }
4630
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004631 p = pi.data;
4632 err = drbd_recv_all_warn(tconn, p, expect);
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004633 if (err)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004634 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004635
Philipp Reisnerb411b362009-09-25 16:07:19 -07004636 p->protocol_min = be32_to_cpu(p->protocol_min);
4637 p->protocol_max = be32_to_cpu(p->protocol_max);
4638 if (p->protocol_max == 0)
4639 p->protocol_max = p->protocol_min;
4640
4641 if (PRO_VERSION_MAX < p->protocol_min ||
4642 PRO_VERSION_MIN > p->protocol_max)
4643 goto incompat;
4644
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004645 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004646
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004647 conn_info(tconn, "Handshake successful: "
4648 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004649
4650 return 1;
4651
4652 incompat:
Philipp Reisner65d11ed2011-02-07 17:35:59 +01004653 conn_err(tconn, "incompatible DRBD dialects: "
Philipp Reisnerb411b362009-09-25 16:07:19 -07004654 "I support %d-%d, peer supports %d-%d\n",
4655 PRO_VERSION_MIN, PRO_VERSION_MAX,
4656 p->protocol_min, p->protocol_max);
4657 return -1;
4658}
4659
4660#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
Philipp Reisner13e60372011-02-08 09:54:40 +01004661static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004662{
Philipp Reisneref57f9e2013-03-27 14:08:44 +01004663 conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4664 conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004665 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004666}
4667#else
4668#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004669
4670/* Return value:
4671 1 - auth succeeded,
4672 0 - failed, try again (network error),
4673 -1 - auth failed, don't try again.
4674*/
4675
Philipp Reisner13e60372011-02-08 09:54:40 +01004676static int drbd_do_auth(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004677{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004678 struct drbd_socket *sock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004679 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4680 struct scatterlist sg;
4681 char *response = NULL;
4682 char *right_response = NULL;
4683 char *peers_ch = NULL;
Philipp Reisner44ed1672011-04-19 17:10:19 +02004684 unsigned int key_len;
4685 char secret[SHARED_SECRET_MAX]; /* 64 byte */
Philipp Reisnerb411b362009-09-25 16:07:19 -07004686 unsigned int resp_size;
4687 struct hash_desc desc;
Philipp Reisner77351055b2011-02-07 17:24:26 +01004688 struct packet_info pi;
Philipp Reisner44ed1672011-04-19 17:10:19 +02004689 struct net_conf *nc;
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004690 int err, rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004691
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004692 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
4693
Philipp Reisner44ed1672011-04-19 17:10:19 +02004694 rcu_read_lock();
4695 nc = rcu_dereference(tconn->net_conf);
4696 key_len = strlen(nc->shared_secret);
4697 memcpy(secret, nc->shared_secret, key_len);
4698 rcu_read_unlock();
4699
Philipp Reisner13e60372011-02-08 09:54:40 +01004700 desc.tfm = tconn->cram_hmac_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004701 desc.flags = 0;
4702
Philipp Reisner44ed1672011-04-19 17:10:19 +02004703 rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004704 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004705 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004706 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004707 goto fail;
4708 }
4709
4710 get_random_bytes(my_challenge, CHALLENGE_LEN);
4711
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004712 sock = &tconn->data;
4713 if (!conn_prepare_command(tconn, sock)) {
4714 rv = 0;
4715 goto fail;
4716 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004717 rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004718 my_challenge, CHALLENGE_LEN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004719 if (!rv)
4720 goto fail;
4721
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004722 err = drbd_recv_header(tconn, &pi);
4723 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004724 rv = 0;
4725 goto fail;
4726 }
4727
Philipp Reisner77351055b2011-02-07 17:24:26 +01004728 if (pi.cmd != P_AUTH_CHALLENGE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004729 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004730 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004731 rv = 0;
4732 goto fail;
4733 }
4734
Philipp Reisner77351055b2011-02-07 17:24:26 +01004735 if (pi.size > CHALLENGE_LEN * 2) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004736 conn_err(tconn, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004737 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004738 goto fail;
4739 }
4740
Philipp Reisner77351055b2011-02-07 17:24:26 +01004741 peers_ch = kmalloc(pi.size, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004742 if (peers_ch == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004743 conn_err(tconn, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004744 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004745 goto fail;
4746 }
4747
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004748 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4749 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004750 rv = 0;
4751 goto fail;
4752 }
4753
Philipp Reisner13e60372011-02-08 09:54:40 +01004754 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004755 response = kmalloc(resp_size, GFP_NOIO);
4756 if (response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004757 conn_err(tconn, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004758 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004759 goto fail;
4760 }
4761
4762 sg_init_table(&sg, 1);
Philipp Reisner77351055b2011-02-07 17:24:26 +01004763 sg_set_buf(&sg, peers_ch, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004764
4765 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4766 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004767 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004768 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004769 goto fail;
4770 }
4771
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004772 if (!conn_prepare_command(tconn, sock)) {
4773 rv = 0;
4774 goto fail;
4775 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004776 rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02004777 response, resp_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004778 if (!rv)
4779 goto fail;
4780
Andreas Gruenbacher69bc7bc2011-03-16 17:31:52 +01004781 err = drbd_recv_header(tconn, &pi);
4782 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004783 rv = 0;
4784 goto fail;
4785 }
4786
Philipp Reisner77351055b2011-02-07 17:24:26 +01004787 if (pi.cmd != P_AUTH_RESPONSE) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004788 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02004789 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004790 rv = 0;
4791 goto fail;
4792 }
4793
Philipp Reisner77351055b2011-02-07 17:24:26 +01004794 if (pi.size != resp_size) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004795 conn_err(tconn, "expected AuthResponse payload of wrong size\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004796 rv = 0;
4797 goto fail;
4798 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004799
Andreas Gruenbachera5c31902011-03-24 03:28:04 +01004800 err = drbd_recv_all_warn(tconn, response , resp_size);
4801 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004802 rv = 0;
4803 goto fail;
4804 }
4805
4806 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004807 if (right_response == NULL) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004808 conn_err(tconn, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004809 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004810 goto fail;
4811 }
4812
4813 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4814
4815 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4816 if (rv) {
Philipp Reisner13e60372011-02-08 09:54:40 +01004817 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004818 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004819 goto fail;
4820 }
4821
4822 rv = !memcmp(response, right_response, resp_size);
4823
4824 if (rv)
Philipp Reisner44ed1672011-04-19 17:10:19 +02004825 conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
4826 resp_size);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004827 else
4828 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004829
4830 fail:
4831 kfree(peers_ch);
4832 kfree(response);
4833 kfree(right_response);
4834
4835 return rv;
4836}
4837#endif
4838
4839int drbdd_init(struct drbd_thread *thi)
4840{
Philipp Reisner392c8802011-02-09 10:33:31 +01004841 struct drbd_tconn *tconn = thi->tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004842 int h;
4843
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004844 conn_info(tconn, "receiver (re)started\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004845
4846 do {
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004847 h = conn_connect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004848 if (h == 0) {
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004849 conn_disconnect(tconn);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004850 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004851 }
4852 if (h == -1) {
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004853 conn_warn(tconn, "Discarding network configuration.\n");
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01004854 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004855 }
4856 } while (h == 0);
4857
Philipp Reisner91fd4da2011-04-20 17:47:29 +02004858 if (h > 0)
4859 drbdd(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004860
Philipp Reisner81fa2e62011-05-04 15:10:30 +02004861 conn_disconnect(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004862
Philipp Reisner4d641dd2011-02-08 15:40:24 +01004863 conn_info(tconn, "receiver terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07004864 return 0;
4865}
4866
4867/* ********* acknowledge sender ******** */
4868
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004869static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004870{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004871 struct p_req_state_reply *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004872 int retcode = be32_to_cpu(p->retcode);
4873
4874 if (retcode >= SS_SUCCESS) {
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004875 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004876 } else {
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004877 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4878 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4879 drbd_set_st_err_str(retcode), retcode);
4880 }
4881 wake_up(&tconn->ping_wait);
4882
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004883 return 0;
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004884}
4885
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004886static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004887{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004888 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004889 struct p_req_state_reply *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004890 int retcode = be32_to_cpu(p->retcode);
4891
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004892 mdev = vnr_to_mdev(tconn, pi->vnr);
4893 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004894 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004895
Philipp Reisner4d0fc3f2012-01-20 13:52:27 +01004896 if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4897 D_ASSERT(tconn->agreed_pro_version < 100);
4898 return got_conn_RqSReply(tconn, pi);
4899 }
4900
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004901 if (retcode >= SS_SUCCESS) {
4902 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4903 } else {
4904 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004905 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
Philipp Reisnere4f78ed2011-03-16 11:27:48 +01004906 drbd_set_st_err_str(retcode), retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004907 }
4908 wake_up(&mdev->state_wait);
4909
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004910 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004911}
4912
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004913static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004914{
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004915 return drbd_send_ping_ack(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004916
4917}
4918
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004919static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004920{
4921 /* restore idle timeout */
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01004922 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4923 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4924 wake_up(&tconn->ping_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004925
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004926 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004927}
4928
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004929static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004930{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004931 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004932 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004933 sector_t sector = be64_to_cpu(p->sector);
4934 int blksize = be32_to_cpu(p->blksize);
4935
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004936 mdev = vnr_to_mdev(tconn, pi->vnr);
4937 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004938 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004939
Philipp Reisner31890f42011-01-19 14:12:51 +01004940 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004941
4942 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4943
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004944 if (get_ldev(mdev)) {
4945 drbd_rs_complete_io(mdev, sector);
4946 drbd_set_in_sync(mdev, sector, blksize);
4947 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4948 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4949 put_ldev(mdev);
4950 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004951 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004952 atomic_add(blksize >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004953
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004954 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004955}
4956
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004957static int
4958validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4959 struct rb_root *root, const char *func,
4960 enum drbd_req_event what, bool missing_ok)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004961{
4962 struct drbd_request *req;
4963 struct bio_and_error m;
4964
Philipp Reisner87eeee42011-01-19 14:16:30 +01004965 spin_lock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacherbc9c5c42011-01-21 18:00:55 +01004966 req = find_request(mdev, root, id, sector, missing_ok, func);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004967 if (unlikely(!req)) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01004968 spin_unlock_irq(&mdev->tconn->req_lock);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004969 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004970 }
4971 __req_mod(req, what, &m);
Philipp Reisner87eeee42011-01-19 14:16:30 +01004972 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004973
4974 if (m.bio)
4975 complete_master_bio(mdev, &m);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02004976 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004977}
4978
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004979static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004980{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004981 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02004982 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004983 sector_t sector = be64_to_cpu(p->sector);
4984 int blksize = be32_to_cpu(p->blksize);
4985 enum drbd_req_event what;
4986
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004987 mdev = vnr_to_mdev(tconn, pi->vnr);
4988 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004989 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01004990
Philipp Reisnerb411b362009-09-25 16:07:19 -07004991 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4992
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01004993 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004994 drbd_set_in_sync(mdev, sector, blksize);
4995 dec_rs_pending(mdev);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02004996 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004997 }
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01004998 switch (pi->cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004999 case P_RS_WRITE_ACK:
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01005000 what = WRITE_ACKED_BY_PEER_AND_SIS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005001 break;
5002 case P_WRITE_ACK:
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01005003 what = WRITE_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005004 break;
5005 case P_RECV_ACK:
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01005006 what = RECV_ACKED_BY_PEER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005007 break;
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02005008 case P_SUPERSEDED:
5009 what = CONFLICT_RESOLVED;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01005010 break;
5011 case P_RETRY_WRITE:
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01005012 what = POSTPONE_WRITE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005013 break;
5014 default:
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005015 BUG();
Philipp Reisnerb411b362009-09-25 16:07:19 -07005016 }
5017
5018 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005019 &mdev->write_requests, __func__,
5020 what, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005021}
5022
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005023static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005024{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005025 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005026 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005027 sector_t sector = be64_to_cpu(p->sector);
Philipp Reisner2deb8332011-01-17 18:39:18 +01005028 int size = be32_to_cpu(p->blksize);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02005029 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005030
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005031 mdev = vnr_to_mdev(tconn, pi->vnr);
5032 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005033 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005034
5035 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5036
Andreas Gruenbacher579b57e2011-01-13 18:40:57 +01005037 if (p->block_id == ID_SYNCER) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07005038 dec_rs_pending(mdev);
5039 drbd_rs_failed_io(mdev, sector, size);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005040 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005041 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01005042
Andreas Gruenbacher85997672011-04-04 13:09:15 +02005043 err = validate_req_change_req_state(mdev, p->block_id, sector,
5044 &mdev->write_requests, __func__,
Philipp Reisner303d1442011-04-13 16:24:47 -07005045 NEG_ACKED, true);
Andreas Gruenbacher85997672011-04-04 13:09:15 +02005046 if (err) {
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01005047 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5048 The master bio might already be completed, therefore the
5049 request is no longer in the collision hash. */
5050 /* In Protocol B we might already have got a P_RECV_ACK
5051 but then get a P_NEG_ACK afterwards. */
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01005052 drbd_set_out_of_sync(mdev, sector, size);
Philipp Reisner2deb8332011-01-17 18:39:18 +01005053 }
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005054 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005055}
5056
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005057static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005058{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005059 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005060 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005061 sector_t sector = be64_to_cpu(p->sector);
5062
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005063 mdev = vnr_to_mdev(tconn, pi->vnr);
5064 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005065 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005066
Philipp Reisnerb411b362009-09-25 16:07:19 -07005067 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01005068
Philipp Reisner380207d2011-11-11 12:31:20 +01005069 dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07005070 (unsigned long long)sector, be32_to_cpu(p->blksize));
5071
5072 return validate_req_change_req_state(mdev, p->block_id, sector,
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005073 &mdev->read_requests, __func__,
5074 NEG_ACKED, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005075}
5076
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005077static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005078{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005079 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005080 sector_t sector;
5081 int size;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005082 struct p_block_ack *p = pi->data;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005083
5084 mdev = vnr_to_mdev(tconn, pi->vnr);
5085 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005086 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005087
5088 sector = be64_to_cpu(p->sector);
5089 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005090
5091 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5092
5093 dec_rs_pending(mdev);
5094
5095 if (get_ldev_if_state(mdev, D_FAILED)) {
5096 drbd_rs_complete_io(mdev, sector);
Andreas Gruenbachere05e1e52011-03-25 15:16:26 +01005097 switch (pi->cmd) {
Philipp Reisnerd612d302010-12-27 10:53:28 +01005098 case P_NEG_RS_DREPLY:
5099 drbd_rs_failed_io(mdev, sector, size);
5100 case P_RS_CANCEL:
5101 break;
5102 default:
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005103 BUG();
Philipp Reisnerd612d302010-12-27 10:53:28 +01005104 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005105 put_ldev(mdev);
5106 }
5107
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005108 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005109}
5110
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005111static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005112{
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005113 struct p_barrier_ack *p = pi->data;
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005114 struct drbd_conf *mdev;
5115 int vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005116
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005117 tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
Philipp Reisnerb411b362009-09-25 16:07:19 -07005118
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005119 rcu_read_lock();
5120 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5121 if (mdev->state.conn == C_AHEAD &&
5122 atomic_read(&mdev->ap_in_flight) == 0 &&
5123 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5124 mdev->start_resync_timer.expires = jiffies + HZ;
5125 add_timer(&mdev->start_resync_timer);
5126 }
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02005127 }
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02005128 rcu_read_unlock();
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02005129
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005130 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005131}
5132
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005133static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisnerb411b362009-09-25 16:07:19 -07005134{
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005135 struct drbd_conf *mdev;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005136 struct p_block_ack *p = pi->data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005137 struct drbd_work *w;
5138 sector_t sector;
5139 int size;
5140
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005141 mdev = vnr_to_mdev(tconn, pi->vnr);
5142 if (!mdev)
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005143 return -EIO;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005144
Philipp Reisnerb411b362009-09-25 16:07:19 -07005145 sector = be64_to_cpu(p->sector);
5146 size = be32_to_cpu(p->blksize);
5147
5148 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5149
5150 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01005151 drbd_ov_out_of_sync_found(mdev, sector, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005152 else
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01005153 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005154
Lars Ellenberg1d53f092010-09-05 01:13:24 +02005155 if (!get_ldev(mdev))
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005156 return 0;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02005157
Philipp Reisnerb411b362009-09-25 16:07:19 -07005158 drbd_rs_complete_io(mdev, sector);
5159 dec_rs_pending(mdev);
5160
Lars Ellenbergea5442a2010-11-05 09:48:01 +01005161 --mdev->ov_left;
5162
5163 /* let's advance progress step marks only for every other megabyte */
5164 if ((mdev->ov_left & 0x200) == 0x200)
5165 drbd_advance_rs_marks(mdev, mdev->ov_left);
5166
5167 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07005168 w = kmalloc(sizeof(*w), GFP_NOIO);
5169 if (w) {
5170 w->cb = w_ov_finished;
Philipp Reisnera21e9292011-02-08 15:08:49 +01005171 w->mdev = mdev;
Lars Ellenbergd5b27b02011-11-14 15:42:37 +01005172 drbd_queue_work(&mdev->tconn->sender_work, w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005173 } else {
5174 dev_err(DEV, "kmalloc(w) failed.");
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01005175 ov_out_of_sync_print(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005176 drbd_resync_finished(mdev);
5177 }
5178 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02005179 put_ldev(mdev);
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005180 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005181}
5182
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005183static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02005184{
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005185 return 0;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02005186}
5187
Andreas Gruenbachera990be42011-04-06 17:56:48 +02005188static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
Philipp Reisner32862ec2011-02-08 16:41:01 +01005189{
Philipp Reisner082a3432011-03-15 16:05:42 +01005190 struct drbd_conf *mdev;
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005191 int vnr, not_empty = 0;
Philipp Reisner32862ec2011-02-08 16:41:01 +01005192
5193 do {
5194 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5195 flush_signals(current);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005196
5197 rcu_read_lock();
5198 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5199 kref_get(&mdev->kref);
5200 rcu_read_unlock();
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07005201 if (drbd_finish_peer_reqs(mdev)) {
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005202 kref_put(&mdev->kref, &drbd_minor_destroy);
5203 return 1;
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07005204 }
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005205 kref_put(&mdev->kref, &drbd_minor_destroy);
5206 rcu_read_lock();
Philipp Reisner082a3432011-03-15 16:05:42 +01005207 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005208 set_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisner082a3432011-03-15 16:05:42 +01005209
5210 spin_lock_irq(&tconn->req_lock);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005211 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
Philipp Reisner082a3432011-03-15 16:05:42 +01005212 not_empty = !list_empty(&mdev->done_ee);
5213 if (not_empty)
5214 break;
5215 }
5216 spin_unlock_irq(&tconn->req_lock);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02005217 rcu_read_unlock();
Philipp Reisner32862ec2011-02-08 16:41:01 +01005218 } while (not_empty);
5219
5220 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005221}
5222
5223struct asender_cmd {
5224 size_t pkt_size;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005225 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005226};
5227
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01005228static struct asender_cmd asender_tbl[] = {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005229 [P_PING] = { 0, got_Ping },
5230 [P_PING_ACK] = { 0, got_PingAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07005231 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5232 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5233 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02005234 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07005235 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5236 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005237 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
Philipp Reisnerb411b362009-09-25 16:07:19 -07005238 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5239 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5240 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5241 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
Philipp Reisner02918be2010-08-20 14:35:10 +02005242 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005243 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5244 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5245 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01005246};
Philipp Reisnerb411b362009-09-25 16:07:19 -07005247
5248int drbd_asender(struct drbd_thread *thi)
5249{
Philipp Reisner392c8802011-02-09 10:33:31 +01005250 struct drbd_tconn *tconn = thi->tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005251 struct asender_cmd *cmd = NULL;
Philipp Reisner77351055b2011-02-07 17:24:26 +01005252 struct packet_info pi;
Philipp Reisner257d0af2011-01-26 12:15:29 +01005253 int rv;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005254 void *buf = tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005255 int received = 0;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02005256 unsigned int header_size = drbd_header_size(tconn);
5257 int expect = header_size;
Philipp Reisner44ed1672011-04-19 17:10:19 +02005258 bool ping_timeout_active = false;
5259 struct net_conf *nc;
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005260 int ping_timeo, tcp_cork, ping_int;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005261
5262 current->policy = SCHED_RR; /* Make this a realtime task! */
5263 current->rt_priority = 2; /* more important than all other tasks */
5264
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01005265 while (get_t_state(thi) == RUNNING) {
Philipp Reisner80822282011-02-08 12:46:30 +01005266 drbd_thread_current_set_cpu(thi);
Philipp Reisner44ed1672011-04-19 17:10:19 +02005267
5268 rcu_read_lock();
5269 nc = rcu_dereference(tconn->net_conf);
5270 ping_timeo = nc->ping_timeo;
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005271 tcp_cork = nc->tcp_cork;
Philipp Reisner44ed1672011-04-19 17:10:19 +02005272 ping_int = nc->ping_int;
5273 rcu_read_unlock();
5274
Philipp Reisner32862ec2011-02-08 16:41:01 +01005275 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
Andreas Gruenbachera17647a2011-04-01 12:49:42 +02005276 if (drbd_send_ping(tconn)) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005277 conn_err(tconn, "drbd_send_ping has failed\n");
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01005278 goto reconnect;
5279 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02005280 tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5281 ping_timeout_active = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005282 }
5283
Philipp Reisner32862ec2011-02-08 16:41:01 +01005284 /* TODO: conditionally cork; it may hurt latency if we cork without
5285 much to send */
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005286 if (tcp_cork)
Philipp Reisner32862ec2011-02-08 16:41:01 +01005287 drbd_tcp_cork(tconn->meta.socket);
Andreas Gruenbachera990be42011-04-06 17:56:48 +02005288 if (tconn_finish_peer_reqs(tconn)) {
5289 conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
Philipp Reisner32862ec2011-02-08 16:41:01 +01005290 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005291 }
5292 /* but unconditionally uncork unless disabled */
Andreas Gruenbacherbb77d342011-05-04 15:25:35 +02005293 if (tcp_cork)
Philipp Reisner32862ec2011-02-08 16:41:01 +01005294 drbd_tcp_uncork(tconn->meta.socket);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005295
5296 /* short circuit, recv_msg would return EINTR anyways. */
5297 if (signal_pending(current))
5298 continue;
5299
Philipp Reisner32862ec2011-02-08 16:41:01 +01005300 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
5301 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005302
5303 flush_signals(current);
5304
5305 /* Note:
5306 * -EINTR (on meta) we got a signal
5307 * -EAGAIN (on meta) rcvtimeo expired
5308 * -ECONNRESET other side closed the connection
5309 * -ERESTARTSYS (on data) we got a signal
5310 * rv < 0 other than above: unexpected error!
5311 * rv == expected: full header or command
5312 * rv < expected: "woken" by signal during receive
5313 * rv == 0 : "connection shut down by peer"
5314 */
5315 if (likely(rv > 0)) {
5316 received += rv;
5317 buf += rv;
5318 } else if (rv == 0) {
Philipp Reisnerb66623e2012-08-08 21:19:09 +02005319 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5320 long t;
5321 rcu_read_lock();
5322 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
5323 rcu_read_unlock();
5324
5325 t = wait_event_timeout(tconn->ping_wait,
5326 tconn->cstate < C_WF_REPORT_PARAMS,
5327 t);
Philipp Reisner599377a2012-08-17 14:50:22 +02005328 if (t)
5329 break;
5330 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005331 conn_err(tconn, "meta connection shut down by peer.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07005332 goto reconnect;
5333 } else if (rv == -EAGAIN) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02005334 /* If the data socket received something meanwhile,
5335 * that is good enough: peer is still alive. */
Philipp Reisner32862ec2011-02-08 16:41:01 +01005336 if (time_after(tconn->last_received,
5337 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02005338 continue;
Lars Ellenbergf36af182011-03-09 22:44:55 +01005339 if (ping_timeout_active) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005340 conn_err(tconn, "PingAck did not arrive in time.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07005341 goto reconnect;
5342 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005343 set_bit(SEND_PING, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005344 continue;
5345 } else if (rv == -EINTR) {
5346 continue;
5347 } else {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005348 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005349 goto reconnect;
5350 }
5351
5352 if (received == expect && cmd == NULL) {
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005353 if (decode_header(tconn, tconn->meta.rbuf, &pi))
Philipp Reisnerb411b362009-09-25 16:07:19 -07005354 goto reconnect;
Andreas Gruenbacher7201b972011-03-14 18:23:00 +01005355 cmd = &asender_tbl[pi.cmd];
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005356 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
Andreas Gruenbacher2fcb8f32011-07-03 11:41:08 +02005357 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5358 cmdname(pi.cmd), pi.cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005359 goto disconnect;
5360 }
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005361 expect = header_size + cmd->pkt_size;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02005362 if (pi.size != expect - header_size) {
Philipp Reisner32862ec2011-02-08 16:41:01 +01005363 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
Philipp Reisner77351055b2011-02-07 17:24:26 +01005364 pi.cmd, pi.size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005365 goto reconnect;
Philipp Reisner257d0af2011-01-26 12:15:29 +01005366 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005367 }
5368 if (received == expect) {
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005369 bool err;
Philipp Reisnera4fbda82011-03-16 11:13:17 +01005370
Andreas Gruenbacher2735a592011-04-04 15:30:24 +02005371 err = cmd->fn(tconn, &pi);
5372 if (err) {
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005373 conn_err(tconn, "%pf failed\n", cmd->fn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005374 goto reconnect;
Andreas Gruenbacher1952e912011-03-25 15:37:43 +01005375 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005376
Philipp Reisnera4fbda82011-03-16 11:13:17 +01005377 tconn->last_received = jiffies;
Lars Ellenbergf36af182011-03-09 22:44:55 +01005378
Philipp Reisner44ed1672011-04-19 17:10:19 +02005379 if (cmd == &asender_tbl[P_PING_ACK]) {
5380 /* restore idle timeout */
5381 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5382 ping_timeout_active = false;
5383 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07005384
Andreas Gruenbachere6589832011-03-30 12:54:42 +02005385 buf = tconn->meta.rbuf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005386 received = 0;
Andreas Gruenbacher52b061a2011-03-30 11:38:49 +02005387 expect = header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07005388 cmd = NULL;
5389 }
5390 }
5391
5392 if (0) {
5393reconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01005394 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
Philipp Reisner19fffd72012-08-28 16:48:03 +02005395 conn_md_sync(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005396 }
5397 if (0) {
5398disconnect:
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01005399 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005400 }
Philipp Reisner32862ec2011-02-08 16:41:01 +01005401 clear_bit(SIGNAL_ASENDER, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07005402
Philipp Reisner32862ec2011-02-08 16:41:01 +01005403 conn_info(tconn, "asender terminated\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07005404
5405 return 0;
5406}