Philipp Reisnerb411b362009-09-25 16:07:19 -07001/*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
Philipp Reisnerb411b362009-09-25 16:07:19 -070029#include <linux/module.h>
Philipp Reisnerb411b362009-09-25 16:07:19 -070030#include <linux/drbd.h>
31#include <asm/uaccess.h>
32#include <asm/types.h>
33#include <net/sock.h>
34#include <linux/ctype.h>
Arnd Bergmann2a48fc02010-06-02 14:28:52 +020035#include <linux/mutex.h>
Philipp Reisnerb411b362009-09-25 16:07:19 -070036#include <linux/fs.h>
37#include <linux/file.h>
38#include <linux/proc_fs.h>
39#include <linux/init.h>
40#include <linux/mm.h>
41#include <linux/memcontrol.h>
42#include <linux/mm_inline.h>
43#include <linux/slab.h>
44#include <linux/random.h>
45#include <linux/reboot.h>
46#include <linux/notifier.h>
47#include <linux/kthread.h>
48
49#define __KERNEL_SYSCALLS__
50#include <linux/unistd.h>
51#include <linux/vmalloc.h>
52
53#include <linux/drbd_limits.h>
54#include "drbd_int.h"
Philipp Reisnerb411b362009-09-25 16:07:19 -070055#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57#include "drbd_vli.h"
58
59struct after_state_chg_work {
60 struct drbd_work w;
61 union drbd_state os;
62 union drbd_state ns;
63 enum chg_state_flags flags;
64 struct completion *done;
65};
66
Arnd Bergmann2a48fc02010-06-02 14:28:52 +020067static DEFINE_MUTEX(drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -070068int drbdd_init(struct drbd_thread *);
69int drbd_worker(struct drbd_thread *);
70int drbd_asender(struct drbd_thread *);
71
72int drbd_init(void);
73static int drbd_open(struct block_device *bdev, fmode_t mode);
74static int drbd_release(struct gendisk *gd, fmode_t mode);
75static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77 union drbd_state ns, enum chg_state_flags flags);
78static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +020081static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
Philipp Reisnerb411b362009-09-25 16:07:19 -070082
Philipp Reisnerb411b362009-09-25 16:07:19 -070083MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>");
85MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86MODULE_VERSION(REL_VERSION);
87MODULE_LICENSE("GPL");
Philipp Reisner2b8a90b2011-01-10 11:15:17 +010088MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
Philipp Reisnerb411b362009-09-25 16:07:19 -070090MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92#include <linux/moduleparam.h>
93/* allow_open_on_secondary */
94MODULE_PARM_DESC(allow_oos, "DONT USE!");
95/* thanks to these macros, if compiled into the kernel (not-module),
96 * this becomes the boot parameter drbd.minor_count */
97module_param(minor_count, uint, 0444);
98module_param(disable_sendpage, bool, 0644);
99module_param(allow_oos, bool, 0);
100module_param(cn_idx, uint, 0444);
101module_param(proc_details, int, 0644);
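/*
 * Example (illustrative only, not part of the driver): when built as a module
 * the parameters above are given on the modprobe command line, e.g.
 *
 *	modprobe drbd minor_count=8 usermode_helper=/sbin/drbdadm
 *
 * When drbd is compiled into the kernel, the same knobs become boot parameters
 * prefixed with "drbd.", e.g. drbd.minor_count=8.  The values shown here are
 * placeholders, not recommendations.
 */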
102
103#ifdef CONFIG_DRBD_FAULT_INJECTION
104int enable_faults;
105int fault_rate;
106static int fault_count;
107int fault_devs;
108/* bitmap of enabled faults */
109module_param(enable_faults, int, 0664);
110/* fault rate % value - applies to all enabled faults */
111module_param(fault_rate, int, 0664);
112/* count of faults inserted */
113module_param(fault_count, int, 0664);
114/* bitmap of devices to insert faults on */
115module_param(fault_devs, int, 0644);
116#endif
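/*
 * Illustrative only: since the fault injection parameters above are created
 * with non-zero permissions, they can normally also be tweaked at runtime via
 * sysfs (assuming the standard /sys/module/<name>/parameters layout), e.g.
 *
 *	echo 16 > /sys/module/drbd/parameters/enable_faults
 *	echo 10 > /sys/module/drbd/parameters/fault_rate
 *
 * The bitmask values correspond to the fault types defined in drbd_int.h;
 * the numbers above are placeholders.
 */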
117
118/* module parameter, defined */
Philipp Reisner2b8a90b2011-01-10 11:15:17 +0100119unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700120int disable_sendpage;
121int allow_oos;
122unsigned int cn_idx = CN_IDX_DRBD;
 123int proc_details; /* Detail level in /proc/drbd */
124
125/* Module parameter for setting the user mode helper program
126 * to run. Default is /sbin/drbdadm */
127char usermode_helper[80] = "/sbin/drbdadm";
128
129module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131/* in 2.6.x, our device mapping and config info contains our virtual gendisks
132 * as member "struct gendisk *vdisk;"
133 */
134struct drbd_conf **minor_table;
135
136struct kmem_cache *drbd_request_cache;
137struct kmem_cache *drbd_ee_cache; /* epoch entries */
138struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
139struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
140mempool_t *drbd_request_mempool;
141mempool_t *drbd_ee_mempool;
142
143/* I do not use a standard mempool, because:
144 1) I want to hand out the pre-allocated objects first.
145 2) I want to be able to interrupt sleeping allocation with a signal.
 146 Note: This is a singly linked list; the next pointer is the private
147 member of struct page.
148 */
149struct page *drbd_pp_pool;
150spinlock_t drbd_pp_lock;
151int drbd_pp_vacant;
152wait_queue_head_t drbd_pp_wait;
153
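/*
 * Minimal sketch of the list discipline described above (this is not the
 * driver's actual pool allocator): pages are chained through their ->private
 * field, drbd_pp_pool is the head, drbd_pp_lock protects the list.
 */
#if 0
static void example_pp_push(struct page *page)
{
	spin_lock(&drbd_pp_lock);
	set_page_private(page, (unsigned long)drbd_pp_pool);
	drbd_pp_pool = page;
	drbd_pp_vacant++;
	spin_unlock(&drbd_pp_lock);
}

static struct page *example_pp_pop(void)
{
	struct page *page;

	spin_lock(&drbd_pp_lock);
	page = drbd_pp_pool;
	if (page) {
		drbd_pp_pool = (struct page *)page_private(page);
		set_page_private(page, 0);
		drbd_pp_vacant--;
	}
	spin_unlock(&drbd_pp_lock);
	return page;
}
#endif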
154DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
Emese Revfy7d4e9d02009-12-14 00:59:30 +0100156static const struct block_device_operations drbd_ops = {
Philipp Reisnerb411b362009-09-25 16:07:19 -0700157 .owner = THIS_MODULE,
158 .open = drbd_open,
159 .release = drbd_release,
160};
161
162#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164#ifdef __CHECKER__
165/* When checking with sparse, and this is an inline function, sparse will
 166 give tons of false positives. When this is a real function, sparse works.
167 */
168int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169{
170 int io_allowed;
171
172 atomic_inc(&mdev->local_cnt);
173 io_allowed = (mdev->state.disk >= mins);
174 if (!io_allowed) {
175 if (atomic_dec_and_test(&mdev->local_cnt))
176 wake_up(&mdev->misc_wait);
177 }
178 return io_allowed;
179}
180
181#endif
182
183/**
184 * DOC: The transfer log
185 *
 186 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
187 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188 * of the list. There is always at least one &struct drbd_tl_epoch object.
189 *
 190 * Each &struct drbd_tl_epoch has a circular, doubly linked list of requests
191 * attached.
192 */
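/*
 * Illustrative sketch (not a driver function) of how the structure described
 * above is traversed; a real caller would hold mdev->req_lock.
 */
#if 0
static void example_walk_transfer_log(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;
	struct drbd_request *req;

	/* epochs oldest to newest; each carries the write requests issued
	 * between two barriers on its circular ->requests list */
	for (b = mdev->oldest_tle; b != NULL; b = b->next)
		list_for_each_entry(req, &b->requests, tl_requests)
			; /* inspect req here */
}
#endif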
193static int tl_init(struct drbd_conf *mdev)
194{
195 struct drbd_tl_epoch *b;
196
197 /* during device minor initialization, we may well use GFP_KERNEL */
198 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199 if (!b)
200 return 0;
201 INIT_LIST_HEAD(&b->requests);
202 INIT_LIST_HEAD(&b->w.list);
203 b->next = NULL;
204 b->br_number = 4711;
Philipp Reisner7e602c02010-05-27 14:49:27 +0200205 b->n_writes = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700206 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208 mdev->oldest_tle = b;
209 mdev->newest_tle = b;
210 INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211
Philipp Reisnerb411b362009-09-25 16:07:19 -0700212 return 1;
213}
214
215static void tl_cleanup(struct drbd_conf *mdev)
216{
217 D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
218 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
219 kfree(mdev->oldest_tle);
220 mdev->oldest_tle = NULL;
221 kfree(mdev->unused_spare_tle);
222 mdev->unused_spare_tle = NULL;
Andreas Gruenbacherd6287692011-01-13 23:05:39 +0100223}
224
Philipp Reisnerb411b362009-09-25 16:07:19 -0700225/**
226 * _tl_add_barrier() - Adds a barrier to the transfer log
227 * @mdev: DRBD device.
228 * @new: Barrier to be added before the current head of the TL.
229 *
230 * The caller must hold the req_lock.
231 */
232void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
233{
234 struct drbd_tl_epoch *newest_before;
235
236 INIT_LIST_HEAD(&new->requests);
237 INIT_LIST_HEAD(&new->w.list);
238 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
239 new->next = NULL;
Philipp Reisner7e602c02010-05-27 14:49:27 +0200240 new->n_writes = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700241
242 newest_before = mdev->newest_tle;
243 /* never send a barrier number == 0, because that is special-cased
244 * when using TCQ for our write ordering code */
245 new->br_number = (newest_before->br_number+1) ?: 1;
246 if (mdev->newest_tle != new) {
247 mdev->newest_tle->next = new;
248 mdev->newest_tle = new;
249 }
250}
251
252/**
253 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
254 * @mdev: DRBD device.
255 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
256 * @set_size: Expected number of requests before that barrier.
257 *
258 * In case the passed barrier_nr or set_size does not match the oldest
 259 * &struct drbd_tl_epoch object, this function will cause a termination
260 * of the connection.
261 */
262void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
263 unsigned int set_size)
264{
265 struct drbd_tl_epoch *b, *nob; /* next old barrier */
266 struct list_head *le, *tle;
267 struct drbd_request *r;
268
269 spin_lock_irq(&mdev->req_lock);
270
271 b = mdev->oldest_tle;
272
273 /* first some paranoia code */
274 if (b == NULL) {
275 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
276 barrier_nr);
277 goto bail;
278 }
279 if (b->br_number != barrier_nr) {
280 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
281 barrier_nr, b->br_number);
282 goto bail;
283 }
Philipp Reisner7e602c02010-05-27 14:49:27 +0200284 if (b->n_writes != set_size) {
285 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
286 barrier_nr, set_size, b->n_writes);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700287 goto bail;
288 }
289
290 /* Clean up list of requests processed during current epoch */
291 list_for_each_safe(le, tle, &b->requests) {
292 r = list_entry(le, struct drbd_request, tl_requests);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +0100293 _req_mod(r, BARRIER_ACKED);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700294 }
295 /* There could be requests on the list waiting for completion
 296 of the write to the local disk. To avoid corruption of the
 297 slab's data structures we have to remove the list's head.
298
299 Also there could have been a barrier ack out of sequence, overtaking
 300 the write acks - which would be a bug and violate write ordering.
301 To not deadlock in case we lose connection while such requests are
302 still pending, we need some way to find them for the
Andreas Gruenbacher8554df12011-01-25 15:37:43 +0100303 _req_mod(CONNECTION_LOST_WHILE_PENDING).
Philipp Reisnerb411b362009-09-25 16:07:19 -0700304
305 These have been list_move'd to the out_of_sequence_requests list in
Andreas Gruenbacher8554df12011-01-25 15:37:43 +0100306 _req_mod(, BARRIER_ACKED) above.
Philipp Reisnerb411b362009-09-25 16:07:19 -0700307 */
308 list_del_init(&b->requests);
309
310 nob = b->next;
311 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
312 _tl_add_barrier(mdev, b);
313 if (nob)
314 mdev->oldest_tle = nob;
315 /* if nob == NULL b was the only barrier, and becomes the new
316 barrier. Therefore mdev->oldest_tle points already to b */
317 } else {
318 D_ASSERT(nob != NULL);
319 mdev->oldest_tle = nob;
320 kfree(b);
321 }
322
323 spin_unlock_irq(&mdev->req_lock);
324 dec_ap_pending(mdev);
325
326 return;
327
328bail:
329 spin_unlock_irq(&mdev->req_lock);
330 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
331}
332
Philipp Reisner617049a2010-12-22 12:48:31 +0100333
Philipp Reisner11b58e72010-05-12 17:08:26 +0200334/**
335 * _tl_restart() - Walks the transfer log, and applies an action to all requests
336 * @mdev: DRBD device.
337 * @what: The action/event to perform with all request objects
338 *
Andreas Gruenbacher8554df12011-01-25 15:37:43 +0100339 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
340 * RESTART_FROZEN_DISK_IO.
Philipp Reisner11b58e72010-05-12 17:08:26 +0200341 */
342static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
343{
344 struct drbd_tl_epoch *b, *tmp, **pn;
Philipp Reisnerb9b98712010-06-22 11:26:48 +0200345 struct list_head *le, *tle, carry_reads;
Philipp Reisner11b58e72010-05-12 17:08:26 +0200346 struct drbd_request *req;
347 int rv, n_writes, n_reads;
348
349 b = mdev->oldest_tle;
350 pn = &mdev->oldest_tle;
351 while (b) {
352 n_writes = 0;
353 n_reads = 0;
Philipp Reisnerb9b98712010-06-22 11:26:48 +0200354 INIT_LIST_HEAD(&carry_reads);
Philipp Reisner11b58e72010-05-12 17:08:26 +0200355 list_for_each_safe(le, tle, &b->requests) {
356 req = list_entry(le, struct drbd_request, tl_requests);
357 rv = _req_mod(req, what);
358
359 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
360 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
361 }
362 tmp = b->next;
363
Philipp Reisnerb9b98712010-06-22 11:26:48 +0200364 if (n_writes) {
Andreas Gruenbacher8554df12011-01-25 15:37:43 +0100365 if (what == RESEND) {
Philipp Reisner11b58e72010-05-12 17:08:26 +0200366 b->n_writes = n_writes;
367 if (b->w.cb == NULL) {
368 b->w.cb = w_send_barrier;
369 inc_ap_pending(mdev);
370 set_bit(CREATE_BARRIER, &mdev->flags);
371 }
372
373 drbd_queue_work(&mdev->data.work, &b->w);
374 }
375 pn = &b->next;
376 } else {
Philipp Reisnerb9b98712010-06-22 11:26:48 +0200377 if (n_reads)
378 list_add(&carry_reads, &b->requests);
Philipp Reisner11b58e72010-05-12 17:08:26 +0200379 /* there could still be requests on that ring list,
380 * in case local io is still pending */
381 list_del(&b->requests);
382
383 /* dec_ap_pending corresponding to queue_barrier.
384 * the newest barrier may not have been queued yet,
385 * in which case w.cb is still NULL. */
386 if (b->w.cb != NULL)
387 dec_ap_pending(mdev);
388
389 if (b == mdev->newest_tle) {
390 /* recycle, but reinit! */
391 D_ASSERT(tmp == NULL);
392 INIT_LIST_HEAD(&b->requests);
Philipp Reisnerb9b98712010-06-22 11:26:48 +0200393 list_splice(&carry_reads, &b->requests);
Philipp Reisner11b58e72010-05-12 17:08:26 +0200394 INIT_LIST_HEAD(&b->w.list);
395 b->w.cb = NULL;
396 b->br_number = net_random();
397 b->n_writes = 0;
398
399 *pn = b;
400 break;
401 }
402 *pn = tmp;
403 kfree(b);
404 }
405 b = tmp;
Philipp Reisnerb9b98712010-06-22 11:26:48 +0200406 list_splice(&carry_reads, &b->requests);
Philipp Reisner11b58e72010-05-12 17:08:26 +0200407 }
408}
409
Philipp Reisnerb411b362009-09-25 16:07:19 -0700410
411/**
412 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
413 * @mdev: DRBD device.
414 *
415 * This is called after the connection to the peer was lost. The storage covered
 416 * by the requests on the transfer log gets marked as out of sync. Called from the
417 * receiver thread and the worker thread.
418 */
419void tl_clear(struct drbd_conf *mdev)
420{
Philipp Reisnerb411b362009-09-25 16:07:19 -0700421 struct list_head *le, *tle;
422 struct drbd_request *r;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700423
424 spin_lock_irq(&mdev->req_lock);
425
Andreas Gruenbacher8554df12011-01-25 15:37:43 +0100426 _tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700427
428 /* we expect this list to be empty. */
429 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
430
431 /* but just in case, clean it up anyways! */
432 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
433 r = list_entry(le, struct drbd_request, tl_requests);
434 /* It would be nice to complete outside of spinlock.
435 * But this is easier for now. */
Andreas Gruenbacher8554df12011-01-25 15:37:43 +0100436 _req_mod(r, CONNECTION_LOST_WHILE_PENDING);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700437 }
438
439 /* ensure bit indicating barrier is required is clear */
440 clear_bit(CREATE_BARRIER, &mdev->flags);
441
442 spin_unlock_irq(&mdev->req_lock);
443}
444
Philipp Reisner11b58e72010-05-12 17:08:26 +0200445void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
446{
447 spin_lock_irq(&mdev->req_lock);
448 _tl_restart(mdev, what);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700449 spin_unlock_irq(&mdev->req_lock);
450}
451
452/**
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100453 * cl_wide_st_chg() - true if the state change is a cluster wide one
Philipp Reisnerb411b362009-09-25 16:07:19 -0700454 * @mdev: DRBD device.
455 * @os: old (current) state.
456 * @ns: new (wanted) state.
457 */
458static int cl_wide_st_chg(struct drbd_conf *mdev,
459 union drbd_state os, union drbd_state ns)
460{
461 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
462 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
463 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
464 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
465 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
466 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
467 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
468}
469
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100470enum drbd_state_rv
471drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
472 union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700473{
474 unsigned long flags;
475 union drbd_state os, ns;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100476 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700477
478 spin_lock_irqsave(&mdev->req_lock, flags);
479 os = mdev->state;
480 ns.i = (os.i & ~mask.i) | val.i;
481 rv = _drbd_set_state(mdev, ns, f, NULL);
482 ns = mdev->state;
483 spin_unlock_irqrestore(&mdev->req_lock, flags);
484
485 return rv;
486}
487
488/**
489 * drbd_force_state() - Impose a change which happens outside our control on our state
490 * @mdev: DRBD device.
491 * @mask: mask of state bits to change.
492 * @val: value of new state bits.
493 */
494void drbd_force_state(struct drbd_conf *mdev,
495 union drbd_state mask, union drbd_state val)
496{
497 drbd_change_state(mdev, CS_HARD, mask, val);
498}
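/*
 * Usage sketch (illustrative only): callers build the mask/val pair with the
 * NS()/_NS() helpers, as done throughout this file.
 */
#if 0
static void example_state_changes(struct drbd_conf *mdev)
{
	/* impose a state without validation (CS_HARD), e.g. on a protocol error */
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));

	/* request a validated (and possibly cluster wide) state change */
	_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
}
#endif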
499
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100500static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
501static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
502 union drbd_state,
503 union drbd_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700504static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
Lars Ellenberg02bc7172010-09-06 12:13:20 +0200505 union drbd_state ns, const char **warn_sync_abort);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700506int drbd_send_state_req(struct drbd_conf *,
507 union drbd_state, union drbd_state);
508
Andreas Gruenbacherc8b32562010-12-08 01:06:16 +0100509static enum drbd_state_rv
510_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
511 union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700512{
513 union drbd_state os, ns;
514 unsigned long flags;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100515 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700516
517 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
518 return SS_CW_SUCCESS;
519
520 if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
521 return SS_CW_FAILED_BY_PEER;
522
523 rv = 0;
524 spin_lock_irqsave(&mdev->req_lock, flags);
525 os = mdev->state;
526 ns.i = (os.i & ~mask.i) | val.i;
527 ns = sanitize_state(mdev, os, ns, NULL);
528
529 if (!cl_wide_st_chg(mdev, os, ns))
530 rv = SS_CW_NO_NEED;
531 if (!rv) {
532 rv = is_valid_state(mdev, ns);
533 if (rv == SS_SUCCESS) {
534 rv = is_valid_state_transition(mdev, ns, os);
535 if (rv == SS_SUCCESS)
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100536 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
Philipp Reisnerb411b362009-09-25 16:07:19 -0700537 }
538 }
539 spin_unlock_irqrestore(&mdev->req_lock, flags);
540
541 return rv;
542}
543
544/**
 545 * drbd_req_state() - Perform a possibly cluster-wide state change
546 * @mdev: DRBD device.
547 * @mask: mask of state bits to change.
548 * @val: value of new state bits.
549 * @f: flags
550 *
551 * Should not be called directly, use drbd_request_state() or
552 * _drbd_request_state().
553 */
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100554static enum drbd_state_rv
555drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
556 union drbd_state val, enum chg_state_flags f)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700557{
558 struct completion done;
559 unsigned long flags;
560 union drbd_state os, ns;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100561 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700562
563 init_completion(&done);
564
565 if (f & CS_SERIALIZE)
566 mutex_lock(&mdev->state_mutex);
567
568 spin_lock_irqsave(&mdev->req_lock, flags);
569 os = mdev->state;
570 ns.i = (os.i & ~mask.i) | val.i;
571 ns = sanitize_state(mdev, os, ns, NULL);
572
573 if (cl_wide_st_chg(mdev, os, ns)) {
574 rv = is_valid_state(mdev, ns);
575 if (rv == SS_SUCCESS)
576 rv = is_valid_state_transition(mdev, ns, os);
577 spin_unlock_irqrestore(&mdev->req_lock, flags);
578
579 if (rv < SS_SUCCESS) {
580 if (f & CS_VERBOSE)
581 print_st_err(mdev, os, ns, rv);
582 goto abort;
583 }
584
585 drbd_state_lock(mdev);
586 if (!drbd_send_state_req(mdev, mask, val)) {
587 drbd_state_unlock(mdev);
588 rv = SS_CW_FAILED_BY_PEER;
589 if (f & CS_VERBOSE)
590 print_st_err(mdev, os, ns, rv);
591 goto abort;
592 }
593
594 wait_event(mdev->state_wait,
595 (rv = _req_st_cond(mdev, mask, val)));
596
597 if (rv < SS_SUCCESS) {
598 drbd_state_unlock(mdev);
599 if (f & CS_VERBOSE)
600 print_st_err(mdev, os, ns, rv);
601 goto abort;
602 }
603 spin_lock_irqsave(&mdev->req_lock, flags);
604 os = mdev->state;
605 ns.i = (os.i & ~mask.i) | val.i;
606 rv = _drbd_set_state(mdev, ns, f, &done);
607 drbd_state_unlock(mdev);
608 } else {
609 rv = _drbd_set_state(mdev, ns, f, &done);
610 }
611
612 spin_unlock_irqrestore(&mdev->req_lock, flags);
613
614 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
615 D_ASSERT(current != mdev->worker.task);
616 wait_for_completion(&done);
617 }
618
619abort:
620 if (f & CS_SERIALIZE)
621 mutex_unlock(&mdev->state_mutex);
622
623 return rv;
624}
625
626/**
627 * _drbd_request_state() - Request a state change (with flags)
628 * @mdev: DRBD device.
629 * @mask: mask of state bits to change.
630 * @val: value of new state bits.
631 * @f: flags
632 *
633 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
634 * flag, or when logging of failed state change requests is not desired.
635 */
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100636enum drbd_state_rv
637_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
638 union drbd_state val, enum chg_state_flags f)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700639{
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100640 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700641
642 wait_event(mdev->state_wait,
643 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
644
645 return rv;
646}
647
648static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
649{
650 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
651 name,
652 drbd_conn_str(ns.conn),
653 drbd_role_str(ns.role),
654 drbd_role_str(ns.peer),
655 drbd_disk_str(ns.disk),
656 drbd_disk_str(ns.pdsk),
Philipp Reisnerfb22c402010-09-08 23:20:21 +0200657 is_susp(ns) ? 's' : 'r',
Philipp Reisnerb411b362009-09-25 16:07:19 -0700658 ns.aftr_isp ? 'a' : '-',
659 ns.peer_isp ? 'p' : '-',
660 ns.user_isp ? 'u' : '-'
661 );
662}
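/*
 * For illustration, assuming the usual string spellings from drbd_strings.c,
 * a line printed by print_st() looks roughly like:
 *
 *	 state = { cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate r--- }
 */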
663
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100664void print_st_err(struct drbd_conf *mdev, union drbd_state os,
665 union drbd_state ns, enum drbd_state_rv err)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700666{
667 if (err == SS_IN_TRANSIENT_STATE)
668 return;
669 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
670 print_st(mdev, " state", os);
671 print_st(mdev, "wanted", ns);
672}
673
674
Philipp Reisnerb411b362009-09-25 16:07:19 -0700675/**
676 * is_valid_state() - Returns an SS_ error code if ns is not valid
677 * @mdev: DRBD device.
678 * @ns: State to consider.
679 */
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100680static enum drbd_state_rv
681is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700682{
683 /* See drbd_state_sw_errors in drbd_strings.c */
684
685 enum drbd_fencing_p fp;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100686 enum drbd_state_rv rv = SS_SUCCESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700687
688 fp = FP_DONT_CARE;
689 if (get_ldev(mdev)) {
690 fp = mdev->ldev->dc.fencing;
691 put_ldev(mdev);
692 }
693
694 if (get_net_conf(mdev)) {
695 if (!mdev->net_conf->two_primaries &&
696 ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
697 rv = SS_TWO_PRIMARIES;
698 put_net_conf(mdev);
699 }
700
701 if (rv <= 0)
702 /* already found a reason to abort */;
703 else if (ns.role == R_SECONDARY && mdev->open_cnt)
704 rv = SS_DEVICE_IN_USE;
705
706 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
707 rv = SS_NO_UP_TO_DATE_DISK;
708
709 else if (fp >= FP_RESOURCE &&
710 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
711 rv = SS_PRIMARY_NOP;
712
713 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
714 rv = SS_NO_UP_TO_DATE_DISK;
715
716 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
717 rv = SS_NO_LOCAL_DISK;
718
719 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
720 rv = SS_NO_REMOTE_DISK;
721
Lars Ellenberg8d4ce822010-04-01 16:59:32 +0200722 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
723 rv = SS_NO_UP_TO_DATE_DISK;
724
Philipp Reisnerb411b362009-09-25 16:07:19 -0700725 else if ((ns.conn == C_CONNECTED ||
726 ns.conn == C_WF_BITMAP_S ||
727 ns.conn == C_SYNC_SOURCE ||
728 ns.conn == C_PAUSED_SYNC_S) &&
729 ns.disk == D_OUTDATED)
730 rv = SS_CONNECTED_OUTDATES;
731
732 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
733 (mdev->sync_conf.verify_alg[0] == 0))
734 rv = SS_NO_VERIFY_ALG;
735
736 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
737 mdev->agreed_pro_version < 88)
738 rv = SS_NOT_SUPPORTED;
739
Philipp Reisnerfa7d9392011-05-17 14:48:55 +0200740 else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
741 rv = SS_CONNECTED_OUTDATES;
742
Philipp Reisnerb411b362009-09-25 16:07:19 -0700743 return rv;
744}
745
746/**
747 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
748 * @mdev: DRBD device.
749 * @ns: new state.
750 * @os: old state.
751 */
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100752static enum drbd_state_rv
753is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
754 union drbd_state os)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700755{
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100756 enum drbd_state_rv rv = SS_SUCCESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700757
758 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
759 os.conn > C_CONNECTED)
760 rv = SS_RESYNC_RUNNING;
761
762 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
763 rv = SS_ALREADY_STANDALONE;
764
765 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
766 rv = SS_IS_DISKLESS;
767
768 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
769 rv = SS_NO_NET_CONFIG;
770
771 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
772 rv = SS_LOWER_THAN_OUTDATED;
773
774 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
775 rv = SS_IN_TRANSIENT_STATE;
776
777 if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
778 rv = SS_IN_TRANSIENT_STATE;
779
780 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
781 rv = SS_NEED_CONNECTION;
782
783 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
784 ns.conn != os.conn && os.conn > C_CONNECTED)
785 rv = SS_RESYNC_RUNNING;
786
787 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
788 os.conn < C_CONNECTED)
789 rv = SS_NEED_CONNECTION;
790
Philipp Reisner1fc80cf2010-11-22 14:18:47 +0100791 if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
792 && os.conn < C_WF_REPORT_PARAMS)
793 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
794
Philipp Reisnerb411b362009-09-25 16:07:19 -0700795 return rv;
796}
797
798/**
799 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
800 * @mdev: DRBD device.
801 * @os: old state.
802 * @ns: new state.
 803 * @warn_sync_abort: set to "Resync" or "Online-verify" if an ongoing resync/verify must be aborted
 804 *
 805 * When we lose the connection, we have to set the state of the peer's disk (pdsk)
806 * to D_UNKNOWN. This rule and many more along those lines are in this function.
807 */
808static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
Lars Ellenberg02bc7172010-09-06 12:13:20 +0200809 union drbd_state ns, const char **warn_sync_abort)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700810{
811 enum drbd_fencing_p fp;
Philipp Reisnerab17b68f2010-11-17 16:54:36 +0100812 enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700813
814 fp = FP_DONT_CARE;
815 if (get_ldev(mdev)) {
816 fp = mdev->ldev->dc.fencing;
817 put_ldev(mdev);
818 }
819
820 /* Disallow Network errors to configure a device's network part */
821 if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
822 os.conn <= C_DISCONNECTING)
823 ns.conn = os.conn;
824
Lars Ellenbergf2906e12010-07-21 17:04:32 +0200825 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
826 * If you try to go into some Sync* state, that shall fail (elsewhere). */
Philipp Reisnerb411b362009-09-25 16:07:19 -0700827 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
Lars Ellenbergf2906e12010-07-21 17:04:32 +0200828 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700829 ns.conn = os.conn;
830
Lars Ellenberg82f59cc2010-10-16 12:13:47 +0200831 /* we cannot fail (again) if we already detached */
832 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
833 ns.disk = D_DISKLESS;
834
835 /* if we are only D_ATTACHING yet,
836 * we can (and should) go directly to D_DISKLESS. */
837 if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
838 ns.disk = D_DISKLESS;
839
Philipp Reisnerb411b362009-09-25 16:07:19 -0700840 /* After C_DISCONNECTING only C_STANDALONE may follow */
841 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
842 ns.conn = os.conn;
843
844 if (ns.conn < C_CONNECTED) {
845 ns.peer_isp = 0;
846 ns.peer = R_UNKNOWN;
847 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
848 ns.pdsk = D_UNKNOWN;
849 }
850
851 /* Clear the aftr_isp when becoming unconfigured */
852 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
853 ns.aftr_isp = 0;
854
Philipp Reisnerb411b362009-09-25 16:07:19 -0700855 /* Abort resync if a disk fails/detaches */
856 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
857 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
858 if (warn_sync_abort)
Lars Ellenberg02bc7172010-09-06 12:13:20 +0200859 *warn_sync_abort =
860 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
861 "Online-verify" : "Resync";
Philipp Reisnerb411b362009-09-25 16:07:19 -0700862 ns.conn = C_CONNECTED;
863 }
864
Philipp Reisnerb411b362009-09-25 16:07:19 -0700865 /* Connection breaks down before we finished "Negotiating" */
866 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
867 get_ldev_if_state(mdev, D_NEGOTIATING)) {
868 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
869 ns.disk = mdev->new_state_tmp.disk;
870 ns.pdsk = mdev->new_state_tmp.pdsk;
871 } else {
872 dev_alert(DEV, "Connection lost while negotiating, no data!\n");
873 ns.disk = D_DISKLESS;
874 ns.pdsk = D_UNKNOWN;
875 }
876 put_ldev(mdev);
877 }
878
Philipp Reisnerab17b68f2010-11-17 16:54:36 +0100879 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
880 if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
881 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
882 ns.disk = D_UP_TO_DATE;
883 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
884 ns.pdsk = D_UP_TO_DATE;
885 }
886
 887 /* Implications of the connection state on the disk states */
888 disk_min = D_DISKLESS;
889 disk_max = D_UP_TO_DATE;
890 pdsk_min = D_INCONSISTENT;
891 pdsk_max = D_UNKNOWN;
892 switch ((enum drbd_conns)ns.conn) {
893 case C_WF_BITMAP_T:
894 case C_PAUSED_SYNC_T:
895 case C_STARTING_SYNC_T:
896 case C_WF_SYNC_UUID:
897 case C_BEHIND:
898 disk_min = D_INCONSISTENT;
899 disk_max = D_OUTDATED;
900 pdsk_min = D_UP_TO_DATE;
901 pdsk_max = D_UP_TO_DATE;
902 break;
903 case C_VERIFY_S:
904 case C_VERIFY_T:
905 disk_min = D_UP_TO_DATE;
906 disk_max = D_UP_TO_DATE;
907 pdsk_min = D_UP_TO_DATE;
908 pdsk_max = D_UP_TO_DATE;
909 break;
910 case C_CONNECTED:
911 disk_min = D_DISKLESS;
912 disk_max = D_UP_TO_DATE;
913 pdsk_min = D_DISKLESS;
914 pdsk_max = D_UP_TO_DATE;
915 break;
916 case C_WF_BITMAP_S:
917 case C_PAUSED_SYNC_S:
918 case C_STARTING_SYNC_S:
919 case C_AHEAD:
920 disk_min = D_UP_TO_DATE;
921 disk_max = D_UP_TO_DATE;
922 pdsk_min = D_INCONSISTENT;
923 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
924 break;
925 case C_SYNC_TARGET:
926 disk_min = D_INCONSISTENT;
927 disk_max = D_INCONSISTENT;
928 pdsk_min = D_UP_TO_DATE;
929 pdsk_max = D_UP_TO_DATE;
930 break;
931 case C_SYNC_SOURCE:
932 disk_min = D_UP_TO_DATE;
933 disk_max = D_UP_TO_DATE;
934 pdsk_min = D_INCONSISTENT;
935 pdsk_max = D_INCONSISTENT;
936 break;
937 case C_STANDALONE:
938 case C_DISCONNECTING:
939 case C_UNCONNECTED:
940 case C_TIMEOUT:
941 case C_BROKEN_PIPE:
942 case C_NETWORK_FAILURE:
943 case C_PROTOCOL_ERROR:
944 case C_TEAR_DOWN:
945 case C_WF_CONNECTION:
946 case C_WF_REPORT_PARAMS:
947 case C_MASK:
948 break;
949 }
950 if (ns.disk > disk_max)
951 ns.disk = disk_max;
952
953 if (ns.disk < disk_min) {
954 dev_warn(DEV, "Implicitly set disk from %s to %s\n",
955 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
956 ns.disk = disk_min;
957 }
958 if (ns.pdsk > pdsk_max)
959 ns.pdsk = pdsk_max;
960
961 if (ns.pdsk < pdsk_min) {
962 dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
963 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
964 ns.pdsk = pdsk_min;
965 }
966
Philipp Reisnerb411b362009-09-25 16:07:19 -0700967 if (fp == FP_STONITH &&
Philipp Reisner0a492162009-10-21 13:08:29 +0200968 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
969 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
Philipp Reisnerfb22c402010-09-08 23:20:21 +0200970 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
Philipp Reisner265be2d2010-05-31 10:14:17 +0200971
972 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
973 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
974 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
Philipp Reisnerfb22c402010-09-08 23:20:21 +0200975 ns.susp_nod = 1; /* Suspend IO while no up-to-date data is accessible */
Philipp Reisnerb411b362009-09-25 16:07:19 -0700976
977 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
978 if (ns.conn == C_SYNC_SOURCE)
979 ns.conn = C_PAUSED_SYNC_S;
980 if (ns.conn == C_SYNC_TARGET)
981 ns.conn = C_PAUSED_SYNC_T;
982 } else {
983 if (ns.conn == C_PAUSED_SYNC_S)
984 ns.conn = C_SYNC_SOURCE;
985 if (ns.conn == C_PAUSED_SYNC_T)
986 ns.conn = C_SYNC_TARGET;
987 }
988
989 return ns;
990}
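/*
 * Worked example of the rules above (illustrative only, assuming we start out
 * Connected with an UpToDate peer disk):
 */
#if 0
static void example_sanitize(struct drbd_conf *mdev)
{
	union drbd_state os = mdev->state;	/* assume os.conn == C_CONNECTED, os.pdsk == D_UP_TO_DATE */
	union drbd_state ns = os;

	ns.conn = C_TIMEOUT;			/* a network error is being reported */
	ns = sanitize_state(mdev, os, ns, NULL);
	/* since ns.conn dropped below C_CONNECTED, sanitize_state() cleared the
	 * peer related bits: ns.peer == R_UNKNOWN, ns.peer_isp == 0,
	 * ns.pdsk == D_UNKNOWN */
}
#endif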
991
992/* helper for __drbd_set_state */
993static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
994{
Lars Ellenberg30b743a2010-11-05 09:39:06 +0100995 if (mdev->agreed_pro_version < 90)
996 mdev->ov_start_sector = 0;
997 mdev->rs_total = drbd_bm_bits(mdev);
998 mdev->ov_position = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700999 if (cs == C_VERIFY_T) {
1000 /* starting online verify from an arbitrary position
1001 * does not fit well into the existing protocol.
1002 * on C_VERIFY_T, we initialize ov_left and friends
1003 * implicitly in receive_DataRequest once the
1004 * first P_OV_REQUEST is received */
1005 mdev->ov_start_sector = ~(sector_t)0;
1006 } else {
1007 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
Lars Ellenberg30b743a2010-11-05 09:39:06 +01001008 if (bit >= mdev->rs_total) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001009 mdev->ov_start_sector =
1010 BM_BIT_TO_SECT(mdev->rs_total - 1);
Lars Ellenberg30b743a2010-11-05 09:39:06 +01001011 mdev->rs_total = 1;
1012 } else
1013 mdev->rs_total -= bit;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001014 mdev->ov_position = mdev->ov_start_sector;
1015 }
Lars Ellenberg30b743a2010-11-05 09:39:06 +01001016 mdev->ov_left = mdev->rs_total;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001017}
1018
Philipp Reisner07782862010-08-31 12:00:50 +02001019static void drbd_resume_al(struct drbd_conf *mdev)
1020{
1021 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1022 dev_info(DEV, "Resumed AL updates\n");
1023}
1024
Philipp Reisnerb411b362009-09-25 16:07:19 -07001025/**
1026 * __drbd_set_state() - Set a new DRBD state
1027 * @mdev: DRBD device.
1028 * @ns: new state.
1029 * @flags: Flags
 1030 * @done: Optional completion; will be completed after after_state_ch() has finished
1031 *
1032 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1033 */
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01001034enum drbd_state_rv
1035__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1036 enum chg_state_flags flags, struct completion *done)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001037{
1038 union drbd_state os;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01001039 enum drbd_state_rv rv = SS_SUCCESS;
Lars Ellenberg02bc7172010-09-06 12:13:20 +02001040 const char *warn_sync_abort = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001041 struct after_state_chg_work *ascw;
1042
1043 os = mdev->state;
1044
1045 ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
1046
1047 if (ns.i == os.i)
1048 return SS_NOTHING_TO_DO;
1049
1050 if (!(flags & CS_HARD)) {
1051 /* pre-state-change checks ; only look at ns */
1052 /* See drbd_state_sw_errors in drbd_strings.c */
1053
1054 rv = is_valid_state(mdev, ns);
1055 if (rv < SS_SUCCESS) {
1056 /* If the old state was illegal as well, then let
1057 this happen...*/
1058
Philipp Reisner1616a252010-06-10 16:55:15 +02001059 if (is_valid_state(mdev, os) == rv)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001060 rv = is_valid_state_transition(mdev, ns, os);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001061 } else
1062 rv = is_valid_state_transition(mdev, ns, os);
1063 }
1064
1065 if (rv < SS_SUCCESS) {
1066 if (flags & CS_VERBOSE)
1067 print_st_err(mdev, os, ns, rv);
1068 return rv;
1069 }
1070
1071 if (warn_sync_abort)
Lars Ellenberg02bc7172010-09-06 12:13:20 +02001072 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001073
1074 {
Andreas Gruenbacher662d91a2010-12-07 03:01:41 +01001075 char *pbp, pb[300];
1076 pbp = pb;
1077 *pbp = 0;
1078 if (ns.role != os.role)
1079 pbp += sprintf(pbp, "role( %s -> %s ) ",
1080 drbd_role_str(os.role),
1081 drbd_role_str(ns.role));
1082 if (ns.peer != os.peer)
1083 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1084 drbd_role_str(os.peer),
1085 drbd_role_str(ns.peer));
1086 if (ns.conn != os.conn)
1087 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1088 drbd_conn_str(os.conn),
1089 drbd_conn_str(ns.conn));
1090 if (ns.disk != os.disk)
1091 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1092 drbd_disk_str(os.disk),
1093 drbd_disk_str(ns.disk));
1094 if (ns.pdsk != os.pdsk)
1095 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1096 drbd_disk_str(os.pdsk),
1097 drbd_disk_str(ns.pdsk));
1098 if (is_susp(ns) != is_susp(os))
1099 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1100 is_susp(os),
1101 is_susp(ns));
1102 if (ns.aftr_isp != os.aftr_isp)
1103 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1104 os.aftr_isp,
1105 ns.aftr_isp);
1106 if (ns.peer_isp != os.peer_isp)
1107 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1108 os.peer_isp,
1109 ns.peer_isp);
1110 if (ns.user_isp != os.user_isp)
1111 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1112 os.user_isp,
1113 ns.user_isp);
1114 dev_info(DEV, "%s\n", pb);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001115 }
1116
1117 /* solve the race between becoming unconfigured,
1118 * worker doing the cleanup, and
1119 * admin reconfiguring us:
1120 * on (re)configure, first set CONFIG_PENDING,
1121 * then wait for a potentially exiting worker,
1122 * start the worker, and schedule one no_op.
1123 * then proceed with configuration.
1124 */
1125 if (ns.disk == D_DISKLESS &&
1126 ns.conn == C_STANDALONE &&
1127 ns.role == R_SECONDARY &&
1128 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1129 set_bit(DEVICE_DYING, &mdev->flags);
1130
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001131 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1132 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1133 * drbd_ldev_destroy() won't happen before our corresponding
1134 * after_state_ch works run, where we put_ldev again. */
1135 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1136 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1137 atomic_inc(&mdev->local_cnt);
1138
1139 mdev->state = ns;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01001140
1141 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1142 drbd_print_uuids(mdev, "attached to UUIDs");
1143
Philipp Reisnerb411b362009-09-25 16:07:19 -07001144 wake_up(&mdev->misc_wait);
1145 wake_up(&mdev->state_wait);
1146
Philipp Reisnerb411b362009-09-25 16:07:19 -07001147 /* aborted verify run. log the last position */
1148 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1149 ns.conn < C_CONNECTED) {
1150 mdev->ov_start_sector =
Lars Ellenberg30b743a2010-11-05 09:39:06 +01001151 BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001152 dev_info(DEV, "Online Verify reached sector %llu\n",
1153 (unsigned long long)mdev->ov_start_sector);
1154 }
1155
1156 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1157 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1158 dev_info(DEV, "Syncer continues.\n");
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001159 mdev->rs_paused += (long)jiffies
1160 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
Philipp Reisner63106d32010-09-01 15:47:15 +02001161 if (ns.conn == C_SYNC_TARGET)
1162 mod_timer(&mdev->resync_timer, jiffies);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001163 }
1164
1165 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1166 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1167 dev_info(DEV, "Resync suspended\n");
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001168 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001169 }
1170
1171 if (os.conn == C_CONNECTED &&
1172 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001173 unsigned long now = jiffies;
1174 int i;
1175
Lars Ellenberg30b743a2010-11-05 09:39:06 +01001176 set_ov_position(mdev, ns.conn);
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001177 mdev->rs_start = now;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001178 mdev->rs_last_events = 0;
1179 mdev->rs_last_sect_ev = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001180 mdev->ov_last_oos_size = 0;
1181 mdev->ov_last_oos_start = 0;
1182
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001183 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
Lars Ellenberg30b743a2010-11-05 09:39:06 +01001184 mdev->rs_mark_left[i] = mdev->ov_left;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001185 mdev->rs_mark_time[i] = now;
1186 }
1187
Lars Ellenberg2649f082010-11-05 10:05:47 +01001188 drbd_rs_controller_reset(mdev);
1189
Philipp Reisnerb411b362009-09-25 16:07:19 -07001190 if (ns.conn == C_VERIFY_S) {
1191 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1192 (unsigned long long)mdev->ov_position);
1193 mod_timer(&mdev->resync_timer, jiffies);
1194 }
1195 }
1196
1197 if (get_ldev(mdev)) {
1198 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1199 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1200 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1201
1202 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1203 mdf |= MDF_CRASHED_PRIMARY;
1204 if (mdev->state.role == R_PRIMARY ||
1205 (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1206 mdf |= MDF_PRIMARY_IND;
1207 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1208 mdf |= MDF_CONNECTED_IND;
1209 if (mdev->state.disk > D_INCONSISTENT)
1210 mdf |= MDF_CONSISTENT;
1211 if (mdev->state.disk > D_OUTDATED)
1212 mdf |= MDF_WAS_UP_TO_DATE;
1213 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1214 mdf |= MDF_PEER_OUT_DATED;
1215 if (mdf != mdev->ldev->md.flags) {
1216 mdev->ldev->md.flags = mdf;
1217 drbd_md_mark_dirty(mdev);
1218 }
1219 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1220 drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1221 put_ldev(mdev);
1222 }
1223
1224 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1225 if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1226 os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1227 set_bit(CONSIDER_RESYNC, &mdev->flags);
1228
1229 /* Receiver should clean up itself */
1230 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1231 drbd_thread_stop_nowait(&mdev->receiver);
1232
1233 /* Now the receiver finished cleaning up itself, it should die */
1234 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1235 drbd_thread_stop_nowait(&mdev->receiver);
1236
1237 /* Upon network failure, we need to restart the receiver. */
1238 if (os.conn > C_TEAR_DOWN &&
1239 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1240 drbd_thread_restart_nowait(&mdev->receiver);
1241
Philipp Reisner07782862010-08-31 12:00:50 +02001242 /* Resume AL writing if we get a connection */
1243 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1244 drbd_resume_al(mdev);
1245
Philipp Reisnerb411b362009-09-25 16:07:19 -07001246 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1247 if (ascw) {
1248 ascw->os = os;
1249 ascw->ns = ns;
1250 ascw->flags = flags;
1251 ascw->w.cb = w_after_state_ch;
1252 ascw->done = done;
1253 drbd_queue_work(&mdev->data.work, &ascw->w);
1254 } else {
1255 dev_warn(DEV, "Could not kmalloc an ascw\n");
1256 }
1257
1258 return rv;
1259}
1260
1261static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1262{
1263 struct after_state_chg_work *ascw =
1264 container_of(w, struct after_state_chg_work, w);
1265 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1266 if (ascw->flags & CS_WAIT_COMPLETE) {
1267 D_ASSERT(ascw->done != NULL);
1268 complete(ascw->done);
1269 }
1270 kfree(ascw);
1271
1272 return 1;
1273}
1274
1275static void abw_start_sync(struct drbd_conf *mdev, int rv)
1276{
1277 if (rv) {
 1278 dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
1279 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1280 return;
1281 }
1282
1283 switch (mdev->state.conn) {
1284 case C_STARTING_SYNC_T:
1285 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1286 break;
1287 case C_STARTING_SYNC_S:
1288 drbd_start_resync(mdev, C_SYNC_SOURCE);
1289 break;
1290 }
1291}
1292
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001293int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
1294 int (*io_fn)(struct drbd_conf *),
1295 char *why, enum bm_flag flags)
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001296{
1297 int rv;
1298
1299 D_ASSERT(current == mdev->worker.task);
1300
1301 /* open coded non-blocking drbd_suspend_io(mdev); */
1302 set_bit(SUSPEND_IO, &mdev->flags);
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001303
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001304 drbd_bm_lock(mdev, why, flags);
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001305 rv = io_fn(mdev);
1306 drbd_bm_unlock(mdev);
1307
1308 drbd_resume_io(mdev);
1309
1310 return rv;
1311}
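/*
 * Usage note: see the callers in after_state_ch() below, e.g. on demotion the
 * whole bitmap is written out with
 *	drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote", BM_LOCKED_TEST_ALLOWED);
 * the wrapper merely suspends IO and holds the bitmap lock around io_fn().
 */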
1312
Philipp Reisnerb411b362009-09-25 16:07:19 -07001313/**
1314 * after_state_ch() - Perform after state change actions that may sleep
1315 * @mdev: DRBD device.
1316 * @os: old state.
1317 * @ns: new state.
1318 * @flags: Flags
1319 */
1320static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1321 union drbd_state ns, enum chg_state_flags flags)
1322{
1323 enum drbd_fencing_p fp;
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001324 enum drbd_req_event what = NOTHING;
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001325 union drbd_state nsm = (union drbd_state){ .i = -1 };
Philipp Reisnerb411b362009-09-25 16:07:19 -07001326
1327 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1328 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1329 if (mdev->p_uuid)
1330 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1331 }
1332
1333 fp = FP_DONT_CARE;
1334 if (get_ldev(mdev)) {
1335 fp = mdev->ldev->dc.fencing;
1336 put_ldev(mdev);
1337 }
1338
1339 /* Inform userspace about the change... */
1340 drbd_bcast_state(mdev, ns);
1341
1342 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1343 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1344 drbd_khelper(mdev, "pri-on-incon-degr");
1345
1346 /* Here we have the actions that are performed after a
1347 state change. This function might sleep */
1348
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001349 nsm.i = -1;
1350 if (ns.susp_nod) {
Philipp Reisner3f986882010-12-20 14:48:20 +01001351 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001352 what = RESEND;
Philipp Reisner265be2d2010-05-31 10:14:17 +02001353
Philipp Reisner67098932010-06-24 16:24:25 +02001354 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001355 what = RESTART_FROZEN_DISK_IO;
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001356
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001357 if (what != NOTHING)
Philipp Reisner3f986882010-12-20 14:48:20 +01001358 nsm.susp_nod = 0;
Philipp Reisner265be2d2010-05-31 10:14:17 +02001359 }
1360
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001361 if (ns.susp_fen) {
Philipp Reisner43a51822010-06-11 11:26:34 +02001362 /* case1: The outdate peer handler is successful: */
1363 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001364 tl_clear(mdev);
Philipp Reisner43a51822010-06-11 11:26:34 +02001365 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1366 drbd_uuid_new_current(mdev);
1367 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner43a51822010-06-11 11:26:34 +02001368 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001369 spin_lock_irq(&mdev->req_lock);
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001370 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001371 spin_unlock_irq(&mdev->req_lock);
1372 }
Philipp Reisner43a51822010-06-11 11:26:34 +02001373 /* case2: The connection was established again: */
1374 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1375 clear_bit(NEW_CUR_UUID, &mdev->flags);
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001376 what = RESEND;
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001377 nsm.susp_fen = 0;
Philipp Reisner43a51822010-06-11 11:26:34 +02001378 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001379 }
Philipp Reisner67098932010-06-24 16:24:25 +02001380
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001381 if (what != NOTHING) {
Philipp Reisner67098932010-06-24 16:24:25 +02001382 spin_lock_irq(&mdev->req_lock);
1383 _tl_restart(mdev, what);
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001384 nsm.i &= mdev->state.i;
1385 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
Philipp Reisner67098932010-06-24 16:24:25 +02001386 spin_unlock_irq(&mdev->req_lock);
1387 }
1388
Lars Ellenberg5a22db82010-12-17 21:14:23 +01001389 /* Became sync source. With protocol >= 96, we still need to send out
1390 * the sync uuid now. Need to do that before any drbd_send_state, or
1391 * the other side may go "paused sync" before receiving the sync uuids,
1392 * which is unexpected. */
1393 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1394 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1395 mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1396 drbd_gen_and_send_sync_uuid(mdev);
1397 put_ldev(mdev);
1398 }
1399
Philipp Reisnerb411b362009-09-25 16:07:19 -07001400 /* Do not change the order of the if above and the two below... */
1401 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1402 drbd_send_uuids(mdev);
1403 drbd_send_state(mdev);
1404 }
Lars Ellenberg54b956a2011-01-20 10:47:53 +01001405 /* No point in queuing send_bitmap if we don't have a connection
1406 * anymore, so check also the _current_ state, not only the new state
1407 * at the time this work was queued. */
1408 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1409 mdev->state.conn == C_WF_BITMAP_S)
1410 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001411 "send_bitmap (WFBitMapS)",
1412 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001413
1414 /* Lost contact to peer's copy of the data */
1415 if ((os.pdsk >= D_INCONSISTENT &&
1416 os.pdsk != D_UNKNOWN &&
1417 os.pdsk != D_OUTDATED)
1418 && (ns.pdsk < D_INCONSISTENT ||
1419 ns.pdsk == D_UNKNOWN ||
1420 ns.pdsk == D_OUTDATED)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001421 if (get_ldev(mdev)) {
1422 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
Philipp Reisner2c8d1962010-05-25 14:32:03 +02001423 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001424 if (is_susp(mdev->state)) {
Philipp Reisner43a51822010-06-11 11:26:34 +02001425 set_bit(NEW_CUR_UUID, &mdev->flags);
1426 } else {
1427 drbd_uuid_new_current(mdev);
1428 drbd_send_uuids(mdev);
1429 }
Philipp Reisner2c8d1962010-05-25 14:32:03 +02001430 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001431 put_ldev(mdev);
1432 }
1433 }
1434
1435 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
Philipp Reisner18a50fa2010-06-21 14:14:15 +02001436 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
Philipp Reisner2c8d1962010-05-25 14:32:03 +02001437 drbd_uuid_new_current(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02001438 drbd_send_uuids(mdev);
1439 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001440
1441 /* D_DISKLESS Peer becomes secondary */
1442 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001443 /* We may still be Primary ourselves.
1444 * No harm done if the bitmap still changes,
1445 * redirtied pages will follow later. */
1446 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1447 "demote diskless peer", BM_LOCKED_SET_ALLOWED);
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001448 put_ldev(mdev);
1449 }
1450
Lars Ellenberg06d33e92010-12-18 17:00:59 +01001451 /* Write out all changed bits on demote.
 1452 * Though, no need to do that just yet
1453 * if there is a resync going on still */
1454 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1455 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001456 /* No changes to the bitmap expected this time, so assert that,
1457 * even though no harm was done if it did change. */
1458 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1459 "demote", BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001460 put_ldev(mdev);
1461 }
1462
1463 /* Last part of the attaching process ... */
1464 if (ns.conn >= C_CONNECTED &&
1465 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
Philipp Reisnere89b5912010-03-24 17:11:33 +01001466 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001467 drbd_send_uuids(mdev);
1468 drbd_send_state(mdev);
1469 }
1470
1471 /* We want to pause/continue resync, tell peer. */
1472 if (ns.conn >= C_CONNECTED &&
1473 ((os.aftr_isp != ns.aftr_isp) ||
1474 (os.user_isp != ns.user_isp)))
1475 drbd_send_state(mdev);
1476
1477 /* In case one of the isp bits got set, suspend other devices. */
1478 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1479 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1480 suspend_other_sg(mdev);
1481
1482	/* Make sure the peer gets informed about possible state
1483 changes (ISP bits) while we were in WFReportParams. */
1484 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1485 drbd_send_state(mdev);
1486
Philipp Reisner67531712010-10-27 12:21:30 +02001487 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1488 drbd_send_state(mdev);
1489
Philipp Reisnerb411b362009-09-25 16:07:19 -07001490	/* We are in the process of starting a full sync... */
1491 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1492 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001493 /* no other bitmap changes expected during this phase */
1494 drbd_queue_bitmap_io(mdev,
1495 &drbd_bmio_set_n_write, &abw_start_sync,
1496 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001497
1498	/* We are invalidating ourselves... */
1499 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1500 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001501 /* other bitmap operation expected during this phase */
1502 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1503 "set_n_write from invalidate", BM_LOCKED_MASK);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001504
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001505 /* first half of local IO error, failure to attach,
1506 * or administrative detach */
1507 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1508 enum drbd_io_error_p eh;
1509 int was_io_error;
1510 /* corresponding get_ldev was in __drbd_set_state, to serialize
1511 * our cleanup here with the transition to D_DISKLESS,
1512		 * so it is safe to dereference ldev here. */
1513 eh = mdev->ldev->dc.on_io_error;
1514 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1515
1516 /* current state still has to be D_FAILED,
1517 * there is only one way out: to D_DISKLESS,
1518 * and that may only happen after our put_ldev below. */
1519 if (mdev->state.disk != D_FAILED)
1520 dev_err(DEV,
1521 "ASSERT FAILED: disk is %s during detach\n",
1522 drbd_disk_str(mdev->state.disk));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001523
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001524 if (drbd_send_state(mdev))
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001525 dev_warn(DEV, "Notified peer that I am detaching my disk\n");
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001526 else
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001527 dev_err(DEV, "Sending state for detaching disk failed\n");
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001528
1529 drbd_rs_cancel_all(mdev);
1530
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001531 /* In case we want to get something to stable storage still,
1532 * this may be the last chance.
1533 * Following put_ldev may transition to D_DISKLESS. */
1534 drbd_md_sync(mdev);
1535 put_ldev(mdev);
1536
1537 if (was_io_error && eh == EP_CALL_HELPER)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001538 drbd_khelper(mdev, "local-io-error");
1539 }
1540
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001541 /* second half of local IO error, failure to attach,
1542 * or administrative detach,
1543 * after local_cnt references have reached zero again */
1544 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1545 /* We must still be diskless,
1546 * re-attach has to be serialized with this! */
1547 if (mdev->state.disk != D_DISKLESS)
1548 dev_err(DEV,
1549 "ASSERT FAILED: disk is %s while going diskless\n",
1550 drbd_disk_str(mdev->state.disk));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001551
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001552 mdev->rs_total = 0;
1553 mdev->rs_failed = 0;
1554 atomic_set(&mdev->rs_pending_cnt, 0);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001555
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001556 if (drbd_send_state(mdev))
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001557 dev_warn(DEV, "Notified peer that I'm now diskless.\n");
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001558 /* corresponding get_ldev in __drbd_set_state
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001559 * this may finally trigger drbd_ldev_destroy. */
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001560 put_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001561 }
1562
Philipp Reisner738a84b2011-03-03 00:21:30 +01001563	/* Notify peer that I had a local IO error, and did not detach. */
1564 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
1565 drbd_send_state(mdev);
1566
Philipp Reisnerb411b362009-09-25 16:07:19 -07001567 /* Disks got bigger while they were detached */
1568 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1569 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1570 if (ns.conn == C_CONNECTED)
1571 resync_after_online_grow(mdev);
1572 }
1573
1574 /* A resync finished or aborted, wake paused devices... */
1575 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1576 (os.peer_isp && !ns.peer_isp) ||
1577 (os.user_isp && !ns.user_isp))
1578 resume_next_sg(mdev);
1579
Lars Ellenbergaf85e8e2010-10-07 16:07:55 +02001580 /* sync target done with resync. Explicitly notify peer, even though
1581 * it should (at least for non-empty resyncs) already know itself. */
1582 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1583 drbd_send_state(mdev);
1584
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001585 /* This triggers bitmap writeout of potentially still unwritten pages
1586 * if the resync finished cleanly, or aborted because of peer disk
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001587 * failure, or because of connection loss.
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001588 * For resync aborted because of local disk failure, we cannot do
1589 * any bitmap writeout anymore.
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001590 * No harm done if some bits change during this phase.
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001591 */
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001592 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1593 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1594 "write from resync_finished", BM_LOCKED_SET_ALLOWED);
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001595 put_ldev(mdev);
1596 }
Lars Ellenberg02851e92010-12-16 14:47:39 +01001597
Philipp Reisnerb411b362009-09-25 16:07:19 -07001598 /* Upon network connection, we need to start the receiver */
1599 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1600 drbd_thread_start(&mdev->receiver);
1601
1602 /* Terminate worker thread if we are unconfigured - it will be
1603 restarted as needed... */
1604 if (ns.disk == D_DISKLESS &&
1605 ns.conn == C_STANDALONE &&
1606 ns.role == R_SECONDARY) {
1607 if (os.aftr_isp != ns.aftr_isp)
1608 resume_next_sg(mdev);
1609 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1610 if (test_bit(DEVICE_DYING, &mdev->flags))
1611 drbd_thread_stop_nowait(&mdev->worker);
1612 }
1613
1614 drbd_md_sync(mdev);
1615}
1616
1617
1618static int drbd_thread_setup(void *arg)
1619{
1620 struct drbd_thread *thi = (struct drbd_thread *) arg;
1621 struct drbd_conf *mdev = thi->mdev;
1622 unsigned long flags;
1623 int retval;
1624
1625restart:
1626 retval = thi->function(thi);
1627
1628 spin_lock_irqsave(&thi->t_lock, flags);
1629
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001630 /* if the receiver has been "EXITING", the last thing it did
Philipp Reisnerb411b362009-09-25 16:07:19 -07001631 * was set the conn state to "StandAlone",
1632 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1633 * and receiver thread will be "started".
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001634 * drbd_thread_start needs to set "RESTARTING" in that case.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001635 * t_state check and assignment needs to be within the same spinlock,
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001636 * so either thread_start sees EXITING, and can remap to RESTARTING,
1637	 * or thread_start sees NONE, and can proceed as normal.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001638 */
1639
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001640 if (thi->t_state == RESTARTING) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001641 dev_info(DEV, "Restarting %s\n", current->comm);
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001642 thi->t_state = RUNNING;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001643 spin_unlock_irqrestore(&thi->t_lock, flags);
1644 goto restart;
1645 }
1646
1647 thi->task = NULL;
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001648 thi->t_state = NONE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001649 smp_mb();
1650 complete(&thi->stop);
1651 spin_unlock_irqrestore(&thi->t_lock, flags);
1652
1653 dev_info(DEV, "Terminating %s\n", current->comm);
1654
1655 /* Release mod reference taken when thread was started */
1656 module_put(THIS_MODULE);
1657 return retval;
1658}
1659
1660static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1661 int (*func) (struct drbd_thread *))
1662{
1663 spin_lock_init(&thi->t_lock);
1664 thi->task = NULL;
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001665 thi->t_state = NONE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001666 thi->function = func;
1667 thi->mdev = mdev;
1668}
1669
1670int drbd_thread_start(struct drbd_thread *thi)
1671{
1672 struct drbd_conf *mdev = thi->mdev;
1673 struct task_struct *nt;
1674 unsigned long flags;
1675
1676 const char *me =
1677 thi == &mdev->receiver ? "receiver" :
1678 thi == &mdev->asender ? "asender" :
1679 thi == &mdev->worker ? "worker" : "NONSENSE";
1680
1681 /* is used from state engine doing drbd_thread_stop_nowait,
1682 * while holding the req lock irqsave */
1683 spin_lock_irqsave(&thi->t_lock, flags);
1684
1685 switch (thi->t_state) {
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001686 case NONE:
Philipp Reisnerb411b362009-09-25 16:07:19 -07001687 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1688 me, current->comm, current->pid);
1689
1690 /* Get ref on module for thread - this is released when thread exits */
1691 if (!try_module_get(THIS_MODULE)) {
1692 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1693 spin_unlock_irqrestore(&thi->t_lock, flags);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001694 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001695 }
1696
1697 init_completion(&thi->stop);
1698 D_ASSERT(thi->task == NULL);
1699 thi->reset_cpu_mask = 1;
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001700 thi->t_state = RUNNING;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001701 spin_unlock_irqrestore(&thi->t_lock, flags);
1702 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1703
1704 nt = kthread_create(drbd_thread_setup, (void *) thi,
1705 "drbd%d_%s", mdev_to_minor(mdev), me);
1706
1707 if (IS_ERR(nt)) {
1708 dev_err(DEV, "Couldn't start thread\n");
1709
1710 module_put(THIS_MODULE);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001711 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001712 }
1713 spin_lock_irqsave(&thi->t_lock, flags);
1714 thi->task = nt;
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001715 thi->t_state = RUNNING;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001716 spin_unlock_irqrestore(&thi->t_lock, flags);
1717 wake_up_process(nt);
1718 break;
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001719 case EXITING:
1720 thi->t_state = RESTARTING;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001721 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1722 me, current->comm, current->pid);
1723 /* fall through */
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001724 case RUNNING:
1725 case RESTARTING:
Philipp Reisnerb411b362009-09-25 16:07:19 -07001726 default:
1727 spin_unlock_irqrestore(&thi->t_lock, flags);
1728 break;
1729 }
1730
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001731 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001732}
1733
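/*
 * Illustrative sketch, not part of the driver: the thread state handling
 * spelled out in drbd_thread_setup() and drbd_thread_start() above,
 * reduced to the single decision drbd_thread_start() makes.  The enum
 * values (NONE, RUNNING, EXITING, RESTARTING) are the real
 * drbd_thread_state values; the helper itself is invented for this
 * example only.
 */
static enum drbd_thread_state example_state_after_start_request(enum drbd_thread_state t)
{
	switch (t) {
	case NONE:		/* no thread yet: caller creates a new kthread */
		return RUNNING;
	case EXITING:		/* thread is winding down: ask it to loop instead */
		return RESTARTING;
	case RUNNING:
	case RESTARTING:
	default:		/* already running, or a restart is already requested */
		return t;
	}
}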
1734
1735void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1736{
1737 unsigned long flags;
1738
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001739 enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001740
1741 /* may be called from state engine, holding the req lock irqsave */
1742 spin_lock_irqsave(&thi->t_lock, flags);
1743
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01001744 if (thi->t_state == NONE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001745 spin_unlock_irqrestore(&thi->t_lock, flags);
1746 if (restart)
1747 drbd_thread_start(thi);
1748 return;
1749 }
1750
1751 if (thi->t_state != ns) {
1752 if (thi->task == NULL) {
1753 spin_unlock_irqrestore(&thi->t_lock, flags);
1754 return;
1755 }
1756
1757 thi->t_state = ns;
1758 smp_mb();
1759 init_completion(&thi->stop);
1760 if (thi->task != current)
1761 force_sig(DRBD_SIGKILL, thi->task);
1762
1763 }
1764
1765 spin_unlock_irqrestore(&thi->t_lock, flags);
1766
1767 if (wait)
1768 wait_for_completion(&thi->stop);
1769}
1770
1771#ifdef CONFIG_SMP
1772/**
1773 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1774 * @mdev: DRBD device.
1775 *
1776 * Forces all threads of a device onto the same CPU. This is beneficial for
1777 * DRBD's performance. May be overwritten by user's configuration.
1778 */
1779void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1780{
1781 int ord, cpu;
1782
1783 /* user override. */
1784 if (cpumask_weight(mdev->cpu_mask))
1785 return;
1786
1787 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1788 for_each_online_cpu(cpu) {
1789 if (ord-- == 0) {
1790 cpumask_set_cpu(cpu, mdev->cpu_mask);
1791 return;
1792 }
1793 }
1794 /* should not be reached */
1795 cpumask_setall(mdev->cpu_mask);
1796}
1797
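/*
 * Illustrative sketch, not part of the driver: the CPU selection above is
 * plain modular arithmetic - the device's minor number modulo the number
 * of online CPUs, taken as an index into the online CPU iteration order.
 * With an assumed minor of 5 and 4 online CPUs this yields index 1, i.e.
 * the second online CPU.  The helper name is invented for this example.
 */
static int example_cpu_index_for_minor(int minor, int nr_online_cpus)
{
	/* same arithmetic as the "ord" countdown in drbd_calc_cpu_mask() */
	return minor % nr_online_cpus;
}
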
1798/**
1799 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1800 * @mdev: DRBD device.
1801 *
1802 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1803 * prematurely.
1804 */
1805void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1806{
1807 struct task_struct *p = current;
1808 struct drbd_thread *thi =
1809 p == mdev->asender.task ? &mdev->asender :
1810 p == mdev->receiver.task ? &mdev->receiver :
1811 p == mdev->worker.task ? &mdev->worker :
1812 NULL;
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001813 if (!expect(thi != NULL))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001814 return;
1815 if (!thi->reset_cpu_mask)
1816 return;
1817 thi->reset_cpu_mask = 0;
1818 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1819}
1820#endif
1821
1822/* the appropriate socket mutex must be held already */
1823int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001824 enum drbd_packets cmd, struct p_header80 *h,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001825 size_t size, unsigned msg_flags)
1826{
1827 int sent, ok;
1828
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001829 if (!expect(h))
1830 return false;
1831 if (!expect(size))
1832 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001833
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01001834 h->magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001835 h->command = cpu_to_be16(cmd);
Philipp Reisner0b70a132010-08-20 13:36:10 +02001836 h->length = cpu_to_be16(size-sizeof(struct p_header80));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001837
Philipp Reisnerb411b362009-09-25 16:07:19 -07001838 sent = drbd_send(mdev, sock, h, size, msg_flags);
1839
1840 ok = (sent == size);
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001841 if (!ok && !signal_pending(current))
1842 dev_warn(DEV, "short sent %s size=%d sent=%d\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001843 cmdname(cmd), (int)size, sent);
1844 return ok;
1845}
1846
1847/* don't pass the socket. we may only look at it
1848 * when we hold the appropriate socket mutex.
1849 */
1850int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001851 enum drbd_packets cmd, struct p_header80 *h, size_t size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001852{
1853 int ok = 0;
1854 struct socket *sock;
1855
1856 if (use_data_socket) {
1857 mutex_lock(&mdev->data.mutex);
1858 sock = mdev->data.socket;
1859 } else {
1860 mutex_lock(&mdev->meta.mutex);
1861 sock = mdev->meta.socket;
1862 }
1863
1864 /* drbd_disconnect() could have called drbd_free_sock()
1865 * while we were waiting in down()... */
1866 if (likely(sock != NULL))
1867 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1868
1869 if (use_data_socket)
1870 mutex_unlock(&mdev->data.mutex);
1871 else
1872 mutex_unlock(&mdev->meta.mutex);
1873 return ok;
1874}
1875
1876int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1877 size_t size)
1878{
Philipp Reisner0b70a132010-08-20 13:36:10 +02001879 struct p_header80 h;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001880 int ok;
1881
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01001882 h.magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001883 h.command = cpu_to_be16(cmd);
1884 h.length = cpu_to_be16(size);
1885
1886 if (!drbd_get_data_sock(mdev))
1887 return 0;
1888
Philipp Reisnerb411b362009-09-25 16:07:19 -07001889 ok = (sizeof(h) ==
1890 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1891 ok = ok && (size ==
1892 drbd_send(mdev, mdev->data.socket, data, size, 0));
1893
1894 drbd_put_data_sock(mdev);
1895
1896 return ok;
1897}
1898
1899int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1900{
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001901 struct p_rs_param_95 *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001902 struct socket *sock;
1903 int size, rv;
1904 const int apv = mdev->agreed_pro_version;
1905
1906 size = apv <= 87 ? sizeof(struct p_rs_param)
1907 : apv == 88 ? sizeof(struct p_rs_param)
1908 + strlen(mdev->sync_conf.verify_alg) + 1
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001909 : apv <= 94 ? sizeof(struct p_rs_param_89)
1910 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001911
1912 /* used from admin command context and receiver/worker context.
1913 * to avoid kmalloc, grab the socket right here,
1914 * then use the pre-allocated sbuf there */
1915 mutex_lock(&mdev->data.mutex);
1916 sock = mdev->data.socket;
1917
1918 if (likely(sock != NULL)) {
1919 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1920
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001921 p = &mdev->data.sbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001922
1923 /* initialize verify_alg and csums_alg */
1924 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1925
1926 p->rate = cpu_to_be32(sc->rate);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001927 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1928 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1929 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1930 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001931
1932 if (apv >= 88)
1933 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1934 if (apv >= 89)
1935 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1936
1937 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1938 } else
1939 rv = 0; /* not ok */
1940
1941 mutex_unlock(&mdev->data.mutex);
1942
1943 return rv;
1944}
1945
1946int drbd_send_protocol(struct drbd_conf *mdev)
1947{
1948 struct p_protocol *p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01001949 int size, cf, rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001950
1951 size = sizeof(struct p_protocol);
1952
1953 if (mdev->agreed_pro_version >= 87)
1954 size += strlen(mdev->net_conf->integrity_alg) + 1;
1955
1956 /* we must not recurse into our own queue,
1957 * as that is blocked during handshake */
1958 p = kmalloc(size, GFP_NOIO);
1959 if (p == NULL)
1960 return 0;
1961
1962 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
1963 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
1964 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
1965 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001966 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1967
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01001968 cf = 0;
1969 if (mdev->net_conf->want_lose)
1970 cf |= CF_WANT_LOSE;
1971 if (mdev->net_conf->dry_run) {
1972 if (mdev->agreed_pro_version >= 92)
1973 cf |= CF_DRY_RUN;
1974 else {
1975 dev_err(DEV, "--dry-run is not supported by peer");
Dan Carpenter7ac314c2010-04-22 14:27:23 +02001976 kfree(p);
Philipp Reisner148efa12011-01-15 00:21:15 +01001977 return -1;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01001978 }
1979 }
1980 p->conn_flags = cpu_to_be32(cf);
1981
Philipp Reisnerb411b362009-09-25 16:07:19 -07001982 if (mdev->agreed_pro_version >= 87)
1983 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1984
1985 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001986 (struct p_header80 *)p, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001987 kfree(p);
1988 return rv;
1989}
1990
1991int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1992{
1993 struct p_uuids p;
1994 int i;
1995
1996 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1997 return 1;
1998
1999 for (i = UI_CURRENT; i < UI_SIZE; i++)
2000 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2001
2002 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2003 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2004 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2005 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2006 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2007 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2008
2009 put_ldev(mdev);
2010
2011 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002012 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002013}
2014
2015int drbd_send_uuids(struct drbd_conf *mdev)
2016{
2017 return _drbd_send_uuids(mdev, 0);
2018}
2019
2020int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2021{
2022 return _drbd_send_uuids(mdev, 8);
2023}
2024
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002025void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2026{
2027 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2028 u64 *uuid = mdev->ldev->md.uuid;
2029 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2030 text,
2031 (unsigned long long)uuid[UI_CURRENT],
2032 (unsigned long long)uuid[UI_BITMAP],
2033 (unsigned long long)uuid[UI_HISTORY_START],
2034 (unsigned long long)uuid[UI_HISTORY_END]);
2035 put_ldev(mdev);
2036 } else {
2037 dev_info(DEV, "%s effective data uuid: %016llX\n",
2038 text,
2039 (unsigned long long)mdev->ed_uuid);
2040 }
2041}
2042
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002043int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002044{
2045 struct p_rs_uuid p;
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002046 u64 uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002047
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002048 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
2049
Philipp Reisner4a23f262011-01-11 17:42:17 +01002050 uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002051 drbd_uuid_set(mdev, UI_BITMAP, uuid);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002052 drbd_print_uuids(mdev, "updated sync UUID");
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002053 drbd_md_sync(mdev);
2054 p.uuid = cpu_to_be64(uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002055
2056 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002057 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002058}
2059
Philipp Reisnere89b5912010-03-24 17:11:33 +01002060int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002061{
2062 struct p_sizes p;
2063 sector_t d_size, u_size;
Philipp Reisner99432fc2011-05-20 16:39:13 +02002064 int q_order_type, max_bio_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002065 int ok;
2066
2067 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2068 D_ASSERT(mdev->ldev->backing_bdev);
2069 d_size = drbd_get_max_capacity(mdev->ldev);
2070 u_size = mdev->ldev->dc.disk_size;
2071 q_order_type = drbd_queue_order_type(mdev);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002072 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2073 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002074 put_ldev(mdev);
2075 } else {
2076 d_size = 0;
2077 u_size = 0;
2078 q_order_type = QUEUE_ORDERED_NONE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02002079 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002080 }
2081
2082 p.d_size = cpu_to_be64(d_size);
2083 p.u_size = cpu_to_be64(u_size);
2084 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
Philipp Reisner99432fc2011-05-20 16:39:13 +02002085 p.max_bio_size = cpu_to_be32(max_bio_size);
Philipp Reisnere89b5912010-03-24 17:11:33 +01002086 p.queue_order_type = cpu_to_be16(q_order_type);
2087 p.dds_flags = cpu_to_be16(flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002088
2089 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002090 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002091 return ok;
2092}
2093
2094/**
2095 * drbd_send_state() - Sends the drbd state to the peer
2096 * @mdev: DRBD device.
2097 */
2098int drbd_send_state(struct drbd_conf *mdev)
2099{
2100 struct socket *sock;
2101 struct p_state p;
2102 int ok = 0;
2103
2104	/* Grab state lock so we won't send state if we're in the middle
2105 * of a cluster wide state change on another thread */
2106 drbd_state_lock(mdev);
2107
2108 mutex_lock(&mdev->data.mutex);
2109
2110 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2111 sock = mdev->data.socket;
2112
2113 if (likely(sock != NULL)) {
2114 ok = _drbd_send_cmd(mdev, sock, P_STATE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002115 (struct p_header80 *)&p, sizeof(p), 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002116 }
2117
2118 mutex_unlock(&mdev->data.mutex);
2119
2120 drbd_state_unlock(mdev);
2121 return ok;
2122}
2123
2124int drbd_send_state_req(struct drbd_conf *mdev,
2125 union drbd_state mask, union drbd_state val)
2126{
2127 struct p_req_state p;
2128
2129 p.mask = cpu_to_be32(mask.i);
2130 p.val = cpu_to_be32(val.i);
2131
2132 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002133 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002134}
2135
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01002136int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002137{
2138 struct p_req_state_reply p;
2139
2140 p.retcode = cpu_to_be32(retcode);
2141
2142 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002143 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002144}
2145
2146int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2147 struct p_compressed_bm *p,
2148 struct bm_xfer_ctx *c)
2149{
2150 struct bitstream bs;
2151 unsigned long plain_bits;
2152 unsigned long tmp;
2153 unsigned long rl;
2154 unsigned len;
2155 unsigned toggle;
2156 int bits;
2157
2158 /* may we use this feature? */
2159 if ((mdev->sync_conf.use_rle == 0) ||
2160 (mdev->agreed_pro_version < 90))
2161 return 0;
2162
2163 if (c->bit_offset >= c->bm_bits)
2164 return 0; /* nothing to do. */
2165
2166	/* use at most this many bytes */
2167 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2168 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2169 /* plain bits covered in this code string */
2170 plain_bits = 0;
2171
2172 /* p->encoding & 0x80 stores whether the first run length is set.
2173 * bit offset is implicit.
2174 * start with toggle == 2 to be able to tell the first iteration */
2175 toggle = 2;
2176
2177 /* see how much plain bits we can stuff into one packet
2178 * using RLE and VLI. */
2179 do {
2180 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2181 : _drbd_bm_find_next(mdev, c->bit_offset);
2182 if (tmp == -1UL)
2183 tmp = c->bm_bits;
2184 rl = tmp - c->bit_offset;
2185
2186 if (toggle == 2) { /* first iteration */
2187 if (rl == 0) {
2188 /* the first checked bit was set,
2189 * store start value, */
2190 DCBP_set_start(p, 1);
2191 /* but skip encoding of zero run length */
2192 toggle = !toggle;
2193 continue;
2194 }
2195 DCBP_set_start(p, 0);
2196 }
2197
2198 /* paranoia: catch zero runlength.
2199 * can only happen if bitmap is modified while we scan it. */
2200 if (rl == 0) {
2201 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2202 "t:%u bo:%lu\n", toggle, c->bit_offset);
2203 return -1;
2204 }
2205
2206 bits = vli_encode_bits(&bs, rl);
2207 if (bits == -ENOBUFS) /* buffer full */
2208 break;
2209 if (bits <= 0) {
2210 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2211 return 0;
2212 }
2213
2214 toggle = !toggle;
2215 plain_bits += rl;
2216 c->bit_offset = tmp;
2217 } while (c->bit_offset < c->bm_bits);
2218
2219 len = bs.cur.b - p->code + !!bs.cur.bit;
2220
2221 if (plain_bits < (len << 3)) {
2222 /* incompressible with this method.
2223 * we need to rewind both word and bit position. */
2224 c->bit_offset -= plain_bits;
2225 bm_xfer_ctx_bit_to_word_offset(c);
2226 c->bit_offset = c->word_offset * BITS_PER_LONG;
2227 return 0;
2228 }
2229
2230 /* RLE + VLI was able to compress it just fine.
2231 * update c->word_offset. */
2232 bm_xfer_ctx_bit_to_word_offset(c);
2233
2234 /* store pad_bits */
2235 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2236
2237 return len;
2238}
2239
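/*
 * Illustrative sketch, not part of the driver: the core of the RLE step
 * above is "measure the length of the current run of equal bits, then
 * flip polarity and continue".  The helper below does that over a plain
 * byte array instead of the DRBD bitmap, with the find_next()-style scan
 * replaced by a linear walk; all names here are invented for the example.
 * The compressibility test above works the same way in reverse: the
 * encoding is thrown away when it spans fewer plain bits than its own
 * length in bits (plain_bits < len * 8).
 */
static unsigned long example_run_length(const unsigned char *bits,
					unsigned long nbits,
					unsigned long from, int polarity)
{
	unsigned long i;

	/* count consecutive bits equal to "polarity", starting at "from" */
	for (i = from; i < nbits; i++) {
		int bit = (bits[i / 8] >> (i % 8)) & 1;
		if (bit != polarity)
			break;
	}
	return i - from;	/* may be 0 if the first bit already differs */
}
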
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002240/**
2241 * send_bitmap_rle_or_plain
2242 *
2243 * Return 0 when done, 1 when another iteration is needed, and a negative error
2244 * code upon failure.
2245 */
2246static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07002247send_bitmap_rle_or_plain(struct drbd_conf *mdev,
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002248 struct p_header80 *h, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002249{
2250 struct p_compressed_bm *p = (void*)h;
2251 unsigned long num_words;
2252 int len;
2253 int ok;
2254
2255 len = fill_bitmap_rle_bits(mdev, p, c);
2256
2257 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002258 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002259
2260 if (len) {
2261 DCBP_set_code(p, RLE_VLI_Bits);
2262 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2263 sizeof(*p) + len, 0);
2264
2265 c->packets[0]++;
2266 c->bytes[0] += sizeof(*p) + len;
2267
2268 if (c->bit_offset >= c->bm_bits)
2269 len = 0; /* DONE */
2270 } else {
2271 /* was not compressible.
2272 * send a buffer full of plain text bits instead. */
2273 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2274 len = num_words * sizeof(long);
2275 if (len)
2276 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2277 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002278 h, sizeof(struct p_header80) + len, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002279 c->word_offset += num_words;
2280 c->bit_offset = c->word_offset * BITS_PER_LONG;
2281
2282 c->packets[1]++;
Philipp Reisner0b70a132010-08-20 13:36:10 +02002283 c->bytes[1] += sizeof(struct p_header80) + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002284
2285 if (c->bit_offset > c->bm_bits)
2286 c->bit_offset = c->bm_bits;
2287 }
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002288 if (ok) {
2289 if (len == 0) {
2290 INFO_bm_xfer_stats(mdev, "send", c);
2291 return 0;
2292 } else
2293 return 1;
2294 }
2295 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002296}
2297
2298/* See the comment at receive_bitmap() */
2299int _drbd_send_bitmap(struct drbd_conf *mdev)
2300{
2301 struct bm_xfer_ctx c;
Philipp Reisner0b70a132010-08-20 13:36:10 +02002302 struct p_header80 *p;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002303 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002304
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01002305 if (!expect(mdev->bitmap))
2306 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002307
2308 /* maybe we should use some per thread scratch page,
2309 * and allocate that during initial device creation? */
Philipp Reisner0b70a132010-08-20 13:36:10 +02002310 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002311 if (!p) {
2312 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002313 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002314 }
2315
2316 if (get_ldev(mdev)) {
2317 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2318 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2319 drbd_bm_set_all(mdev);
2320 if (drbd_bm_write(mdev)) {
2321 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2322 * but otherwise process as per normal - need to tell other
2323 * side that a full resync is required! */
2324 dev_err(DEV, "Failed to write bitmap to disk!\n");
2325 } else {
2326 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2327 drbd_md_sync(mdev);
2328 }
2329 }
2330 put_ldev(mdev);
2331 }
2332
2333 c = (struct bm_xfer_ctx) {
2334 .bm_bits = drbd_bm_bits(mdev),
2335 .bm_words = drbd_bm_words(mdev),
2336 };
2337
2338 do {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002339 err = send_bitmap_rle_or_plain(mdev, p, &c);
2340 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002341
2342 free_page((unsigned long) p);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002343 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002344}
2345
2346int drbd_send_bitmap(struct drbd_conf *mdev)
2347{
2348 int err;
2349
2350 if (!drbd_get_data_sock(mdev))
2351 return -1;
2352 err = !_drbd_send_bitmap(mdev);
2353 drbd_put_data_sock(mdev);
2354 return err;
2355}
2356
2357int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2358{
2359 int ok;
2360 struct p_barrier_ack p;
2361
2362 p.barrier = barrier_nr;
2363 p.set_size = cpu_to_be32(set_size);
2364
2365 if (mdev->state.conn < C_CONNECTED)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002366 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002367 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002368 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002369 return ok;
2370}
2371
2372/**
2373 * _drbd_send_ack() - Sends an ack packet
2374 * @mdev: DRBD device.
2375 * @cmd: Packet command code.
2376 * @sector: sector, needs to be in big endian byte order
2377 * @blksize: size in byte, needs to be in big endian byte order
2378 * @block_id: Id, big endian byte order
2379 */
2380static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2381 u64 sector,
2382 u32 blksize,
2383 u64 block_id)
2384{
2385 int ok;
2386 struct p_block_ack p;
2387
2388 p.sector = sector;
2389 p.block_id = block_id;
2390 p.blksize = blksize;
2391 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2392
2393 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002394 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002395 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002396 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002397 return ok;
2398}
2399
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002400/* dp->sector and dp->block_id already/still in network byte order,
2401 * data_size is payload size according to dp->head,
2402 * and may need to be corrected for digest size. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002403int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002404 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002405{
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002406 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2407 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002408 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2409 dp->block_id);
2410}
2411
2412int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2413 struct p_block_req *rp)
2414{
2415 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2416}
2417
2418/**
2419 * drbd_send_ack() - Sends an ack packet
2420 * @mdev: DRBD device.
2421 * @cmd: Packet command code.
2422 * @e: Epoch entry.
2423 */
2424int drbd_send_ack(struct drbd_conf *mdev,
2425 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2426{
2427 return _drbd_send_ack(mdev, cmd,
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002428 cpu_to_be64(e->i.sector),
2429 cpu_to_be32(e->i.size),
Philipp Reisnerb411b362009-09-25 16:07:19 -07002430 e->block_id);
2431}
2432
2433/* This function misuses the block_id field to signal if the blocks
2434 * are in sync or not. */
2435int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2436 sector_t sector, int blksize, u64 block_id)
2437{
2438 return _drbd_send_ack(mdev, cmd,
2439 cpu_to_be64(sector),
2440 cpu_to_be32(blksize),
2441 cpu_to_be64(block_id));
2442}
2443
2444int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2445 sector_t sector, int size, u64 block_id)
2446{
2447 int ok;
2448 struct p_block_req p;
2449
2450 p.sector = cpu_to_be64(sector);
2451 p.block_id = block_id;
2452 p.blksize = cpu_to_be32(size);
2453
2454 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002455 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002456 return ok;
2457}
2458
2459int drbd_send_drequest_csum(struct drbd_conf *mdev,
2460 sector_t sector, int size,
2461 void *digest, int digest_size,
2462 enum drbd_packets cmd)
2463{
2464 int ok;
2465 struct p_block_req p;
2466
2467 p.sector = cpu_to_be64(sector);
Andreas Gruenbacher9a8e7752011-01-11 14:04:09 +01002468 p.block_id = ID_SYNCER /* unused */;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002469 p.blksize = cpu_to_be32(size);
2470
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002471 p.head.magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002472 p.head.command = cpu_to_be16(cmd);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002473 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002474
2475 mutex_lock(&mdev->data.mutex);
2476
2477 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2478 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2479
2480 mutex_unlock(&mdev->data.mutex);
2481
2482 return ok;
2483}
2484
2485int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2486{
2487 int ok;
2488 struct p_block_req p;
2489
2490 p.sector = cpu_to_be64(sector);
Andreas Gruenbacher9a8e7752011-01-11 14:04:09 +01002491 p.block_id = ID_SYNCER /* unused */;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002492 p.blksize = cpu_to_be32(size);
2493
2494 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002495 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002496 return ok;
2497}
2498
2499/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002500 * returns false if we should retry,
2501 * true if we think connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07002502 */
2503static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2504{
2505 int drop_it;
2506 /* long elapsed = (long)(jiffies - mdev->last_received); */
2507
2508 drop_it = mdev->meta.socket == sock
2509 || !mdev->asender.task
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01002510 || get_t_state(&mdev->asender) != RUNNING
Philipp Reisnerb411b362009-09-25 16:07:19 -07002511 || mdev->state.conn < C_CONNECTED;
2512
2513 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002514 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002515
2516 drop_it = !--mdev->ko_count;
2517 if (!drop_it) {
2518 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2519 current->comm, current->pid, mdev->ko_count);
2520 request_ping(mdev);
2521 }
2522
2523 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2524}
2525
2526/* The idea of sendpage seems to be to put some kind of reference
2527 * to the page into the skb, and to hand it over to the NIC. In
2528 * this process get_page() gets called.
2529 *
2530 * As soon as the page was really sent over the network put_page()
2531 * gets called by some part of the network layer. [ NIC driver? ]
2532 *
2533 * [ get_page() / put_page() increment/decrement the count. If count
2534 * reaches 0 the page will be freed. ]
2535 *
2536 * This works nicely with pages from FSs.
2537 * But this means that in protocol A we might signal IO completion too early!
2538 *
2539 * In order not to corrupt data during a resync we must make sure
2540 * that we do not reuse our own buffer pages (EEs) too early, therefore
2541 * we have the net_ee list.
2542 *
2543 * XFS seems to have problems, still, it submits pages with page_count == 0!
2544 * As a workaround, we disable sendpage on pages
2545 * with page_count == 0 or PageSlab.
2546 */
2547static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002548 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002549{
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002550 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002551 kunmap(page);
2552 if (sent == size)
2553 mdev->send_cnt += size>>9;
2554 return sent == size;
2555}
2556
2557static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002558 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002559{
2560 mm_segment_t oldfs = get_fs();
2561 int sent, ok;
2562 int len = size;
2563
2564 /* e.g. XFS meta- & log-data is in slab pages, which have a
2565 * page_count of 0 and/or have PageSlab() set.
2566 * we cannot use send_page for those, as that does get_page();
2567 * put_page(); and would cause either a VM_BUG directly, or
2568 * __page_cache_release a page that would actually still be referenced
2569 * by someone, leading to some obscure delayed Oops somewhere else. */
2570 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002571 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002572
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002573 msg_flags |= MSG_NOSIGNAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002574 drbd_update_congested(mdev);
2575 set_fs(KERNEL_DS);
2576 do {
2577 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2578 offset, len,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002579 msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002580 if (sent == -EAGAIN) {
2581 if (we_should_drop_the_connection(mdev,
2582 mdev->data.socket))
2583 break;
2584 else
2585 continue;
2586 }
2587 if (sent <= 0) {
2588 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2589 __func__, (int)size, len, sent);
2590 break;
2591 }
2592 len -= sent;
2593 offset += sent;
2594 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2595 set_fs(oldfs);
2596 clear_bit(NET_CONGESTED, &mdev->flags);
2597
2598 ok = (len == 0);
2599 if (likely(ok))
2600 mdev->send_cnt += size>>9;
2601 return ok;
2602}
2603
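/*
 * Illustrative sketch, not part of the driver: the sendpage eligibility
 * test used in _drbd_send_page() above, pulled out as a predicate.  Pages
 * with a zero reference count, and slab pages (e.g. XFS meta/log data),
 * must take the copying path, since sendpage() takes and drops its own
 * page reference.  The helper name is invented for this example.
 */
static bool example_page_ok_for_sendpage(struct page *page)
{
	return !disable_sendpage && page_count(page) >= 1 && !PageSlab(page);
}
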
2604static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2605{
2606 struct bio_vec *bvec;
2607 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002608 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002609 __bio_for_each_segment(bvec, bio, i, 0) {
2610 if (!_drbd_no_send_page(mdev, bvec->bv_page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002611 bvec->bv_offset, bvec->bv_len,
2612 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002613 return 0;
2614 }
2615 return 1;
2616}
2617
2618static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2619{
2620 struct bio_vec *bvec;
2621 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002622 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002623 __bio_for_each_segment(bvec, bio, i, 0) {
2624 if (!_drbd_send_page(mdev, bvec->bv_page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002625 bvec->bv_offset, bvec->bv_len,
2626 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002627 return 0;
2628 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002629 return 1;
2630}
2631
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002632static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2633{
2634 struct page *page = e->pages;
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002635 unsigned len = e->i.size;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002636 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002637 page_chain_for_each(page) {
2638 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002639 if (!_drbd_send_page(mdev, page, 0, l,
2640 page_chain_next(page) ? MSG_MORE : 0))
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002641 return 0;
2642 len -= l;
2643 }
2644 return 1;
2645}
2646
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002647static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2648{
2649 if (mdev->agreed_pro_version >= 95)
2650 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002651 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2652 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2653 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2654 else
Jens Axboe721a9602011-03-09 11:56:30 +01002655 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002656}
2657
Philipp Reisnerb411b362009-09-25 16:07:19 -07002658/* Used to send write requests
2659 * R_PRIMARY -> Peer (P_DATA)
2660 */
2661int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2662{
2663 int ok = 1;
2664 struct p_data p;
2665 unsigned int dp_flags = 0;
2666 void *dgb;
2667 int dgs;
2668
2669 if (!drbd_get_data_sock(mdev))
2670 return 0;
2671
2672 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2673 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2674
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002675 if (req->i.size <= DRBD_MAX_SIZE_H80_PACKET) {
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002676 p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002677 p.head.h80.command = cpu_to_be16(P_DATA);
2678 p.head.h80.length =
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002679 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002680 } else {
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002681 p.head.h95.magic = cpu_to_be16(DRBD_MAGIC_BIG);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002682 p.head.h95.command = cpu_to_be16(P_DATA);
2683 p.head.h95.length =
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002684 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002685 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002686
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002687 p.sector = cpu_to_be64(req->i.sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002688 p.block_id = (unsigned long)req;
2689 p.seq_num = cpu_to_be32(req->seq_num =
2690 atomic_add_return(1, &mdev->packet_seq));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002691
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002692 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2693
Philipp Reisnerb411b362009-09-25 16:07:19 -07002694 if (mdev->state.conn >= C_SYNC_SOURCE &&
2695 mdev->state.conn <= C_PAUSED_SYNC_T)
2696 dp_flags |= DP_MAY_SET_IN_SYNC;
2697
2698 p.dp_flags = cpu_to_be32(dp_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002699 set_bit(UNPLUG_REMOTE, &mdev->flags);
2700 ok = (sizeof(p) ==
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002701 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002702 if (ok && dgs) {
2703 dgb = mdev->int_dig_out;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002704 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
Andreas Gruenbachercab2f742010-12-09 16:08:46 +01002705 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002706 }
2707 if (ok) {
Lars Ellenberg470be442010-11-10 10:36:52 +01002708 /* For protocol A, we have to memcpy the payload into
2709 * socket buffers, as we may complete right away
2710 * as soon as we handed it over to tcp, at which point the data
2711 * pages may become invalid.
2712 *
2713 * For data-integrity enabled, we copy it as well, so we can be
2714 * sure that even if the bio pages may still be modified, it
2715 * won't change the data on the wire, thus if the digest checks
2716 * out ok after sending on this side, but does not fit on the
2717 * receiving side, we sure have detected corruption elsewhere.
2718 */
2719 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002720 ok = _drbd_send_bio(mdev, req->master_bio);
2721 else
2722 ok = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01002723
2724 /* double check digest, sometimes buffers have been modified in flight. */
2725 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02002726 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01002727 * currently supported in kernel crypto. */
2728 unsigned char digest[64];
2729 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2730 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2731 dev_warn(DEV,
2732 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002733 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01002734 }
2735 } /* else if (dgs > 64) {
2736 ... Be noisy about digest too large ...
2737 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002738 }
2739
2740 drbd_put_data_sock(mdev);
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02002741
Philipp Reisnerb411b362009-09-25 16:07:19 -07002742 return ok;
2743}
2744
2745/* answer packet, used to send data back for read requests:
2746 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2747 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2748 */
2749int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2750 struct drbd_epoch_entry *e)
2751{
2752 int ok;
2753 struct p_data p;
2754 void *dgb;
2755 int dgs;
2756
2757 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2758 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2759
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002760 if (e->i.size <= DRBD_MAX_SIZE_H80_PACKET) {
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002761 p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002762 p.head.h80.command = cpu_to_be16(cmd);
2763 p.head.h80.length =
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002764 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->i.size);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002765 } else {
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002766 p.head.h95.magic = cpu_to_be16(DRBD_MAGIC_BIG);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002767 p.head.h95.command = cpu_to_be16(cmd);
2768 p.head.h95.length =
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002769 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->i.size);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002770 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002771
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002772 p.sector = cpu_to_be64(e->i.sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002773 p.block_id = e->block_id;
2774 /* p.seq_num = 0; No sequence numbers here.. */
2775
2776 /* Only called by our kernel thread.
2777 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2778 * in response to admin command or module unload.
2779 */
2780 if (!drbd_get_data_sock(mdev))
2781 return 0;
2782
Philipp Reisner0b70a132010-08-20 13:36:10 +02002783 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002784 if (ok && dgs) {
2785 dgb = mdev->int_dig_out;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002786 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
Andreas Gruenbachercab2f742010-12-09 16:08:46 +01002787 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002788 }
2789 if (ok)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002790 ok = _drbd_send_zc_ee(mdev, e);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002791
2792 drbd_put_data_sock(mdev);
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02002793
Philipp Reisnerb411b362009-09-25 16:07:19 -07002794 return ok;
2795}
2796
Philipp Reisner73a01a12010-10-27 14:33:00 +02002797int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2798{
2799 struct p_block_desc p;
2800
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002801 p.sector = cpu_to_be64(req->i.sector);
2802 p.blksize = cpu_to_be32(req->i.size);
Philipp Reisner73a01a12010-10-27 14:33:00 +02002803
2804 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2805}
2806
Philipp Reisnerb411b362009-09-25 16:07:19 -07002807/*
2808 drbd_send distinguishes two cases:
2809
2810 Packets sent via the data socket "sock"
2811 and packets sent via the meta data socket "msock"
2812
2813 sock msock
2814 -----------------+-------------------------+------------------------------
2815 timeout conf.timeout / 2 conf.timeout / 2
2816 timeout action send a ping via msock Abort communication
2817 and close all sockets
2818*/
2819
2820/*
2821 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2822 */
2823int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2824 void *buf, size_t size, unsigned msg_flags)
2825{
2826 struct kvec iov;
2827 struct msghdr msg;
2828 int rv, sent = 0;
2829
2830 if (!sock)
2831 return -1000;
2832
2833 /* THINK if (signal_pending) return ... ? */
2834
2835 iov.iov_base = buf;
2836 iov.iov_len = size;
2837
2838 msg.msg_name = NULL;
2839 msg.msg_namelen = 0;
2840 msg.msg_control = NULL;
2841 msg.msg_controllen = 0;
2842 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2843
2844 if (sock == mdev->data.socket) {
2845 mdev->ko_count = mdev->net_conf->ko_count;
2846 drbd_update_congested(mdev);
2847 }
2848 do {
2849 /* STRANGE
2850 * tcp_sendmsg does _not_ use its size parameter at all ?
2851 *
2852 * -EAGAIN on timeout, -EINTR on signal.
2853 */
2854/* THINK
2855 * do we need to block DRBD_SIG if sock == &meta.socket ??
2856 * otherwise wake_asender() might interrupt some send_*Ack !
2857 */
2858 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2859 if (rv == -EAGAIN) {
2860 if (we_should_drop_the_connection(mdev, sock))
2861 break;
2862 else
2863 continue;
2864 }
2865 D_ASSERT(rv != 0);
2866 if (rv == -EINTR) {
2867 flush_signals(current);
2868 rv = 0;
2869 }
2870 if (rv < 0)
2871 break;
2872 sent += rv;
2873 iov.iov_base += rv;
2874 iov.iov_len -= rv;
2875 } while (sent < size);
2876
2877 if (sock == mdev->data.socket)
2878 clear_bit(NET_CONGESTED, &mdev->flags);
2879
2880 if (rv <= 0) {
2881 if (rv != -EAGAIN) {
2882 dev_err(DEV, "%s_sendmsg returned %d\n",
2883 sock == mdev->meta.socket ? "msock" : "sock",
2884 rv);
2885 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2886 } else
2887 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2888 }
2889
2890 return sent;
2891}
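/*
 * Illustrative sketch only, not part of DRBD: how a caller is expected to
 * combine the locking rule and the sock/msock policy described above.
 * drbd_send_block() above uses drbd_get_data_sock()/drbd_put_data_sock()
 * for the same purpose on the data socket.
 */
#if 0
static int example_send_on_data_sock(struct drbd_conf *mdev,
				     void *buf, size_t len)
{
	int sent;

	/* hold the mutex that matches the socket you pass in */
	mutex_lock(&mdev->data.mutex);
	sent = drbd_send(mdev, mdev->data.socket, buf, len, 0);
	mutex_unlock(&mdev->data.mutex);

	/* drbd_send() retries -EAGAIN itself (see we_should_drop_the_connection());
	 * a short or negative return means the connection state was forced
	 * to C_TIMEOUT or C_BROKEN_PIPE above. */
	return sent == (int)len;
}
#endif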
2892
2893static int drbd_open(struct block_device *bdev, fmode_t mode)
2894{
2895 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2896 unsigned long flags;
2897 int rv = 0;
2898
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002899 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002900 spin_lock_irqsave(&mdev->req_lock, flags);
2901 /* to have a stable mdev->state.role
2902 * and no race with updating open_cnt */
2903
2904 if (mdev->state.role != R_PRIMARY) {
2905 if (mode & FMODE_WRITE)
2906 rv = -EROFS;
2907 else if (!allow_oos)
2908 rv = -EMEDIUMTYPE;
2909 }
2910
2911 if (!rv)
2912 mdev->open_cnt++;
2913 spin_unlock_irqrestore(&mdev->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002914 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002915
2916 return rv;
2917}
2918
2919static int drbd_release(struct gendisk *gd, fmode_t mode)
2920{
2921 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002922 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002923 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002924 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002925 return 0;
2926}
2927
Philipp Reisnerb411b362009-09-25 16:07:19 -07002928static void drbd_set_defaults(struct drbd_conf *mdev)
2929{
Philipp Reisner85f4cc12010-06-29 17:35:34 +02002930	/* This way we get a compile error when sync_conf grows,
2931	   and we forget to initialize it here */
2932 mdev->sync_conf = (struct syncer_conf) {
2933 /* .rate = */ DRBD_RATE_DEF,
2934 /* .after = */ DRBD_AFTER_DEF,
2935 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
Philipp Reisner85f4cc12010-06-29 17:35:34 +02002936 /* .verify_alg = */ {}, 0,
2937 /* .cpu_mask = */ {}, 0,
2938 /* .csums_alg = */ {}, 0,
Philipp Reisnere7564142010-06-29 17:35:34 +02002939 /* .use_rle = */ 0,
Philipp Reisner9a31d712010-07-05 13:42:03 +02002940 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
2941 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
2942 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2943 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002944 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
2945 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
Philipp Reisner85f4cc12010-06-29 17:35:34 +02002946 };
2947
2948	/* Have to do it this way, because the layout differs between
2949	   big endian and little endian */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002950 mdev->state = (union drbd_state) {
2951 { .role = R_SECONDARY,
2952 .peer = R_UNKNOWN,
2953 .conn = C_STANDALONE,
2954 .disk = D_DISKLESS,
2955 .pdsk = D_UNKNOWN,
Philipp Reisnerfb22c402010-09-08 23:20:21 +02002956 .susp = 0,
2957 .susp_nod = 0,
2958 .susp_fen = 0
Philipp Reisnerb411b362009-09-25 16:07:19 -07002959 } };
2960}
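/*
 * Sketch for illustration only (not compiled): why drbd_set_defaults()
 * uses the named-field form.  Assigning the bitfields by name is correct
 * on both byte orders, while hand-packing the 32bit word would hard-code
 * one particular bitfield layout.  SOME_ROLE_SHIFT below is a made-up
 * constant, only there to show the fragile alternative.
 */
#if 0
	union drbd_state s =
		{ { .role = R_SECONDARY, .conn = C_STANDALONE } };
	/* versus something like
	 *	s.i = (R_SECONDARY << SOME_ROLE_SHIFT) | ...;
	 * which would only match one of the two layouts. */
#endif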
2961
2962void drbd_init_set_defaults(struct drbd_conf *mdev)
2963{
2964 /* the memset(,0,) did most of this.
2965 * note: only assignments, no allocation in here */
2966
2967 drbd_set_defaults(mdev);
2968
Philipp Reisnerb411b362009-09-25 16:07:19 -07002969 atomic_set(&mdev->ap_bio_cnt, 0);
2970 atomic_set(&mdev->ap_pending_cnt, 0);
2971 atomic_set(&mdev->rs_pending_cnt, 0);
2972 atomic_set(&mdev->unacked_cnt, 0);
2973 atomic_set(&mdev->local_cnt, 0);
2974 atomic_set(&mdev->net_cnt, 0);
2975 atomic_set(&mdev->packet_seq, 0);
2976 atomic_set(&mdev->pp_in_use, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02002977 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02002978 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002979 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02002980 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002981
2982 mutex_init(&mdev->md_io_mutex);
2983 mutex_init(&mdev->data.mutex);
2984 mutex_init(&mdev->meta.mutex);
2985 sema_init(&mdev->data.work.s, 0);
2986 sema_init(&mdev->meta.work.s, 0);
2987 mutex_init(&mdev->state_mutex);
2988
2989 spin_lock_init(&mdev->data.work.q_lock);
2990 spin_lock_init(&mdev->meta.work.q_lock);
2991
2992 spin_lock_init(&mdev->al_lock);
2993 spin_lock_init(&mdev->req_lock);
2994 spin_lock_init(&mdev->peer_seq_lock);
2995 spin_lock_init(&mdev->epoch_lock);
2996
2997 INIT_LIST_HEAD(&mdev->active_ee);
2998 INIT_LIST_HEAD(&mdev->sync_ee);
2999 INIT_LIST_HEAD(&mdev->done_ee);
3000 INIT_LIST_HEAD(&mdev->read_ee);
3001 INIT_LIST_HEAD(&mdev->net_ee);
3002 INIT_LIST_HEAD(&mdev->resync_reads);
3003 INIT_LIST_HEAD(&mdev->data.work.q);
3004 INIT_LIST_HEAD(&mdev->meta.work.q);
3005 INIT_LIST_HEAD(&mdev->resync_work.list);
3006 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003007 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003008 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003009 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003010 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02003011
Philipp Reisner794abb72010-12-27 11:51:23 +01003012 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003013 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003014 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003015 mdev->md_sync_work.cb = w_md_sync;
3016 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01003017 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003018 init_timer(&mdev->resync_timer);
3019 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01003020 init_timer(&mdev->start_resync_timer);
Philipp Reisner7fde2be2011-03-01 11:08:28 +01003021 init_timer(&mdev->request_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003022 mdev->resync_timer.function = resync_timer_fn;
3023 mdev->resync_timer.data = (unsigned long) mdev;
3024 mdev->md_sync_timer.function = md_sync_timer_fn;
3025 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01003026 mdev->start_resync_timer.function = start_resync_timer_fn;
3027 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisner7fde2be2011-03-01 11:08:28 +01003028 mdev->request_timer.function = request_timer_fn;
3029 mdev->request_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003030
3031 init_waitqueue_head(&mdev->misc_wait);
3032 init_waitqueue_head(&mdev->state_wait);
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003033 init_waitqueue_head(&mdev->net_cnt_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003034 init_waitqueue_head(&mdev->ee_wait);
3035 init_waitqueue_head(&mdev->al_wait);
3036 init_waitqueue_head(&mdev->seq_wait);
3037
3038 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3039 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3040 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3041
3042 mdev->agreed_pro_version = PRO_VERSION_MAX;
Philipp Reisner2451fc32010-08-24 13:43:11 +02003043 mdev->write_ordering = WO_bdev_flush;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003044 mdev->resync_wenr = LC_FREE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02003045 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3046 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003047}
3048
3049void drbd_mdev_cleanup(struct drbd_conf *mdev)
3050{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003051 int i;
Andreas Gruenbachere77a0a52011-01-25 15:43:39 +01003052 if (mdev->receiver.t_state != NONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003053 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3054 mdev->receiver.t_state);
3055
3056 /* no need to lock it, I'm the only thread alive */
3057 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3058 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3059 mdev->al_writ_cnt =
3060 mdev->bm_writ_cnt =
3061 mdev->read_cnt =
3062 mdev->recv_cnt =
3063 mdev->send_cnt =
3064 mdev->writ_cnt =
3065 mdev->p_size =
3066 mdev->rs_start =
3067 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003068 mdev->rs_failed = 0;
3069 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02003070 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003071 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3072 mdev->rs_mark_left[i] = 0;
3073 mdev->rs_mark_time[i] = 0;
3074 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003075 D_ASSERT(mdev->net_conf == NULL);
3076
3077 drbd_set_my_capacity(mdev, 0);
3078 if (mdev->bitmap) {
3079 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01003080 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003081 drbd_bm_cleanup(mdev);
3082 }
3083
3084 drbd_free_resources(mdev);
Philipp Reisner07782862010-08-31 12:00:50 +02003085 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003086
3087 /*
3088 * currently we drbd_init_ee only on module load, so
3089 * we may do drbd_release_ee only on module unload!
3090 */
3091 D_ASSERT(list_empty(&mdev->active_ee));
3092 D_ASSERT(list_empty(&mdev->sync_ee));
3093 D_ASSERT(list_empty(&mdev->done_ee));
3094 D_ASSERT(list_empty(&mdev->read_ee));
3095 D_ASSERT(list_empty(&mdev->net_ee));
3096 D_ASSERT(list_empty(&mdev->resync_reads));
3097 D_ASSERT(list_empty(&mdev->data.work.q));
3098 D_ASSERT(list_empty(&mdev->meta.work.q));
3099 D_ASSERT(list_empty(&mdev->resync_work.list));
3100 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003101 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01003102
3103 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003104}
3105
3106
3107static void drbd_destroy_mempools(void)
3108{
3109 struct page *page;
3110
3111 while (drbd_pp_pool) {
3112 page = drbd_pp_pool;
3113 drbd_pp_pool = (struct page *)page_private(page);
3114 __free_page(page);
3115 drbd_pp_vacant--;
3116 }
3117
3118 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3119
3120 if (drbd_ee_mempool)
3121 mempool_destroy(drbd_ee_mempool);
3122 if (drbd_request_mempool)
3123 mempool_destroy(drbd_request_mempool);
3124 if (drbd_ee_cache)
3125 kmem_cache_destroy(drbd_ee_cache);
3126 if (drbd_request_cache)
3127 kmem_cache_destroy(drbd_request_cache);
3128 if (drbd_bm_ext_cache)
3129 kmem_cache_destroy(drbd_bm_ext_cache);
3130 if (drbd_al_ext_cache)
3131 kmem_cache_destroy(drbd_al_ext_cache);
3132
3133 drbd_ee_mempool = NULL;
3134 drbd_request_mempool = NULL;
3135 drbd_ee_cache = NULL;
3136 drbd_request_cache = NULL;
3137 drbd_bm_ext_cache = NULL;
3138 drbd_al_ext_cache = NULL;
3139
3140 return;
3141}
3142
3143static int drbd_create_mempools(void)
3144{
3145 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01003146 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003147 int i;
3148
3149 /* prepare our caches and mempools */
3150 drbd_request_mempool = NULL;
3151 drbd_ee_cache = NULL;
3152 drbd_request_cache = NULL;
3153 drbd_bm_ext_cache = NULL;
3154 drbd_al_ext_cache = NULL;
3155 drbd_pp_pool = NULL;
3156
3157 /* caches */
3158 drbd_request_cache = kmem_cache_create(
3159 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3160 if (drbd_request_cache == NULL)
3161 goto Enomem;
3162
3163 drbd_ee_cache = kmem_cache_create(
3164 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3165 if (drbd_ee_cache == NULL)
3166 goto Enomem;
3167
3168 drbd_bm_ext_cache = kmem_cache_create(
3169 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3170 if (drbd_bm_ext_cache == NULL)
3171 goto Enomem;
3172
3173 drbd_al_ext_cache = kmem_cache_create(
3174 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3175 if (drbd_al_ext_cache == NULL)
3176 goto Enomem;
3177
3178 /* mempools */
3179 drbd_request_mempool = mempool_create(number,
3180 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3181 if (drbd_request_mempool == NULL)
3182 goto Enomem;
3183
3184 drbd_ee_mempool = mempool_create(number,
3185 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06003186 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003187 goto Enomem;
3188
3189 /* drbd's page pool */
3190 spin_lock_init(&drbd_pp_lock);
3191
3192 for (i = 0; i < number; i++) {
3193 page = alloc_page(GFP_HIGHUSER);
3194 if (!page)
3195 goto Enomem;
3196 set_page_private(page, (unsigned long)drbd_pp_pool);
3197 drbd_pp_pool = page;
3198 }
3199 drbd_pp_vacant = number;
3200
3201 return 0;
3202
3203Enomem:
3204 drbd_destroy_mempools(); /* in case we allocated some */
3205 return -ENOMEM;
3206}
3207
3208static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3209 void *unused)
3210{
3211 /* just so we have it. you never know what interesting things we
3212 * might want to do here some day...
3213 */
3214
3215 return NOTIFY_DONE;
3216}
3217
3218static struct notifier_block drbd_notifier = {
3219 .notifier_call = drbd_notify_sys,
3220};
3221
3222static void drbd_release_ee_lists(struct drbd_conf *mdev)
3223{
3224 int rr;
3225
3226 rr = drbd_release_ee(mdev, &mdev->active_ee);
3227 if (rr)
3228 dev_err(DEV, "%d EEs in active list found!\n", rr);
3229
3230 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3231 if (rr)
3232 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3233
3234 rr = drbd_release_ee(mdev, &mdev->read_ee);
3235 if (rr)
3236 dev_err(DEV, "%d EEs in read list found!\n", rr);
3237
3238 rr = drbd_release_ee(mdev, &mdev->done_ee);
3239 if (rr)
3240 dev_err(DEV, "%d EEs in done list found!\n", rr);
3241
3242 rr = drbd_release_ee(mdev, &mdev->net_ee);
3243 if (rr)
3244 dev_err(DEV, "%d EEs in net list found!\n", rr);
3245}
3246
3247/* caution. no locking.
3248 * currently only used from module cleanup code. */
3249static void drbd_delete_device(unsigned int minor)
3250{
3251 struct drbd_conf *mdev = minor_to_mdev(minor);
3252
3253 if (!mdev)
3254 return;
3255
3256 /* paranoia asserts */
3257 if (mdev->open_cnt != 0)
3258 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3259 __FILE__ , __LINE__);
3260
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01003261 if (!expect(list_empty(&mdev->data.work.q))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003262 struct list_head *lp;
3263 list_for_each(lp, &mdev->data.work.q) {
3264 dev_err(DEV, "lp = %p\n", lp);
3265 }
3266 };
3267 /* end paranoia asserts */
3268
3269 del_gendisk(mdev->vdisk);
3270
3271 /* cleanup stuff that may have been allocated during
3272 * device (re-)configuration or state changes */
3273
3274 if (mdev->this_bdev)
3275 bdput(mdev->this_bdev);
3276
3277 drbd_free_resources(mdev);
3278
3279 drbd_release_ee_lists(mdev);
3280
Philipp Reisnerb411b362009-09-25 16:07:19 -07003281 lc_destroy(mdev->act_log);
3282 lc_destroy(mdev->resync);
3283
3284 kfree(mdev->p_uuid);
3285 /* mdev->p_uuid = NULL; */
3286
3287 kfree(mdev->int_dig_out);
3288 kfree(mdev->int_dig_in);
3289 kfree(mdev->int_dig_vv);
3290
3291 /* cleanup the rest that has been
3292 * allocated from drbd_new_device
3293 * and actually free the mdev itself */
3294 drbd_free_mdev(mdev);
3295}
3296
3297static void drbd_cleanup(void)
3298{
3299 unsigned int i;
3300
3301 unregister_reboot_notifier(&drbd_notifier);
3302
Lars Ellenberg17a93f32010-11-24 10:37:35 +01003303 /* first remove proc,
3304	 * drbdsetup uses its presence to detect
3305	 * whether DRBD is loaded.
3306	 * If we got stuck in proc removal,
3307 * but have netlink already deregistered,
3308 * some drbdsetup commands may wait forever
3309 * for an answer.
3310 */
3311 if (drbd_proc)
3312 remove_proc_entry("drbd", NULL);
3313
Philipp Reisnerb411b362009-09-25 16:07:19 -07003314 drbd_nl_cleanup();
3315
3316 if (minor_table) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003317 i = minor_count;
3318 while (i--)
3319 drbd_delete_device(i);
3320 drbd_destroy_mempools();
3321 }
3322
3323 kfree(minor_table);
3324
3325 unregister_blkdev(DRBD_MAJOR, "drbd");
3326
3327 printk(KERN_INFO "drbd: module cleanup done.\n");
3328}
3329
3330/**
3331 * drbd_congested() - Callback for pdflush
3332 * @congested_data: User data
3333 * @bdi_bits: Bits pdflush is currently interested in
3334 *
3335 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3336 */
3337static int drbd_congested(void *congested_data, int bdi_bits)
3338{
3339 struct drbd_conf *mdev = congested_data;
3340 struct request_queue *q;
3341 char reason = '-';
3342 int r = 0;
3343
Andreas Gruenbacher1b881ef2010-12-13 18:03:38 +01003344 if (!may_inc_ap_bio(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003345 /* DRBD has frozen IO */
3346 r = bdi_bits;
3347 reason = 'd';
3348 goto out;
3349 }
3350
3351 if (get_ldev(mdev)) {
3352 q = bdev_get_queue(mdev->ldev->backing_bdev);
3353 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3354 put_ldev(mdev);
3355 if (r)
3356 reason = 'b';
3357 }
3358
3359 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3360 r |= (1 << BDI_async_congested);
3361 reason = reason == 'b' ? 'a' : 'n';
3362 }
3363
3364out:
3365 mdev->congestion_reason = reason;
3366 return r;
3367}
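/*
 * Hypothetical caller fragment (not part of DRBD): how the bdi_bits
 * in/out contract documented above is meant to be consumed.
 */
#if 0
	int bits = drbd_congested(mdev, 1 << BDI_async_congested);
	if (bits & (1 << BDI_async_congested))
		/* async writeback to this device should back off */;
#endif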
3368
3369struct drbd_conf *drbd_new_device(unsigned int minor)
3370{
3371 struct drbd_conf *mdev;
3372 struct gendisk *disk;
3373 struct request_queue *q;
3374
3375 /* GFP_KERNEL, we are outside of all write-out paths */
3376 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3377 if (!mdev)
3378 return NULL;
3379 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3380 goto out_no_cpumask;
3381
3382 mdev->minor = minor;
3383
3384 drbd_init_set_defaults(mdev);
3385
3386 q = blk_alloc_queue(GFP_KERNEL);
3387 if (!q)
3388 goto out_no_q;
3389 mdev->rq_queue = q;
3390 q->queuedata = mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003391
3392 disk = alloc_disk(1);
3393 if (!disk)
3394 goto out_no_disk;
3395 mdev->vdisk = disk;
3396
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003397 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003398
3399 disk->queue = q;
3400 disk->major = DRBD_MAJOR;
3401 disk->first_minor = minor;
3402 disk->fops = &drbd_ops;
3403 sprintf(disk->disk_name, "drbd%d", minor);
3404 disk->private_data = mdev;
3405
3406 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3407 /* we have no partitions. we contain only ourselves. */
3408 mdev->this_bdev->bd_contains = mdev->this_bdev;
3409
3410 q->backing_dev_info.congested_fn = drbd_congested;
3411 q->backing_dev_info.congested_data = mdev;
3412
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01003413 blk_queue_make_request(q, drbd_make_request);
Philipp Reisner99432fc2011-05-20 16:39:13 +02003414	/* Setting max_hw_sectors to the odd value of 8 KiB here
3415	   triggers a max_bio_size message upon first attach or connect */
3416 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003417 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3418 blk_queue_merge_bvec(q, drbd_merge_bvec);
Jens Axboe7eaceac2011-03-10 08:52:07 +01003419 q->queue_lock = &mdev->req_lock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003420
3421 mdev->md_io_page = alloc_page(GFP_KERNEL);
3422 if (!mdev->md_io_page)
3423 goto out_no_io_page;
3424
3425 if (drbd_bm_init(mdev))
3426 goto out_no_bitmap;
3427 /* no need to lock access, we are still initializing this minor device. */
3428 if (!tl_init(mdev))
3429 goto out_no_tl;
Andreas Gruenbacherdac13892011-01-21 17:18:39 +01003430 mdev->read_requests = RB_ROOT;
Andreas Gruenbacherde696712011-01-20 15:00:24 +01003431 mdev->write_requests = RB_ROOT;
Andreas Gruenbacher8b946252011-01-20 15:23:07 +01003432 mdev->epoch_entries = RB_ROOT;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003433
Philipp Reisnerb411b362009-09-25 16:07:19 -07003434 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3435 if (!mdev->current_epoch)
3436 goto out_no_epoch;
3437
3438 INIT_LIST_HEAD(&mdev->current_epoch->list);
3439 mdev->epochs = 1;
3440
3441 return mdev;
3442
3443/* out_whatever_else:
3444 kfree(mdev->current_epoch); */
3445out_no_epoch:
Philipp Reisnerb411b362009-09-25 16:07:19 -07003446 tl_cleanup(mdev);
3447out_no_tl:
3448 drbd_bm_cleanup(mdev);
3449out_no_bitmap:
3450 __free_page(mdev->md_io_page);
3451out_no_io_page:
3452 put_disk(disk);
3453out_no_disk:
3454 blk_cleanup_queue(q);
3455out_no_q:
3456 free_cpumask_var(mdev->cpu_mask);
3457out_no_cpumask:
3458 kfree(mdev);
3459 return NULL;
3460}
3461
3462/* counterpart of drbd_new_device.
3463 * last part of drbd_delete_device. */
3464void drbd_free_mdev(struct drbd_conf *mdev)
3465{
3466 kfree(mdev->current_epoch);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003467 tl_cleanup(mdev);
3468 if (mdev->bitmap) /* should no longer be there. */
3469 drbd_bm_cleanup(mdev);
3470 __free_page(mdev->md_io_page);
3471 put_disk(mdev->vdisk);
3472 blk_cleanup_queue(mdev->rq_queue);
3473 free_cpumask_var(mdev->cpu_mask);
3474 kfree(mdev);
3475}
3476
3477
3478int __init drbd_init(void)
3479{
3480 int err;
3481
3482 if (sizeof(struct p_handshake) != 80) {
3483 printk(KERN_ERR
3484 "drbd: never change the size or layout "
3485 "of the HandShake packet.\n");
3486 return -EINVAL;
3487 }
3488
Philipp Reisner2b8a90b2011-01-10 11:15:17 +01003489 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003490 printk(KERN_ERR
3491 "drbd: invalid minor_count (%d)\n", minor_count);
3492#ifdef MODULE
3493 return -EINVAL;
3494#else
3495 minor_count = 8;
3496#endif
3497 }
3498
3499 err = drbd_nl_init();
3500 if (err)
3501 return err;
3502
3503 err = register_blkdev(DRBD_MAJOR, "drbd");
3504 if (err) {
3505 printk(KERN_ERR
3506 "drbd: unable to register block device major %d\n",
3507 DRBD_MAJOR);
3508 return err;
3509 }
3510
3511 register_reboot_notifier(&drbd_notifier);
3512
3513 /*
3514 * allocate all necessary structs
3515 */
3516 err = -ENOMEM;
3517
3518 init_waitqueue_head(&drbd_pp_wait);
3519
3520 drbd_proc = NULL; /* play safe for drbd_cleanup */
3521 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3522 GFP_KERNEL);
3523 if (!minor_table)
3524 goto Enomem;
3525
3526 err = drbd_create_mempools();
3527 if (err)
3528 goto Enomem;
3529
Lars Ellenberg8c484ee2010-03-11 16:47:58 +01003530 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003531 if (!drbd_proc) {
3532 printk(KERN_ERR "drbd: unable to register proc file\n");
3533 goto Enomem;
3534 }
3535
3536 rwlock_init(&global_state_lock);
3537
3538 printk(KERN_INFO "drbd: initialized. "
3539 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3540 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3541 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3542 printk(KERN_INFO "drbd: registered as block device major %d\n",
3543 DRBD_MAJOR);
3544 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3545
3546 return 0; /* Success! */
3547
3548Enomem:
3549 drbd_cleanup();
3550 if (err == -ENOMEM)
3551 /* currently always the case */
3552 printk(KERN_ERR "drbd: ran out of memory\n");
3553 else
3554 printk(KERN_ERR "drbd: initialization failure\n");
3555 return err;
3556}
3557
3558void drbd_free_bc(struct drbd_backing_dev *ldev)
3559{
3560 if (ldev == NULL)
3561 return;
3562
Tejun Heoe525fd82010-11-13 11:55:17 +01003563 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3564 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003565
3566 kfree(ldev);
3567}
3568
3569void drbd_free_sock(struct drbd_conf *mdev)
3570{
3571 if (mdev->data.socket) {
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003572 mutex_lock(&mdev->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003573 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3574 sock_release(mdev->data.socket);
3575 mdev->data.socket = NULL;
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003576 mutex_unlock(&mdev->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003577 }
3578 if (mdev->meta.socket) {
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003579 mutex_lock(&mdev->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003580 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3581 sock_release(mdev->meta.socket);
3582 mdev->meta.socket = NULL;
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003583 mutex_unlock(&mdev->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003584 }
3585}
3586
3587
3588void drbd_free_resources(struct drbd_conf *mdev)
3589{
3590 crypto_free_hash(mdev->csums_tfm);
3591 mdev->csums_tfm = NULL;
3592 crypto_free_hash(mdev->verify_tfm);
3593 mdev->verify_tfm = NULL;
3594 crypto_free_hash(mdev->cram_hmac_tfm);
3595 mdev->cram_hmac_tfm = NULL;
3596 crypto_free_hash(mdev->integrity_w_tfm);
3597 mdev->integrity_w_tfm = NULL;
3598 crypto_free_hash(mdev->integrity_r_tfm);
3599 mdev->integrity_r_tfm = NULL;
3600
3601 drbd_free_sock(mdev);
3602
3603 __no_warn(local,
3604 drbd_free_bc(mdev->ldev);
3605 mdev->ldev = NULL;);
3606}
3607
3608/* meta data management */
3609
3610struct meta_data_on_disk {
3611 u64 la_size; /* last agreed size. */
3612 u64 uuid[UI_SIZE]; /* UUIDs. */
3613 u64 device_uuid;
3614 u64 reserved_u64_1;
3615 u32 flags; /* MDF */
3616 u32 magic;
3617 u32 md_size_sect;
3618 u32 al_offset; /* offset to this block */
3619 u32 al_nr_extents; /* important for restoring the AL */
3620 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3621 u32 bm_offset; /* offset to the bitmap, from here */
3622 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
Philipp Reisner99432fc2011-05-20 16:39:13 +02003623 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3624 u32 reserved_u32[3];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003625
3626} __packed;
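/*
 * Illustration only: drbd_md_sync() below zeroes a 512 byte buffer and
 * fills it with this layout, so the struct has to fit into one such
 * sector.  A compile-time guard could look like this (sketch; assumes
 * BUILD_BUG_ON may be used in some init path):
 */
#if 0
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) > 512);
#endif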
3627
3628/**
3629 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3630 * @mdev: DRBD device.
3631 */
3632void drbd_md_sync(struct drbd_conf *mdev)
3633{
3634 struct meta_data_on_disk *buffer;
3635 sector_t sector;
3636 int i;
3637
Lars Ellenbergee15b032010-09-03 10:00:09 +02003638 del_timer(&mdev->md_sync_timer);
3639 /* timer may be rearmed by drbd_md_mark_dirty() now. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003640 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3641 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003642
3643	/* We use D_FAILED here and not D_ATTACHING because we try to write
3644 * metadata even if we detach due to a disk failure! */
3645 if (!get_ldev_if_state(mdev, D_FAILED))
3646 return;
3647
Philipp Reisnerb411b362009-09-25 16:07:19 -07003648 mutex_lock(&mdev->md_io_mutex);
3649 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3650 memset(buffer, 0, 512);
3651
3652 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3653 for (i = UI_CURRENT; i < UI_SIZE; i++)
3654 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3655 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3656 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3657
3658 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3659 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3660 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3661 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3662 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3663
3664 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
Philipp Reisner99432fc2011-05-20 16:39:13 +02003665 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003666
3667 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3668 sector = mdev->ldev->md.md_offset;
3669
Lars Ellenberg3f3a9b82010-09-01 15:12:12 +02003670 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003671 /* this was a try anyways ... */
3672 dev_err(DEV, "meta data update failed!\n");
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003673 drbd_chk_io_error(mdev, 1, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003674 }
3675
3676 /* Update mdev->ldev->md.la_size_sect,
3677 * since we updated it on metadata. */
3678 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3679
3680 mutex_unlock(&mdev->md_io_mutex);
3681 put_ldev(mdev);
3682}
3683
3684/**
3685 * drbd_md_read() - Reads in the meta data super block
3686 * @mdev: DRBD device.
3687 * @bdev: Device from which the meta data should be read in.
3688 *
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01003689 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
Philipp Reisnerb411b362009-09-25 16:07:19 -07003690 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3691 */
3692int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3693{
3694 struct meta_data_on_disk *buffer;
3695 int i, rv = NO_ERROR;
3696
3697 if (!get_ldev_if_state(mdev, D_ATTACHING))
3698 return ERR_IO_MD_DISK;
3699
Philipp Reisnerb411b362009-09-25 16:07:19 -07003700 mutex_lock(&mdev->md_io_mutex);
3701 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3702
3703 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003704 /* NOTE: can't do normal error processing here as this is
Philipp Reisnerb411b362009-09-25 16:07:19 -07003705 called BEFORE disk is attached */
3706 dev_err(DEV, "Error while reading metadata.\n");
3707 rv = ERR_IO_MD_DISK;
3708 goto err;
3709 }
3710
Andreas Gruenbachere7fad8a2011-01-11 13:54:02 +01003711 if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003712 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3713 rv = ERR_MD_INVALID;
3714 goto err;
3715 }
3716 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3717 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3718 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3719 rv = ERR_MD_INVALID;
3720 goto err;
3721 }
3722 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3723 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3724 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3725 rv = ERR_MD_INVALID;
3726 goto err;
3727 }
3728 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3729 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3730 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3731 rv = ERR_MD_INVALID;
3732 goto err;
3733 }
3734
3735 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3736 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3737 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3738 rv = ERR_MD_INVALID;
3739 goto err;
3740 }
3741
3742 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3743 for (i = UI_CURRENT; i < UI_SIZE; i++)
3744 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3745 bdev->md.flags = be32_to_cpu(buffer->flags);
3746 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3747 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3748
Philipp Reisner99432fc2011-05-20 16:39:13 +02003749 spin_lock_irq(&mdev->req_lock);
3750 if (mdev->state.conn < C_CONNECTED) {
3751 int peer;
3752 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3753 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3754 mdev->peer_max_bio_size = peer;
3755 }
3756 spin_unlock_irq(&mdev->req_lock);
3757
Philipp Reisnerb411b362009-09-25 16:07:19 -07003758 if (mdev->sync_conf.al_extents < 7)
3759 mdev->sync_conf.al_extents = 127;
3760
3761 err:
3762 mutex_unlock(&mdev->md_io_mutex);
3763 put_ldev(mdev);
3764
3765 return rv;
3766}
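/*
 * Hypothetical caller fragment (local names are illustrative only):
 * consuming the return contract documented above during attach.
 */
#if 0
	int err = drbd_md_read(mdev, nbc);	/* nbc: candidate backing dev */
	if (err != NO_ERROR)
		return err;	/* ERR_IO_MD_DISK or ERR_MD_INVALID: refuse the attach */
#endif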
3767
3768/**
3769 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3770 * @mdev: DRBD device.
3771 *
3772 * Call this function if you change anything that should be written to
3773 * the meta-data super block. This function sets MD_DIRTY, and starts a
3774 * timer that ensures that within five seconds you have to call drbd_md_sync().
3775 */
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003776#ifdef DEBUG
Lars Ellenbergee15b032010-09-03 10:00:09 +02003777void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3778{
3779 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3780 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3781 mdev->last_md_mark_dirty.line = line;
3782 mdev->last_md_mark_dirty.func = func;
3783 }
3784}
3785#else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003786void drbd_md_mark_dirty(struct drbd_conf *mdev)
3787{
Lars Ellenbergee15b032010-09-03 10:00:09 +02003788 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003789 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003790}
Lars Ellenbergee15b032010-09-03 10:00:09 +02003791#endif
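/*
 * Usage sketch (illustration only): every in-core meta data change is
 * followed by drbd_md_mark_dirty(); the timer armed above then makes sure
 * drbd_md_sync() runs within five seconds (one second in DEBUG builds)
 * even if nobody calls it explicitly.
 */
#if 0
	/* ... modify something under mdev->ldev->md ... */
	drbd_md_mark_dirty(mdev);	/* sets MD_DIRTY, arms md_sync_timer */
	/* ... */
	drbd_md_sync(mdev);		/* or let the timer push w_md_sync to the worker */
#endif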
Philipp Reisnerb411b362009-09-25 16:07:19 -07003792
3793static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3794{
3795 int i;
3796
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003797 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003798 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003799}
3800
3801void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3802{
3803 if (idx == UI_CURRENT) {
3804 if (mdev->state.role == R_PRIMARY)
3805 val |= 1;
3806 else
3807 val &= ~((u64)1);
3808
3809 drbd_set_ed_uuid(mdev, val);
3810 }
3811
3812 mdev->ldev->md.uuid[idx] = val;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003813 drbd_md_mark_dirty(mdev);
3814}
3815
3816
3817void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3818{
3819 if (mdev->ldev->md.uuid[idx]) {
3820 drbd_uuid_move_history(mdev);
3821 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003822 }
3823 _drbd_uuid_set(mdev, idx, val);
3824}
3825
3826/**
3827 * drbd_uuid_new_current() - Creates a new current UUID
3828 * @mdev: DRBD device.
3829 *
3830 * Creates a new current UUID, and rotates the old current UUID into
3831 * the bitmap slot. Causes an incremental resync upon next connect.
3832 */
3833void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3834{
3835 u64 val;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003836 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003837
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003838 if (bm_uuid)
3839 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3840
Philipp Reisnerb411b362009-09-25 16:07:19 -07003841 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003842
3843 get_random_bytes(&val, sizeof(u64));
3844 _drbd_uuid_set(mdev, UI_CURRENT, val);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003845 drbd_print_uuids(mdev, "new current UUID");
Lars Ellenbergaaa8e2b2010-10-15 13:16:53 +02003846 /* get it to stable storage _now_ */
3847 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003848}
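/*
 * Illustration only (no real values): what the rotation above does to the
 * UUID slots.
 */
#if 0
	u64 old_current = mdev->ldev->md.uuid[UI_CURRENT];
	drbd_uuid_new_current(mdev);
	/* now uuid[UI_BITMAP] == old_current and uuid[UI_CURRENT] is a fresh
	 * random value (its low bit reflecting the role, see _drbd_uuid_set());
	 * an incremental resync follows on the next connect. */
#endif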
3849
3850void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3851{
3852 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3853 return;
3854
3855 if (val == 0) {
3856 drbd_uuid_move_history(mdev);
3857 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3858 mdev->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003859 } else {
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003860 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3861 if (bm_uuid)
3862 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003863
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003864 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003865 }
3866 drbd_md_mark_dirty(mdev);
3867}
3868
3869/**
3870 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3871 * @mdev: DRBD device.
3872 *
3873 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3874 */
3875int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3876{
3877 int rv = -EIO;
3878
3879 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3880 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3881 drbd_md_sync(mdev);
3882 drbd_bm_set_all(mdev);
3883
3884 rv = drbd_bm_write(mdev);
3885
3886 if (!rv) {
3887 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3888 drbd_md_sync(mdev);
3889 }
3890
3891 put_ldev(mdev);
3892 }
3893
3894 return rv;
3895}
3896
3897/**
3898 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3899 * @mdev: DRBD device.
3900 *
3901 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3902 */
3903int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3904{
3905 int rv = -EIO;
3906
Philipp Reisner07782862010-08-31 12:00:50 +02003907 drbd_resume_al(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003908 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3909 drbd_bm_clear_all(mdev);
3910 rv = drbd_bm_write(mdev);
3911 put_ldev(mdev);
3912 }
3913
3914 return rv;
3915}
3916
3917static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3918{
3919 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
Lars Ellenberg02851e92010-12-16 14:47:39 +01003920 int rv = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003921
3922 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3923
Lars Ellenberg02851e92010-12-16 14:47:39 +01003924 if (get_ldev(mdev)) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003925 drbd_bm_lock(mdev, work->why, work->flags);
Lars Ellenberg02851e92010-12-16 14:47:39 +01003926 rv = work->io_fn(mdev);
3927 drbd_bm_unlock(mdev);
3928 put_ldev(mdev);
3929 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003930
3931 clear_bit(BITMAP_IO, &mdev->flags);
Philipp Reisner127b3172010-11-16 10:07:53 +01003932 smp_mb__after_clear_bit();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003933 wake_up(&mdev->misc_wait);
3934
3935 if (work->done)
3936 work->done(mdev, rv);
3937
3938 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3939 work->why = NULL;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003940 work->flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003941
3942 return 1;
3943}
3944
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003945void drbd_ldev_destroy(struct drbd_conf *mdev)
3946{
3947 lc_destroy(mdev->resync);
3948 mdev->resync = NULL;
3949 lc_destroy(mdev->act_log);
3950 mdev->act_log = NULL;
3951 __no_warn(local,
3952 drbd_free_bc(mdev->ldev);
3953 mdev->ldev = NULL;);
3954
3955 if (mdev->md_io_tmpp) {
3956 __free_page(mdev->md_io_tmpp);
3957 mdev->md_io_tmpp = NULL;
3958 }
3959 clear_bit(GO_DISKLESS, &mdev->flags);
3960}
3961
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003962static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3963{
3964 D_ASSERT(mdev->state.disk == D_FAILED);
Lars Ellenberg9d282872010-10-14 13:57:07 +02003965 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3966 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003967 * the protected members anymore, though, so once put_ldev reaches zero
3968 * again, it will be safe to free them. */
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003969 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003970 return 1;
3971}
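/*
 * Sketch of the reference pattern the comment above relies on; the same
 * pattern appears throughout this file, e.g. in drbd_congested():
 */
#if 0
	if (get_ldev(mdev)) {	/* reference taken only while the disk is usable */
		/* ... safe to touch mdev->ldev, act_log, resync here ... */
		put_ldev(mdev);	/* after D_DISKLESS, dropping the last
				 * reference makes freeing them safe */
	}
#endif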
3972
3973void drbd_go_diskless(struct drbd_conf *mdev)
3974{
3975 D_ASSERT(mdev->state.disk == D_FAILED);
3976 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
Lars Ellenberg9d282872010-10-14 13:57:07 +02003977 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003978}
3979
Philipp Reisnerb411b362009-09-25 16:07:19 -07003980/**
3981 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3982 * @mdev: DRBD device.
3983 * @io_fn: IO callback to be called when bitmap IO is possible
3984 * @done: callback to be called after the bitmap IO was performed
3985 * @why: Descriptive text of the reason for doing the IO
3986 *
3987 * While IO on the bitmap happens we freeze application IO, thus ensuring
3988 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3989 * called from worker context. It MUST NOT be used while a previous such
3990 * work is still pending!
3991 */
3992void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3993 int (*io_fn)(struct drbd_conf *),
3994 void (*done)(struct drbd_conf *, int),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003995 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003996{
3997 D_ASSERT(current == mdev->worker.task);
3998
3999 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4000 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4001 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4002 if (mdev->bm_io_work.why)
4003 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4004 why, mdev->bm_io_work.why);
4005
4006 mdev->bm_io_work.io_fn = io_fn;
4007 mdev->bm_io_work.done = done;
4008 mdev->bm_io_work.why = why;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004009 mdev->bm_io_work.flags = flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004010
Philipp Reisner22afd7e2010-11-16 15:30:44 +01004011 spin_lock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004012 set_bit(BITMAP_IO, &mdev->flags);
4013 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
Philipp Reisner127b3172010-11-16 10:07:53 +01004014 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004015 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004016 }
Philipp Reisner22afd7e2010-11-16 15:30:44 +01004017 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004018}
4019
4020/**
4021 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4022 * @mdev: DRBD device.
4023 * @io_fn: IO callback to be called when bitmap IO is possible
4024 * @why: Descriptive text of the reason for doing the IO
4025 *
4026 * Freezes application IO while the actual IO operation runs. This
4027 * function MAY NOT be called from worker context.
4028 */
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004029int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
4030 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004031{
4032 int rv;
4033
4034 D_ASSERT(current != mdev->worker.task);
4035
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004036 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4037 drbd_suspend_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004038
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004039 drbd_bm_lock(mdev, why, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004040 rv = io_fn(mdev);
4041 drbd_bm_unlock(mdev);
4042
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004043 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4044 drbd_resume_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004045
4046 return rv;
4047}
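/*
 * Usage sketch (illustration only): the two entry points above combined
 * with the io_fn helpers defined earlier in this file.  The flag value is
 * just an example; passing NULL for @done is allowed, see w_bitmap_io().
 */
#if 0
	/* from worker context: queue it */
	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
			     "example: set_n_write", BM_LOCKED_SET_ALLOWED);

	/* from any other context: run it synchronously */
	drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
		       "example: clear_n_write", BM_LOCKED_SET_ALLOWED);
#endif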
4048
4049void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4050{
4051 if ((mdev->ldev->md.flags & flag) != flag) {
4052 drbd_md_mark_dirty(mdev);
4053 mdev->ldev->md.flags |= flag;
4054 }
4055}
4056
4057void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4058{
4059 if ((mdev->ldev->md.flags & flag) != 0) {
4060 drbd_md_mark_dirty(mdev);
4061 mdev->ldev->md.flags &= ~flag;
4062 }
4063}
4064int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4065{
4066 return (bdev->md.flags & flag) != 0;
4067}
4068
4069static void md_sync_timer_fn(unsigned long data)
4070{
4071 struct drbd_conf *mdev = (struct drbd_conf *) data;
4072
4073 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4074}
4075
4076static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4077{
4078 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
Lars Ellenbergee15b032010-09-03 10:00:09 +02004079#ifdef DEBUG
4080 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4081 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4082#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07004083 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004084 return 1;
4085}
4086
4087#ifdef CONFIG_DRBD_FAULT_INJECTION
4088/* Fault insertion support including random number generator shamelessly
4089 * stolen from kernel/rcutorture.c */
4090struct fault_random_state {
4091 unsigned long state;
4092 unsigned long count;
4093};
4094
4095#define FAULT_RANDOM_MULT 39916801 /* prime */
4096#define FAULT_RANDOM_ADD 479001701 /* prime */
4097#define FAULT_RANDOM_REFRESH 10000
4098
4099/*
4100 * Crude but fast random-number generator. Uses a linear congruential
4101 * generator, with occasional help from get_random_bytes().
4102 */
4103static unsigned long
4104_drbd_fault_random(struct fault_random_state *rsp)
4105{
4106 long refresh;
4107
Roel Kluin49829ea2009-12-15 22:55:44 +01004108 if (!rsp->count--) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004109 get_random_bytes(&refresh, sizeof(refresh));
4110 rsp->state += refresh;
4111 rsp->count = FAULT_RANDOM_REFRESH;
4112 }
4113 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4114 return swahw32(rsp->state);
4115}
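/*
 * Sketch (not compiled): how the generator above feeds the decision in
 * _drbd_insert_fault() below.  The output is reduced to 1..100 and
 * compared against the fault_rate module parameter, so fault_rate is
 * effectively a percentage.
 */
#if 0
static bool example_should_inject(void)
{
	static struct fault_random_state rs = {0, 0};
	unsigned int roll = (_drbd_fault_random(&rs) % 100) + 1;	/* 1..100 */

	return roll <= fault_rate;	/* true for roughly fault_rate percent of calls */
}
#endif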
4116
4117static char *
4118_drbd_fault_str(unsigned int type) {
4119 static char *_faults[] = {
4120 [DRBD_FAULT_MD_WR] = "Meta-data write",
4121 [DRBD_FAULT_MD_RD] = "Meta-data read",
4122 [DRBD_FAULT_RS_WR] = "Resync write",
4123 [DRBD_FAULT_RS_RD] = "Resync read",
4124 [DRBD_FAULT_DT_WR] = "Data write",
4125 [DRBD_FAULT_DT_RD] = "Data read",
4126 [DRBD_FAULT_DT_RA] = "Data read ahead",
4127 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
Philipp Reisner6b4388a2010-04-26 14:11:45 +02004128 [DRBD_FAULT_AL_EE] = "EE allocation",
4129 [DRBD_FAULT_RECEIVE] = "receive data corruption",
Philipp Reisnerb411b362009-09-25 16:07:19 -07004130 };
4131
4132 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4133}
4134
4135unsigned int
4136_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4137{
4138 static struct fault_random_state rrs = {0, 0};
4139
4140 unsigned int ret = (
4141 (fault_devs == 0 ||
4142 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4143 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4144
4145 if (ret) {
4146 fault_count++;
4147
Lars Ellenberg73835062010-05-27 11:51:56 +02004148 if (__ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004149 dev_warn(DEV, "***Simulating %s failure\n",
4150 _drbd_fault_str(type));
4151 }
4152
4153 return ret;
4154}
4155#endif
4156
4157const char *drbd_buildtag(void)
4158{
4159	/* DRBD built from external sources has a reference here to the
4160	   git hash of the source code. */
4161
4162 static char buildtag[38] = "\0uilt-in";
4163
4164 if (buildtag[0] == 0) {
4165#ifdef CONFIG_MODULES
4166 if (THIS_MODULE != NULL)
4167 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4168 else
4169#endif
4170 buildtag[0] = 'b';
4171 }
4172
4173 return buildtag;
4174}
4175
4176module_init(drbd_init)
4177module_exit(drbd_cleanup)
4178
Philipp Reisnerb411b362009-09-25 16:07:19 -07004179EXPORT_SYMBOL(drbd_conn_str);
4180EXPORT_SYMBOL(drbd_role_str);
4181EXPORT_SYMBOL(drbd_disk_str);
4182EXPORT_SYMBOL(drbd_set_st_err_str);