/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details;       /* Detail level in proc drbd*/

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct drbd_conf **minor_table;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* epoch entries */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

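/* rate limit state for noisy log messages: bursts of at most 5 per 5*HZ jiffies */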
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

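/* local equivalent of the kernel's ARRAY_SIZE(): number of elements in array A */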
#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * DOC: The transfer log
 *
 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular double linked list of requests
 * attached.
 */
static int tl_init(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	mdev->oldest_tle = b;
	mdev->newest_tle = b;
	INIT_LIST_HEAD(&mdev->out_of_sequence_requests);

	return 1;
}

static void tl_cleanup(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
	kfree(mdev->oldest_tle);
	mdev->oldest_tle = NULL;
	kfree(mdev->unused_spare_tle);
	mdev->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @mdev:	DRBD device.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = mdev->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (mdev->newest_tle != new) {
		mdev->newest_tle->next = new;
		mdev->newest_tle = new;
	}
}

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @mdev:	DRBD device.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		       unsigned int set_size)
{
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	b = mdev->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
			barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, barrier_acked);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the lists head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mode(connection_lost_while_pending).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, barrier_acked) above.
	   */
	list_del_init(&b->requests);

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(mdev, b);
		if (nob)
			mdev->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore mdev->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		mdev->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&mdev->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&mdev->req_lock);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @mdev:	DRBD device.
 * @what:       The action/event to perform with all request objects
 *
 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
 * restart_frozen_disk_io.
 */
static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = mdev->oldest_tle;
	pn = &mdev->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == resend) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(mdev);
					set_bit(CREATE_BARRIER, &mdev->flags);
				}

				drbd_queue_work(&mdev->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(mdev);

			if (b == mdev->newest_tle) {
				/* recycle, but reinit! */
				D_ASSERT(tmp == NULL);
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}


/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @mdev:	DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_conf *mdev)
{
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	_tl_restart(mdev, connection_lost_while_pending);

	/* we expect this list to be empty. */
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, connection_lost_while_pending);
	}

	/* ensure bit indicating barrier is required is clear */
	clear_bit(CREATE_BARRIER, &mdev->flags);

	spin_unlock_irq(&mdev->req_lock);
}

void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	spin_lock_irq(&mdev->req_lock);
	_tl_restart(mdev, what);
	spin_unlock_irq(&mdev->req_lock);
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}

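/* Cluster-wide state changes (as classified by cl_wide_st_chg() above) are
 * first proposed to the peer: drbd_req_state() sends the request via
 * drbd_send_state_req() and then waits in _req_st_cond() for the peer's
 * answer before committing the new state locally. */
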
enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	rv = _drbd_set_state(mdev, ns, f, NULL);
	ns = mdev->state;
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}

static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
						    union drbd_state,
						    union drbd_state);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, const char **warn_sync_abort);
int drbd_send_state_req(struct drbd_conf *,
			union drbd_state, union drbd_state);

static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	rv = 0;
	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (!rv) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_state_transition(mdev, ns, os);
			if (rv == SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}

/**
 * drbd_req_state() - Perform an eventually cluster wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(&mdev->state_mutex);

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_state_transition(mdev, ns, os);
		spin_unlock_irqrestore(&mdev->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		drbd_state_lock(mdev);
		if (!drbd_send_state_req(mdev, mask, val)) {
			drbd_state_unlock(mdev);
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			drbd_state_unlock(mdev);
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->req_lock, flags);
		os = mdev->state;
		ns.i = (os.i & ~mask.i) | val.i;
		rv = _drbd_set_state(mdev, ns, f, &done);
		drbd_state_unlock(mdev);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(&mdev->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}

static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-'
	    );
}

void print_st_err(struct drbd_conf *mdev, union drbd_state os,
		  union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}


/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		if (!mdev->net_conf->two_primaries &&
		    ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
			rv = SS_TWO_PRIMARIES;
		put_net_conf(mdev);
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->sync_conf.verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

	return rv;
}

/**
 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
			  union drbd_state os)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	return rv;
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn_sync_abort:
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, const char **warn_sync_abort)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Disallow Network errors to configure a device's network part */
	if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
	    os.conn <= C_DISCONNECTING)
		ns.conn = os.conn;

	/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
	 * If you try to go into some Sync* state, that shall fail (elsewhere). */
	if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
		ns.conn = os.conn;

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		ns.disk = D_DISKLESS;

	/* if we are only D_ATTACHING yet,
	 * we can (and should) go directly to D_DISKLESS. */
	if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
		ns.disk = D_DISKLESS;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
		ns.conn = os.conn;

	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* Abort resync if a disk fails/detaches */
	if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
	    (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn_sync_abort)
			*warn_sync_abort =
				os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
				"Online-verify" : "Resync";
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		dev_warn(DEV, "Implicitly set disk from %s to %s\n",
			 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
			 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}

/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (mdev->agreed_pro_version < 90)
		mdev->ov_start_sector = 0;
	mdev->rs_total = drbd_bm_bits(mdev);
	mdev->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total) {
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
			mdev->rs_total = 1;
		} else
			mdev->rs_total -= bit;
		mdev->ov_position = mdev->ov_start_sector;
	}
	mdev->ov_left = mdev->rs_total;
}

static void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}

/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after the after_state_ch() finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
		 enum chg_state_flags flags, struct completion *done)
{
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	const char *warn_sync_abort = NULL;
	struct after_state_chg_work *ascw;

	os = mdev->state;

	ns = sanitize_state(mdev, os, ns, &warn_sync_abort);

	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	if (!(flags & CS_HARD)) {
		/*  pre-state-change checks ; only look at ns  */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen...*/

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_state_transition(mdev, ns, os);
		} else
			rv = is_valid_state_transition(mdev, ns, os);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	if (warn_sync_abort)
		dev_warn(DEV, "%s aborted.\n", warn_sync_abort);

	{
	char *pbp, pb[300];
	pbp = pb;
	*pbp = 0;
	if (ns.role != os.role)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));
	if (is_susp(ns) != is_susp(os))
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));
	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);
	dev_info(DEV, "%s\n", pb);
	}

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
		set_bit(DEVICE_DYING, &mdev->flags);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	mdev->state = ns;

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(mdev, "attached to UUIDs");

	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(mdev, ns.conn);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->ov_left;
			mdev->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(mdev);

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->done = done;
		drbd_queue_work(&mdev->data.work, &ascw->w);
	} else {
		dev_warn(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}

static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 1;
}

static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}

int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
		int (*io_fn)(struct drbd_conf *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current == mdev->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}

/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	enum drbd_fencing_p fp;
	enum drbd_req_event what = nothing;
	union drbd_state nsm = (union drbd_state){ .i = -1 };

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Inform userspace about the change... */
	drbd_bcast_state(mdev, ns);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	nsm.i = -1;
	if (ns.susp_nod) {
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
			what = resend;

		if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
			what = restart_frozen_disk_io;

		if (what != nothing)
			nsm.susp_nod = 0;
	}

	if (ns.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
			tl_clear(mdev);
			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
				drbd_uuid_new_current(mdev);
				clear_bit(NEW_CUR_UUID, &mdev->flags);
			}
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
		/* case2: The connection was established again: */
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			clear_bit(NEW_CUR_UUID, &mdev->flags);
			what = resend;
			nsm.susp_fen = 0;
		}
	}

	if (what != nothing) {
		spin_lock_irq(&mdev->req_lock);
		_tl_restart(mdev, what);
		nsm.i &= mdev->state.i;
		_drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);
	}

	/* Became sync source.  With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
		drbd_gen_and_send_sync_uuid(mdev);
		put_ldev(mdev);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}
	/* No point in queuing send_bitmap if we don't have a connection
	 * anymore, so check also the _current_ state, not only the new state
	 * at the time this work was queued. */
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
	    mdev->state.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
				"send_bitmap (WFBitMapS)",
				BM_LOCKED_TEST_ALLOWED);

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (is_susp(mdev->state)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
				}
			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		}

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			/* We may still be Primary ourselves.
			 * No harm done if the bitmap still changes,
			 * redirtied pages will follow later. */
			drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote diskless peer", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}

Lars Ellenberg06d33e92010-12-18 17:00:59 +01001451 /* Write out all changed bits on demote.
1452 * Though, no need to da that just yet
1453 * if there is a resync going on still */
1454 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1455 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001456 /* No changes to the bitmap expected this time, so assert that,
1457 * even though no harm was done if it did change. */
1458 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1459 "demote", BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001460 put_ldev(mdev);
1461 }
1462
1463 /* Last part of the attaching process ... */
1464 if (ns.conn >= C_CONNECTED &&
1465 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
Philipp Reisnere89b5912010-03-24 17:11:33 +01001466 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001467 drbd_send_uuids(mdev);
1468 drbd_send_state(mdev);
1469 }
1470
1471 /* We want to pause/continue resync, tell peer. */
1472 if (ns.conn >= C_CONNECTED &&
1473 ((os.aftr_isp != ns.aftr_isp) ||
1474 (os.user_isp != ns.user_isp)))
1475 drbd_send_state(mdev);
1476
1477 /* In case one of the isp bits got set, suspend other devices. */
1478 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1479 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1480 suspend_other_sg(mdev);
1481
 1482	/* Make sure the peer gets informed about any state
 1483	   changes (ISP bits) that happened while we were in WFReportParams. */
1484 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1485 drbd_send_state(mdev);
1486
Philipp Reisner67531712010-10-27 12:21:30 +02001487 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1488 drbd_send_state(mdev);
1489
Philipp Reisnerb411b362009-09-25 16:07:19 -07001490	/* We are in the process of starting a full sync... */
1491 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1492 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001493 /* no other bitmap changes expected during this phase */
1494 drbd_queue_bitmap_io(mdev,
1495 &drbd_bmio_set_n_write, &abw_start_sync,
1496 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001497
 1498	/* We are invalidating ourselves... */
1499 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1500 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001501 /* other bitmap operation expected during this phase */
1502 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1503 "set_n_write from invalidate", BM_LOCKED_MASK);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001504
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001505 /* first half of local IO error, failure to attach,
1506 * or administrative detach */
1507 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1508 enum drbd_io_error_p eh;
1509 int was_io_error;
1510 /* corresponding get_ldev was in __drbd_set_state, to serialize
1511 * our cleanup here with the transition to D_DISKLESS,
 1512	 * so it is safe to dereference ldev here. */
1513 eh = mdev->ldev->dc.on_io_error;
1514 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1515
1516 /* current state still has to be D_FAILED,
1517 * there is only one way out: to D_DISKLESS,
1518 * and that may only happen after our put_ldev below. */
1519 if (mdev->state.disk != D_FAILED)
1520 dev_err(DEV,
1521 "ASSERT FAILED: disk is %s during detach\n",
1522 drbd_disk_str(mdev->state.disk));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001523
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001524 if (drbd_send_state(mdev))
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001525 dev_warn(DEV, "Notified peer that I am detaching my disk\n");
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001526 else
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001527 dev_err(DEV, "Sending state for detaching disk failed\n");
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001528
1529 drbd_rs_cancel_all(mdev);
1530
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001531 /* In case we want to get something to stable storage still,
1532 * this may be the last chance.
1533 * Following put_ldev may transition to D_DISKLESS. */
1534 drbd_md_sync(mdev);
1535 put_ldev(mdev);
1536
1537 if (was_io_error && eh == EP_CALL_HELPER)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001538 drbd_khelper(mdev, "local-io-error");
1539 }
1540
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001541 /* second half of local IO error, failure to attach,
1542 * or administrative detach,
1543 * after local_cnt references have reached zero again */
1544 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1545 /* We must still be diskless,
1546 * re-attach has to be serialized with this! */
1547 if (mdev->state.disk != D_DISKLESS)
1548 dev_err(DEV,
1549 "ASSERT FAILED: disk is %s while going diskless\n",
1550 drbd_disk_str(mdev->state.disk));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001551
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001552 mdev->rs_total = 0;
1553 mdev->rs_failed = 0;
1554 atomic_set(&mdev->rs_pending_cnt, 0);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001555
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001556 if (drbd_send_state(mdev))
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001557 dev_warn(DEV, "Notified peer that I'm now diskless.\n");
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001558 /* corresponding get_ldev in __drbd_set_state
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001559 * this may finally trigger drbd_ldev_destroy. */
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001560 put_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001561 }
1562
Philipp Reisner738a84b2011-03-03 00:21:30 +01001563	/* Notify peer that I had a local IO error, and did not detach. */
1564 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
1565 drbd_send_state(mdev);
1566
Philipp Reisnerb411b362009-09-25 16:07:19 -07001567 /* Disks got bigger while they were detached */
1568 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1569 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1570 if (ns.conn == C_CONNECTED)
1571 resync_after_online_grow(mdev);
1572 }
1573
1574 /* A resync finished or aborted, wake paused devices... */
1575 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1576 (os.peer_isp && !ns.peer_isp) ||
1577 (os.user_isp && !ns.user_isp))
1578 resume_next_sg(mdev);
1579
Lars Ellenbergaf85e8e2010-10-07 16:07:55 +02001580 /* sync target done with resync. Explicitly notify peer, even though
1581 * it should (at least for non-empty resyncs) already know itself. */
1582 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1583 drbd_send_state(mdev);
1584
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001585 /* This triggers bitmap writeout of potentially still unwritten pages
1586 * if the resync finished cleanly, or aborted because of peer disk
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001587 * failure, or because of connection loss.
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001588 * For resync aborted because of local disk failure, we cannot do
1589 * any bitmap writeout anymore.
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001590 * No harm done if some bits change during this phase.
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001591 */
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001592 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1593 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1594 "write from resync_finished", BM_LOCKED_SET_ALLOWED);
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001595 put_ldev(mdev);
1596 }
Lars Ellenberg02851e92010-12-16 14:47:39 +01001597
Philipp Reisnerb411b362009-09-25 16:07:19 -07001598 /* Upon network connection, we need to start the receiver */
1599 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1600 drbd_thread_start(&mdev->receiver);
1601
1602 /* Terminate worker thread if we are unconfigured - it will be
1603 restarted as needed... */
1604 if (ns.disk == D_DISKLESS &&
1605 ns.conn == C_STANDALONE &&
1606 ns.role == R_SECONDARY) {
1607 if (os.aftr_isp != ns.aftr_isp)
1608 resume_next_sg(mdev);
1609 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1610 if (test_bit(DEVICE_DYING, &mdev->flags))
1611 drbd_thread_stop_nowait(&mdev->worker);
1612 }
1613
1614 drbd_md_sync(mdev);
1615}
1616
1617
1618static int drbd_thread_setup(void *arg)
1619{
1620 struct drbd_thread *thi = (struct drbd_thread *) arg;
1621 struct drbd_conf *mdev = thi->mdev;
1622 unsigned long flags;
1623 int retval;
1624
1625restart:
1626 retval = thi->function(thi);
1627
1628 spin_lock_irqsave(&thi->t_lock, flags);
1629
1630 /* if the receiver has been "Exiting", the last thing it did
1631 * was set the conn state to "StandAlone",
1632 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1633 * and receiver thread will be "started".
1634 * drbd_thread_start needs to set "Restarting" in that case.
1635 * t_state check and assignment needs to be within the same spinlock,
1636 * so either thread_start sees Exiting, and can remap to Restarting,
 1637	 * or thread_start sees None, and can proceed as normal.
1638 */
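	/* A hedged sketch of the t_state transitions involved, reconstructed
	 * only from this function, drbd_thread_start() and _drbd_thread_stop()
	 * below (not an authoritative state chart):
	 *
	 *   None --drbd_thread_start()--> Running
	 *   Running --_drbd_thread_stop()--> Exiting (or Restarting)
	 *   Exiting --drbd_thread_start() before we got here--> Restarting
	 *   Restarting --(checked right below)--> Running, goto restart
	 *   anything else once thi->function() returned --> None, complete(&thi->stop)
	 */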
1639
1640 if (thi->t_state == Restarting) {
1641 dev_info(DEV, "Restarting %s\n", current->comm);
1642 thi->t_state = Running;
1643 spin_unlock_irqrestore(&thi->t_lock, flags);
1644 goto restart;
1645 }
1646
1647 thi->task = NULL;
1648 thi->t_state = None;
1649 smp_mb();
1650 complete(&thi->stop);
1651 spin_unlock_irqrestore(&thi->t_lock, flags);
1652
1653 dev_info(DEV, "Terminating %s\n", current->comm);
1654
1655 /* Release mod reference taken when thread was started */
1656 module_put(THIS_MODULE);
1657 return retval;
1658}
1659
1660static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1661 int (*func) (struct drbd_thread *))
1662{
1663 spin_lock_init(&thi->t_lock);
1664 thi->task = NULL;
1665 thi->t_state = None;
1666 thi->function = func;
1667 thi->mdev = mdev;
1668}
1669
1670int drbd_thread_start(struct drbd_thread *thi)
1671{
1672 struct drbd_conf *mdev = thi->mdev;
1673 struct task_struct *nt;
1674 unsigned long flags;
1675
1676 const char *me =
1677 thi == &mdev->receiver ? "receiver" :
1678 thi == &mdev->asender ? "asender" :
1679 thi == &mdev->worker ? "worker" : "NONSENSE";
1680
1681 /* is used from state engine doing drbd_thread_stop_nowait,
1682 * while holding the req lock irqsave */
1683 spin_lock_irqsave(&thi->t_lock, flags);
1684
1685 switch (thi->t_state) {
1686 case None:
1687 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1688 me, current->comm, current->pid);
1689
1690 /* Get ref on module for thread - this is released when thread exits */
1691 if (!try_module_get(THIS_MODULE)) {
1692 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1693 spin_unlock_irqrestore(&thi->t_lock, flags);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001694 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001695 }
1696
1697 init_completion(&thi->stop);
1698 D_ASSERT(thi->task == NULL);
1699 thi->reset_cpu_mask = 1;
1700 thi->t_state = Running;
1701 spin_unlock_irqrestore(&thi->t_lock, flags);
1702 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1703
1704 nt = kthread_create(drbd_thread_setup, (void *) thi,
1705 "drbd%d_%s", mdev_to_minor(mdev), me);
1706
1707 if (IS_ERR(nt)) {
1708 dev_err(DEV, "Couldn't start thread\n");
1709
1710 module_put(THIS_MODULE);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001711 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001712 }
1713 spin_lock_irqsave(&thi->t_lock, flags);
1714 thi->task = nt;
1715 thi->t_state = Running;
1716 spin_unlock_irqrestore(&thi->t_lock, flags);
1717 wake_up_process(nt);
1718 break;
1719 case Exiting:
1720 thi->t_state = Restarting;
1721 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1722 me, current->comm, current->pid);
1723 /* fall through */
1724 case Running:
1725 case Restarting:
1726 default:
1727 spin_unlock_irqrestore(&thi->t_lock, flags);
1728 break;
1729 }
1730
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001731 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001732}
1733
1734
1735void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1736{
1737 unsigned long flags;
1738
1739 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1740
1741 /* may be called from state engine, holding the req lock irqsave */
1742 spin_lock_irqsave(&thi->t_lock, flags);
1743
1744 if (thi->t_state == None) {
1745 spin_unlock_irqrestore(&thi->t_lock, flags);
1746 if (restart)
1747 drbd_thread_start(thi);
1748 return;
1749 }
1750
1751 if (thi->t_state != ns) {
1752 if (thi->task == NULL) {
1753 spin_unlock_irqrestore(&thi->t_lock, flags);
1754 return;
1755 }
1756
1757 thi->t_state = ns;
1758 smp_mb();
1759 init_completion(&thi->stop);
1760 if (thi->task != current)
1761 force_sig(DRBD_SIGKILL, thi->task);
1762
1763 }
1764
1765 spin_unlock_irqrestore(&thi->t_lock, flags);
1766
1767 if (wait)
1768 wait_for_completion(&thi->stop);
1769}
1770
1771#ifdef CONFIG_SMP
1772/**
1773 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1774 * @mdev: DRBD device.
1775 *
1776 * Forces all threads of a device onto the same CPU. This is beneficial for
 1777	 * DRBD's performance. May be overridden by the user's configuration.
1778 */
1779void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1780{
1781 int ord, cpu;
1782
1783 /* user override. */
1784 if (cpumask_weight(mdev->cpu_mask))
1785 return;
1786
1787 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1788 for_each_online_cpu(cpu) {
1789 if (ord-- == 0) {
1790 cpumask_set_cpu(cpu, mdev->cpu_mask);
1791 return;
1792 }
1793 }
1794 /* should not be reached */
1795 cpumask_setall(mdev->cpu_mask);
1796}
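
/* Hypothetical worked example of the assignment above (illustration only,
 * the CPU numbers are made up): with CPUs 0-3 online and no user override,
 * minor 1 gets ord = 1 % 4 = 1 and selects the second online CPU (CPU 1)
 * for its mask; minor 5 gets ord = 5 % 4 = 1 and selects that same CPU.
 * All three threads of a device then actually move there via
 * drbd_thread_current_set_cpu() below. */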
1797
1798/**
1799 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1800 * @mdev: DRBD device.
1801 *
1802 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1803 * prematurely.
1804 */
1805void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1806{
1807 struct task_struct *p = current;
1808 struct drbd_thread *thi =
1809 p == mdev->asender.task ? &mdev->asender :
1810 p == mdev->receiver.task ? &mdev->receiver :
1811 p == mdev->worker.task ? &mdev->worker :
1812 NULL;
1813 ERR_IF(thi == NULL)
1814 return;
1815 if (!thi->reset_cpu_mask)
1816 return;
1817 thi->reset_cpu_mask = 0;
1818 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1819}
1820#endif
1821
1822/* the appropriate socket mutex must be held already */
1823int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001824 enum drbd_packets cmd, struct p_header80 *h,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001825 size_t size, unsigned msg_flags)
1826{
1827 int sent, ok;
1828
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001829 ERR_IF(!h) return false;
1830 ERR_IF(!size) return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001831
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01001832 h->magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001833 h->command = cpu_to_be16(cmd);
Philipp Reisner0b70a132010-08-20 13:36:10 +02001834 h->length = cpu_to_be16(size-sizeof(struct p_header80));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001835
Philipp Reisnerb411b362009-09-25 16:07:19 -07001836 sent = drbd_send(mdev, sock, h, size, msg_flags);
1837
1838 ok = (sent == size);
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001839 if (!ok && !signal_pending(current))
1840 dev_warn(DEV, "short sent %s size=%d sent=%d\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001841 cmdname(cmd), (int)size, sent);
1842 return ok;
1843}
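
/* Worked example of the length arithmetic above (illustration only): when
 * drbd_send_state() below hands in a struct p_state of sizeof(p) bytes, the
 * on-wire h->length becomes sizeof(p) - sizeof(struct p_header80), i.e. it
 * counts only the payload that follows the common header, not the header
 * itself. */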
1844
1845/* don't pass the socket. we may only look at it
1846 * when we hold the appropriate socket mutex.
1847 */
1848int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001849 enum drbd_packets cmd, struct p_header80 *h, size_t size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001850{
1851 int ok = 0;
1852 struct socket *sock;
1853
1854 if (use_data_socket) {
1855 mutex_lock(&mdev->data.mutex);
1856 sock = mdev->data.socket;
1857 } else {
1858 mutex_lock(&mdev->meta.mutex);
1859 sock = mdev->meta.socket;
1860 }
1861
1862 /* drbd_disconnect() could have called drbd_free_sock()
1863 * while we were waiting in down()... */
1864 if (likely(sock != NULL))
1865 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1866
1867 if (use_data_socket)
1868 mutex_unlock(&mdev->data.mutex);
1869 else
1870 mutex_unlock(&mdev->meta.mutex);
1871 return ok;
1872}
1873
1874int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1875 size_t size)
1876{
Philipp Reisner0b70a132010-08-20 13:36:10 +02001877 struct p_header80 h;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001878 int ok;
1879
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01001880 h.magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001881 h.command = cpu_to_be16(cmd);
1882 h.length = cpu_to_be16(size);
1883
1884 if (!drbd_get_data_sock(mdev))
1885 return 0;
1886
Philipp Reisnerb411b362009-09-25 16:07:19 -07001887 ok = (sizeof(h) ==
1888 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1889 ok = ok && (size ==
1890 drbd_send(mdev, mdev->data.socket, data, size, 0));
1891
1892 drbd_put_data_sock(mdev);
1893
1894 return ok;
1895}
1896
1897int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1898{
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001899 struct p_rs_param_95 *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001900 struct socket *sock;
1901 int size, rv;
1902 const int apv = mdev->agreed_pro_version;
1903
1904 size = apv <= 87 ? sizeof(struct p_rs_param)
1905 : apv == 88 ? sizeof(struct p_rs_param)
1906 + strlen(mdev->sync_conf.verify_alg) + 1
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001907 : apv <= 94 ? sizeof(struct p_rs_param_89)
1908 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001909
1910 /* used from admin command context and receiver/worker context.
1911 * to avoid kmalloc, grab the socket right here,
1912 * then use the pre-allocated sbuf there */
1913 mutex_lock(&mdev->data.mutex);
1914 sock = mdev->data.socket;
1915
1916 if (likely(sock != NULL)) {
1917 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1918
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001919 p = &mdev->data.sbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001920
1921 /* initialize verify_alg and csums_alg */
1922 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1923
1924 p->rate = cpu_to_be32(sc->rate);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001925 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1926 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1927 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1928 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001929
1930 if (apv >= 88)
1931 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1932 if (apv >= 89)
1933 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1934
1935 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1936 } else
1937 rv = 0; /* not ok */
1938
1939 mutex_unlock(&mdev->data.mutex);
1940
1941 return rv;
1942}
1943
1944int drbd_send_protocol(struct drbd_conf *mdev)
1945{
1946 struct p_protocol *p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01001947 int size, cf, rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001948
1949 size = sizeof(struct p_protocol);
1950
1951 if (mdev->agreed_pro_version >= 87)
1952 size += strlen(mdev->net_conf->integrity_alg) + 1;
1953
1954 /* we must not recurse into our own queue,
1955 * as that is blocked during handshake */
1956 p = kmalloc(size, GFP_NOIO);
1957 if (p == NULL)
1958 return 0;
1959
1960 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
1961 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
1962 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
1963 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001964 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1965
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01001966 cf = 0;
1967 if (mdev->net_conf->want_lose)
1968 cf |= CF_WANT_LOSE;
1969 if (mdev->net_conf->dry_run) {
1970 if (mdev->agreed_pro_version >= 92)
1971 cf |= CF_DRY_RUN;
1972 else {
1973 dev_err(DEV, "--dry-run is not supported by peer");
Dan Carpenter7ac314c2010-04-22 14:27:23 +02001974 kfree(p);
Philipp Reisner148efa12011-01-15 00:21:15 +01001975 return -1;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01001976 }
1977 }
1978 p->conn_flags = cpu_to_be32(cf);
1979
Philipp Reisnerb411b362009-09-25 16:07:19 -07001980 if (mdev->agreed_pro_version >= 87)
1981 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1982
1983 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001984 (struct p_header80 *)p, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001985 kfree(p);
1986 return rv;
1987}
1988
1989int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1990{
1991 struct p_uuids p;
1992 int i;
1993
1994 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1995 return 1;
1996
1997 for (i = UI_CURRENT; i < UI_SIZE; i++)
1998 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
1999
2000 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2001 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2002 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2003 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2004 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2005 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2006
2007 put_ldev(mdev);
2008
2009 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002010 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002011}
2012
2013int drbd_send_uuids(struct drbd_conf *mdev)
2014{
2015 return _drbd_send_uuids(mdev, 0);
2016}
2017
2018int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2019{
2020 return _drbd_send_uuids(mdev, 8);
2021}
2022
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002023void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2024{
2025 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2026 u64 *uuid = mdev->ldev->md.uuid;
2027 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2028 text,
2029 (unsigned long long)uuid[UI_CURRENT],
2030 (unsigned long long)uuid[UI_BITMAP],
2031 (unsigned long long)uuid[UI_HISTORY_START],
2032 (unsigned long long)uuid[UI_HISTORY_END]);
2033 put_ldev(mdev);
2034 } else {
2035 dev_info(DEV, "%s effective data uuid: %016llX\n",
2036 text,
2037 (unsigned long long)mdev->ed_uuid);
2038 }
2039}
2040
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002041int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002042{
2043 struct p_rs_uuid p;
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002044 u64 uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002045
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002046 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
2047
Philipp Reisner4a23f262011-01-11 17:42:17 +01002048 uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002049 drbd_uuid_set(mdev, UI_BITMAP, uuid);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002050 drbd_print_uuids(mdev, "updated sync UUID");
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002051 drbd_md_sync(mdev);
2052 p.uuid = cpu_to_be64(uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002053
2054 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002055 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002056}
2057
Philipp Reisnere89b5912010-03-24 17:11:33 +01002058int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002059{
2060 struct p_sizes p;
2061 sector_t d_size, u_size;
Philipp Reisner99432fc2011-05-20 16:39:13 +02002062 int q_order_type, max_bio_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002063 int ok;
2064
2065 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2066 D_ASSERT(mdev->ldev->backing_bdev);
2067 d_size = drbd_get_max_capacity(mdev->ldev);
2068 u_size = mdev->ldev->dc.disk_size;
2069 q_order_type = drbd_queue_order_type(mdev);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002070 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2071 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002072 put_ldev(mdev);
2073 } else {
2074 d_size = 0;
2075 u_size = 0;
2076 q_order_type = QUEUE_ORDERED_NONE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02002077 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002078 }
2079
2080 p.d_size = cpu_to_be64(d_size);
2081 p.u_size = cpu_to_be64(u_size);
2082 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
Philipp Reisner99432fc2011-05-20 16:39:13 +02002083 p.max_bio_size = cpu_to_be32(max_bio_size);
Philipp Reisnere89b5912010-03-24 17:11:33 +01002084 p.queue_order_type = cpu_to_be16(q_order_type);
2085 p.dds_flags = cpu_to_be16(flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002086
2087 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002088 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002089 return ok;
2090}
2091
2092/**
2093 * drbd_send_state() - Sends the drbd state to the peer
2094 * @mdev: DRBD device.
2095 */
2096int drbd_send_state(struct drbd_conf *mdev)
2097{
2098 struct socket *sock;
2099 struct p_state p;
2100 int ok = 0;
2101
 2102	/* Grab state lock so we won't send state if we're in the middle
2103 * of a cluster wide state change on another thread */
2104 drbd_state_lock(mdev);
2105
2106 mutex_lock(&mdev->data.mutex);
2107
2108 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2109 sock = mdev->data.socket;
2110
2111 if (likely(sock != NULL)) {
2112 ok = _drbd_send_cmd(mdev, sock, P_STATE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002113 (struct p_header80 *)&p, sizeof(p), 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002114 }
2115
2116 mutex_unlock(&mdev->data.mutex);
2117
2118 drbd_state_unlock(mdev);
2119 return ok;
2120}
2121
2122int drbd_send_state_req(struct drbd_conf *mdev,
2123 union drbd_state mask, union drbd_state val)
2124{
2125 struct p_req_state p;
2126
2127 p.mask = cpu_to_be32(mask.i);
2128 p.val = cpu_to_be32(val.i);
2129
2130 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002131 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002132}
2133
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01002134int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002135{
2136 struct p_req_state_reply p;
2137
2138 p.retcode = cpu_to_be32(retcode);
2139
2140 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002141 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002142}
2143
2144int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2145 struct p_compressed_bm *p,
2146 struct bm_xfer_ctx *c)
2147{
2148 struct bitstream bs;
2149 unsigned long plain_bits;
2150 unsigned long tmp;
2151 unsigned long rl;
2152 unsigned len;
2153 unsigned toggle;
2154 int bits;
2155
2156 /* may we use this feature? */
2157 if ((mdev->sync_conf.use_rle == 0) ||
2158 (mdev->agreed_pro_version < 90))
2159 return 0;
2160
2161 if (c->bit_offset >= c->bm_bits)
2162 return 0; /* nothing to do. */
2163
 2164	/* use at most this many bytes */
2165 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2166 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2167 /* plain bits covered in this code string */
2168 plain_bits = 0;
2169
2170 /* p->encoding & 0x80 stores whether the first run length is set.
2171 * bit offset is implicit.
 2172	 * start with toggle == 2 to be able to tell apart the first iteration */
2173 toggle = 2;
2174
 2175	/* see how many plain bits we can stuff into one packet
2176 * using RLE and VLI. */
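	/* Hypothetical worked example (illustration only, not taken from the
	 * protocol documentation): for bitmap bits 0000 111111 0... the first
	 * run is 4 cleared bits, so DCBP_set_start(p, 0) and we VLI-encode the
	 * alternating run lengths 4, 6, ...  If the bitmap started with a set
	 * bit instead, DCBP_set_start(p, 1) is used and the zero-length first
	 * run is not encoded.  Should the VLI codes need more bits than the
	 * plain bits they cover, the check further down (plain_bits < len << 3)
	 * makes the caller fall back to sending plain bitmap words. */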
2177 do {
2178 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2179 : _drbd_bm_find_next(mdev, c->bit_offset);
2180 if (tmp == -1UL)
2181 tmp = c->bm_bits;
2182 rl = tmp - c->bit_offset;
2183
2184 if (toggle == 2) { /* first iteration */
2185 if (rl == 0) {
2186 /* the first checked bit was set,
2187 * store start value, */
2188 DCBP_set_start(p, 1);
2189 /* but skip encoding of zero run length */
2190 toggle = !toggle;
2191 continue;
2192 }
2193 DCBP_set_start(p, 0);
2194 }
2195
2196 /* paranoia: catch zero runlength.
2197 * can only happen if bitmap is modified while we scan it. */
2198 if (rl == 0) {
2199 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2200 "t:%u bo:%lu\n", toggle, c->bit_offset);
2201 return -1;
2202 }
2203
2204 bits = vli_encode_bits(&bs, rl);
2205 if (bits == -ENOBUFS) /* buffer full */
2206 break;
2207 if (bits <= 0) {
2208 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2209 return 0;
2210 }
2211
2212 toggle = !toggle;
2213 plain_bits += rl;
2214 c->bit_offset = tmp;
2215 } while (c->bit_offset < c->bm_bits);
2216
2217 len = bs.cur.b - p->code + !!bs.cur.bit;
2218
2219 if (plain_bits < (len << 3)) {
2220 /* incompressible with this method.
2221 * we need to rewind both word and bit position. */
2222 c->bit_offset -= plain_bits;
2223 bm_xfer_ctx_bit_to_word_offset(c);
2224 c->bit_offset = c->word_offset * BITS_PER_LONG;
2225 return 0;
2226 }
2227
2228 /* RLE + VLI was able to compress it just fine.
2229 * update c->word_offset. */
2230 bm_xfer_ctx_bit_to_word_offset(c);
2231
2232 /* store pad_bits */
2233 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2234
2235 return len;
2236}
2237
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002238/**
2239 * send_bitmap_rle_or_plain
2240 *
2241 * Return 0 when done, 1 when another iteration is needed, and a negative error
2242 * code upon failure.
2243 */
2244static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07002245send_bitmap_rle_or_plain(struct drbd_conf *mdev,
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002246 struct p_header80 *h, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002247{
2248 struct p_compressed_bm *p = (void*)h;
2249 unsigned long num_words;
2250 int len;
2251 int ok;
2252
2253 len = fill_bitmap_rle_bits(mdev, p, c);
2254
2255 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002256 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002257
2258 if (len) {
2259 DCBP_set_code(p, RLE_VLI_Bits);
2260 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2261 sizeof(*p) + len, 0);
2262
2263 c->packets[0]++;
2264 c->bytes[0] += sizeof(*p) + len;
2265
2266 if (c->bit_offset >= c->bm_bits)
2267 len = 0; /* DONE */
2268 } else {
2269 /* was not compressible.
2270 * send a buffer full of plain text bits instead. */
2271 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2272 len = num_words * sizeof(long);
2273 if (len)
2274 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2275 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002276 h, sizeof(struct p_header80) + len, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002277 c->word_offset += num_words;
2278 c->bit_offset = c->word_offset * BITS_PER_LONG;
2279
2280 c->packets[1]++;
Philipp Reisner0b70a132010-08-20 13:36:10 +02002281 c->bytes[1] += sizeof(struct p_header80) + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002282
2283 if (c->bit_offset > c->bm_bits)
2284 c->bit_offset = c->bm_bits;
2285 }
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002286 if (ok) {
2287 if (len == 0) {
2288 INFO_bm_xfer_stats(mdev, "send", c);
2289 return 0;
2290 } else
2291 return 1;
2292 }
2293 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002294}
2295
2296/* See the comment at receive_bitmap() */
2297int _drbd_send_bitmap(struct drbd_conf *mdev)
2298{
2299 struct bm_xfer_ctx c;
Philipp Reisner0b70a132010-08-20 13:36:10 +02002300 struct p_header80 *p;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002301 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002302
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002303 ERR_IF(!mdev->bitmap) return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002304
2305 /* maybe we should use some per thread scratch page,
2306 * and allocate that during initial device creation? */
Philipp Reisner0b70a132010-08-20 13:36:10 +02002307 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002308 if (!p) {
2309 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002310 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002311 }
2312
2313 if (get_ldev(mdev)) {
2314 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2315 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2316 drbd_bm_set_all(mdev);
2317 if (drbd_bm_write(mdev)) {
2318 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2319 * but otherwise process as per normal - need to tell other
2320 * side that a full resync is required! */
2321 dev_err(DEV, "Failed to write bitmap to disk!\n");
2322 } else {
2323 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2324 drbd_md_sync(mdev);
2325 }
2326 }
2327 put_ldev(mdev);
2328 }
2329
2330 c = (struct bm_xfer_ctx) {
2331 .bm_bits = drbd_bm_bits(mdev),
2332 .bm_words = drbd_bm_words(mdev),
2333 };
2334
2335 do {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002336 err = send_bitmap_rle_or_plain(mdev, p, &c);
2337 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002338
2339 free_page((unsigned long) p);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002340 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002341}
2342
2343int drbd_send_bitmap(struct drbd_conf *mdev)
2344{
2345 int err;
2346
2347 if (!drbd_get_data_sock(mdev))
2348 return -1;
2349 err = !_drbd_send_bitmap(mdev);
2350 drbd_put_data_sock(mdev);
2351 return err;
2352}
2353
2354int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2355{
2356 int ok;
2357 struct p_barrier_ack p;
2358
2359 p.barrier = barrier_nr;
2360 p.set_size = cpu_to_be32(set_size);
2361
2362 if (mdev->state.conn < C_CONNECTED)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002363 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002364 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002365 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002366 return ok;
2367}
2368
2369/**
2370 * _drbd_send_ack() - Sends an ack packet
2371 * @mdev: DRBD device.
2372 * @cmd: Packet command code.
2373 * @sector: sector, needs to be in big endian byte order
2374 * @blksize: size in byte, needs to be in big endian byte order
2375 * @block_id: Id, big endian byte order
2376 */
2377static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2378 u64 sector,
2379 u32 blksize,
2380 u64 block_id)
2381{
2382 int ok;
2383 struct p_block_ack p;
2384
2385 p.sector = sector;
2386 p.block_id = block_id;
2387 p.blksize = blksize;
2388 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2389
2390 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002391 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002392 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002393 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002394 return ok;
2395}
2396
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002397/* dp->sector and dp->block_id already/still in network byte order,
2398 * data_size is payload size according to dp->head,
2399 * and may need to be corrected for digest size. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002400int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002401 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002402{
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002403 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2404 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002405 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2406 dp->block_id);
2407}
2408
2409int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2410 struct p_block_req *rp)
2411{
2412 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2413}
2414
2415/**
2416 * drbd_send_ack() - Sends an ack packet
2417 * @mdev: DRBD device.
2418 * @cmd: Packet command code.
2419 * @e: Epoch entry.
2420 */
2421int drbd_send_ack(struct drbd_conf *mdev,
2422 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2423{
2424 return _drbd_send_ack(mdev, cmd,
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002425 cpu_to_be64(e->i.sector),
2426 cpu_to_be32(e->i.size),
Philipp Reisnerb411b362009-09-25 16:07:19 -07002427 e->block_id);
2428}
2429
2430/* This function misuses the block_id field to signal if the blocks
 2431	 * are in sync or not. */
2432int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2433 sector_t sector, int blksize, u64 block_id)
2434{
2435 return _drbd_send_ack(mdev, cmd,
2436 cpu_to_be64(sector),
2437 cpu_to_be32(blksize),
2438 cpu_to_be64(block_id));
2439}
2440
2441int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2442 sector_t sector, int size, u64 block_id)
2443{
2444 int ok;
2445 struct p_block_req p;
2446
2447 p.sector = cpu_to_be64(sector);
2448 p.block_id = block_id;
2449 p.blksize = cpu_to_be32(size);
2450
2451 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002452 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002453 return ok;
2454}
2455
2456int drbd_send_drequest_csum(struct drbd_conf *mdev,
2457 sector_t sector, int size,
2458 void *digest, int digest_size,
2459 enum drbd_packets cmd)
2460{
2461 int ok;
2462 struct p_block_req p;
2463
2464 p.sector = cpu_to_be64(sector);
Andreas Gruenbacher9a8e7752011-01-11 14:04:09 +01002465 p.block_id = ID_SYNCER /* unused */;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002466 p.blksize = cpu_to_be32(size);
2467
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002468 p.head.magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002469 p.head.command = cpu_to_be16(cmd);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002470 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002471
2472 mutex_lock(&mdev->data.mutex);
2473
2474 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2475 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2476
2477 mutex_unlock(&mdev->data.mutex);
2478
2479 return ok;
2480}
2481
2482int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2483{
2484 int ok;
2485 struct p_block_req p;
2486
2487 p.sector = cpu_to_be64(sector);
Andreas Gruenbacher9a8e7752011-01-11 14:04:09 +01002488 p.block_id = ID_SYNCER /* unused */;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002489 p.blksize = cpu_to_be32(size);
2490
2491 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002492 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002493 return ok;
2494}
2495
2496/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002497 * returns false if we should retry,
2498 * true if we think connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07002499 */
2500static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2501{
2502 int drop_it;
2503 /* long elapsed = (long)(jiffies - mdev->last_received); */
2504
2505 drop_it = mdev->meta.socket == sock
2506 || !mdev->asender.task
2507 || get_t_state(&mdev->asender) != Running
2508 || mdev->state.conn < C_CONNECTED;
2509
2510 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002511 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002512
2513 drop_it = !--mdev->ko_count;
2514 if (!drop_it) {
2515 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2516 current->comm, current->pid, mdev->ko_count);
2517 request_ping(mdev);
2518 }
2519
2520 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2521}
2522
2523/* The idea of sendpage seems to be to put some kind of reference
2524 * to the page into the skb, and to hand it over to the NIC. In
2525 * this process get_page() gets called.
2526 *
2527 * As soon as the page was really sent over the network put_page()
2528 * gets called by some part of the network layer. [ NIC driver? ]
2529 *
2530 * [ get_page() / put_page() increment/decrement the count. If count
2531 * reaches 0 the page will be freed. ]
2532 *
2533 * This works nicely with pages from FSs.
2534 * But this means that in protocol A we might signal IO completion too early!
2535 *
2536 * In order not to corrupt data during a resync we must make sure
 2537	 * that we do not reuse our own buffer pages (EEs) too early, therefore
2538 * we have the net_ee list.
2539 *
2540 * XFS seems to have problems, still, it submits pages with page_count == 0!
2541 * As a workaround, we disable sendpage on pages
2542 * with page_count == 0 or PageSlab.
2543 */
2544static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002545 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002546{
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002547 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002548 kunmap(page);
2549 if (sent == size)
2550 mdev->send_cnt += size>>9;
2551 return sent == size;
2552}
2553
2554static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002555 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002556{
2557 mm_segment_t oldfs = get_fs();
2558 int sent, ok;
2559 int len = size;
2560
2561 /* e.g. XFS meta- & log-data is in slab pages, which have a
2562 * page_count of 0 and/or have PageSlab() set.
2563 * we cannot use send_page for those, as that does get_page();
2564 * put_page(); and would cause either a VM_BUG directly, or
2565 * __page_cache_release a page that would actually still be referenced
2566 * by someone, leading to some obscure delayed Oops somewhere else. */
2567 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002568 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002569
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002570 msg_flags |= MSG_NOSIGNAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002571 drbd_update_congested(mdev);
2572 set_fs(KERNEL_DS);
2573 do {
2574 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2575 offset, len,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002576 msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002577 if (sent == -EAGAIN) {
2578 if (we_should_drop_the_connection(mdev,
2579 mdev->data.socket))
2580 break;
2581 else
2582 continue;
2583 }
2584 if (sent <= 0) {
2585 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2586 __func__, (int)size, len, sent);
2587 break;
2588 }
2589 len -= sent;
2590 offset += sent;
2591 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2592 set_fs(oldfs);
2593 clear_bit(NET_CONGESTED, &mdev->flags);
2594
2595 ok = (len == 0);
2596 if (likely(ok))
2597 mdev->send_cnt += size>>9;
2598 return ok;
2599}
2600
2601static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2602{
2603 struct bio_vec *bvec;
2604 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002605 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002606 __bio_for_each_segment(bvec, bio, i, 0) {
2607 if (!_drbd_no_send_page(mdev, bvec->bv_page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002608 bvec->bv_offset, bvec->bv_len,
2609 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002610 return 0;
2611 }
2612 return 1;
2613}
2614
2615static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2616{
2617 struct bio_vec *bvec;
2618 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002619 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002620 __bio_for_each_segment(bvec, bio, i, 0) {
2621 if (!_drbd_send_page(mdev, bvec->bv_page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002622 bvec->bv_offset, bvec->bv_len,
2623 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002624 return 0;
2625 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002626 return 1;
2627}
2628
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002629static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2630{
2631 struct page *page = e->pages;
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002632 unsigned len = e->i.size;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002633 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002634 page_chain_for_each(page) {
2635 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002636 if (!_drbd_send_page(mdev, page, 0, l,
2637 page_chain_next(page) ? MSG_MORE : 0))
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002638 return 0;
2639 len -= l;
2640 }
2641 return 1;
2642}
2643
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002644static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2645{
2646 if (mdev->agreed_pro_version >= 95)
2647 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002648 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2649 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2650 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2651 else
Jens Axboe721a9602011-03-09 11:56:30 +01002652 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002653}
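
/* Hypothetical example of the mapping above (illustration only): with
 * agreed_pro_version >= 95, a bio submitted with REQ_SYNC | REQ_FUA is
 * announced on the wire as DP_RW_SYNC | DP_FUA; an older peer would only
 * ever be told DP_RW_SYNC, since the remaining flag bits are not sent to it. */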
2654
Philipp Reisnerb411b362009-09-25 16:07:19 -07002655/* Used to send write requests
2656 * R_PRIMARY -> Peer (P_DATA)
2657 */
2658int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2659{
2660 int ok = 1;
2661 struct p_data p;
2662 unsigned int dp_flags = 0;
2663 void *dgb;
2664 int dgs;
2665
2666 if (!drbd_get_data_sock(mdev))
2667 return 0;
2668
2669 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2670 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2671
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002672 if (req->i.size <= DRBD_MAX_SIZE_H80_PACKET) {
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002673 p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002674 p.head.h80.command = cpu_to_be16(P_DATA);
2675 p.head.h80.length =
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002676 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002677 } else {
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002678 p.head.h95.magic = cpu_to_be16(DRBD_MAGIC_BIG);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002679 p.head.h95.command = cpu_to_be16(P_DATA);
2680 p.head.h95.length =
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002681 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002682 }
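	/* Sketch of why two header layouts are chosen above (an assumption
	 * drawn from the conversions used, not from a spec): p_header80 has a
	 * 16-bit length (cpu_to_be16), so it can only describe payloads up to
	 * DRBD_MAX_SIZE_H80_PACKET; larger requests use p_header95, whose
	 * length field is 32 bits wide (cpu_to_be32). */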
Philipp Reisnerb411b362009-09-25 16:07:19 -07002683
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002684 p.sector = cpu_to_be64(req->i.sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002685 p.block_id = (unsigned long)req;
2686 p.seq_num = cpu_to_be32(req->seq_num =
2687 atomic_add_return(1, &mdev->packet_seq));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002688
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002689 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2690
Philipp Reisnerb411b362009-09-25 16:07:19 -07002691 if (mdev->state.conn >= C_SYNC_SOURCE &&
2692 mdev->state.conn <= C_PAUSED_SYNC_T)
2693 dp_flags |= DP_MAY_SET_IN_SYNC;
2694
2695 p.dp_flags = cpu_to_be32(dp_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002696 set_bit(UNPLUG_REMOTE, &mdev->flags);
2697 ok = (sizeof(p) ==
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002698 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002699 if (ok && dgs) {
2700 dgb = mdev->int_dig_out;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002701 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
Andreas Gruenbachercab2f742010-12-09 16:08:46 +01002702 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002703 }
2704 if (ok) {
Lars Ellenberg470be442010-11-10 10:36:52 +01002705 /* For protocol A, we have to memcpy the payload into
2706 * socket buffers, as we may complete right away
2707 * as soon as we handed it over to tcp, at which point the data
2708 * pages may become invalid.
2709 *
2710 * For data-integrity enabled, we copy it as well, so we can be
2711 * sure that even if the bio pages may still be modified, it
2712 * won't change the data on the wire, thus if the digest checks
2713 * out ok after sending on this side, but does not fit on the
2714 * receiving side, we sure have detected corruption elsewhere.
2715 */
2716 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002717 ok = _drbd_send_bio(mdev, req->master_bio);
2718 else
2719 ok = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01002720
2721 /* double check digest, sometimes buffers have been modified in flight. */
2722 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02002723 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01002724 * currently supported in kernel crypto. */
2725 unsigned char digest[64];
2726 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2727 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2728 dev_warn(DEV,
2729 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002730 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01002731 }
2732 } /* else if (dgs > 64) {
2733 ... Be noisy about digest too large ...
2734 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002735 }
2736
2737 drbd_put_data_sock(mdev);
Philipp Reisnerbd26bfc2010-05-04 12:33:58 +02002738
Philipp Reisnerb411b362009-09-25 16:07:19 -07002739 return ok;
2740}
2741
2742/* answer packet, used to send data back for read requests:
2743 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2744 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2745 */
2746int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2747 struct drbd_epoch_entry *e)
2748{
2749 int ok;
2750 struct p_data p;
2751 void *dgb;
2752 int dgs;
2753
2754 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2755 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2756
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002757 if (e->i.size <= DRBD_MAX_SIZE_H80_PACKET) {
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002758 p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002759 p.head.h80.command = cpu_to_be16(cmd);
2760 p.head.h80.length =
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002761 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->i.size);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002762 } else {
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +01002763 p.head.h95.magic = cpu_to_be16(DRBD_MAGIC_BIG);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002764 p.head.h95.command = cpu_to_be16(cmd);
2765 p.head.h95.length =
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002766 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->i.size);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002767 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002768
Andreas Gruenbacher010f6e62011-01-14 20:59:35 +01002769 p.sector = cpu_to_be64(e->i.sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002770 p.block_id = e->block_id;
2771 /* p.seq_num = 0; No sequence numbers here.. */
2772
2773 /* Only called by our kernel thread.
2774 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2775 * in response to admin command or module unload.
2776 */
2777 if (!drbd_get_data_sock(mdev))
2778 return 0;
2779
Philipp Reisner0b70a132010-08-20 13:36:10 +02002780 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002781 if (ok && dgs) {
2782 dgb = mdev->int_dig_out;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002783 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
Andreas Gruenbachercab2f742010-12-09 16:08:46 +01002784 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002785 }
2786 if (ok)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002787 ok = _drbd_send_zc_ee(mdev, e);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002788
2789 drbd_put_data_sock(mdev);
Philipp Reisnerbd26bfc2010-05-04 12:33:58 +02002790
Philipp Reisnerb411b362009-09-25 16:07:19 -07002791 return ok;
2792}
2793
Philipp Reisner73a01a12010-10-27 14:33:00 +02002794int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2795{
2796 struct p_block_desc p;
2797
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01002798 p.sector = cpu_to_be64(req->i.sector);
2799 p.blksize = cpu_to_be32(req->i.size);
Philipp Reisner73a01a12010-10-27 14:33:00 +02002800
2801 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2802}
2803
Philipp Reisnerb411b362009-09-25 16:07:19 -07002804/*
2805 drbd_send distinguishes two cases:
2806
2807 Packets sent via the data socket "sock"
2808 and packets sent via the meta data socket "msock"
2809
2810 sock msock
2811 -----------------+-------------------------+------------------------------
2812 timeout conf.timeout / 2 conf.timeout / 2
2813 timeout action send a ping via msock Abort communication
2814 and close all sockets
2815*/
2816
2817/*
2818 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2819 */
2820int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2821 void *buf, size_t size, unsigned msg_flags)
2822{
2823 struct kvec iov;
2824 struct msghdr msg;
2825 int rv, sent = 0;
2826
2827 if (!sock)
2828 return -1000;
2829
2830 /* THINK if (signal_pending) return ... ? */
2831
2832 iov.iov_base = buf;
2833 iov.iov_len = size;
2834
2835 msg.msg_name = NULL;
2836 msg.msg_namelen = 0;
2837 msg.msg_control = NULL;
2838 msg.msg_controllen = 0;
2839 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2840
2841 if (sock == mdev->data.socket) {
2842 mdev->ko_count = mdev->net_conf->ko_count;
2843 drbd_update_congested(mdev);
2844 }
2845 do {
2846 /* STRANGE
2847 * tcp_sendmsg does _not_ use its size parameter at all ?
2848 *
2849 * -EAGAIN on timeout, -EINTR on signal.
2850 */
2851/* THINK
2852 * do we need to block DRBD_SIG if sock == &meta.socket ??
2853 * otherwise wake_asender() might interrupt some send_*Ack !
2854 */
2855 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2856 if (rv == -EAGAIN) {
2857 if (we_should_drop_the_connection(mdev, sock))
2858 break;
2859 else
2860 continue;
2861 }
2862 D_ASSERT(rv != 0);
2863 if (rv == -EINTR) {
2864 flush_signals(current);
2865 rv = 0;
2866 }
2867 if (rv < 0)
2868 break;
2869 sent += rv;
2870 iov.iov_base += rv;
2871 iov.iov_len -= rv;
2872 } while (sent < size);
2873
2874 if (sock == mdev->data.socket)
2875 clear_bit(NET_CONGESTED, &mdev->flags);
2876
2877 if (rv <= 0) {
2878 if (rv != -EAGAIN) {
2879 dev_err(DEV, "%s_sendmsg returned %d\n",
2880 sock == mdev->meta.socket ? "msock" : "sock",
2881 rv);
2882 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2883 } else
2884 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2885 }
2886
2887 return sent;
2888}
2889
2890static int drbd_open(struct block_device *bdev, fmode_t mode)
2891{
2892 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2893 unsigned long flags;
2894 int rv = 0;
2895
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002896 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002897 spin_lock_irqsave(&mdev->req_lock, flags);
2898 /* to have a stable mdev->state.role
2899 * and no race with updating open_cnt */
2900
2901 if (mdev->state.role != R_PRIMARY) {
2902 if (mode & FMODE_WRITE)
2903 rv = -EROFS;
2904 else if (!allow_oos)
2905 rv = -EMEDIUMTYPE;
2906 }
2907
2908 if (!rv)
2909 mdev->open_cnt++;
2910 spin_unlock_irqrestore(&mdev->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002911 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002912
2913 return rv;
2914}
2915
2916static int drbd_release(struct gendisk *gd, fmode_t mode)
2917{
2918 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002919 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002920 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002921 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002922 return 0;
2923}
2924
Philipp Reisnerb411b362009-09-25 16:07:19 -07002925static void drbd_set_defaults(struct drbd_conf *mdev)
2926{
Philipp Reisner85f4cc12010-06-29 17:35:34 +02002927 /* This way we get a compile error when sync_conf grows,
2928 and we forget to initialize it here */
2929 mdev->sync_conf = (struct syncer_conf) {
2930 /* .rate = */ DRBD_RATE_DEF,
2931 /* .after = */ DRBD_AFTER_DEF,
2932 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
Philipp Reisner85f4cc12010-06-29 17:35:34 +02002933 /* .verify_alg = */ {}, 0,
2934 /* .cpu_mask = */ {}, 0,
2935 /* .csums_alg = */ {}, 0,
Philipp Reisnere7564142010-06-29 17:35:34 +02002936 /* .use_rle = */ 0,
Philipp Reisner9a31d712010-07-05 13:42:03 +02002937 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
2938 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
2939 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2940 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002941 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
2942 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
Philipp Reisner85f4cc12010-06-29 17:35:34 +02002943 };
2944
2945 /* Have to do it this way, because the layout differs between
2946 big endian and little endian */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002947 mdev->state = (union drbd_state) {
2948 { .role = R_SECONDARY,
2949 .peer = R_UNKNOWN,
2950 .conn = C_STANDALONE,
2951 .disk = D_DISKLESS,
2952 .pdsk = D_UNKNOWN,
Philipp Reisnerfb22c402010-09-08 23:20:21 +02002953 .susp = 0,
2954 .susp_nod = 0,
2955 .susp_fen = 0
Philipp Reisnerb411b362009-09-25 16:07:19 -07002956 } };
2957}
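
/*
 * Illustrative only (not compiled): why the state above is initialized field
 * by field instead of assigning one integer constant.  Bitfield layout is
 * implementation defined and differs between big- and little-endian hosts,
 * so a raw word value would decode into different fields depending on the
 * architecture.  The union below is a made-up miniature, not the real
 * union drbd_state.
 */
#if 0
union demo_state {
	struct {
		unsigned role:2;
		unsigned peer:2;
		unsigned conn:5;
	};
	unsigned int word;	/* raw view; bit placement is arch dependent */
};

static void demo_state_init(void)
{
	/* portable: the compiler places each named field correctly everywhere */
	union demo_state s = { { .role = R_SECONDARY, .peer = R_UNKNOWN,
				 .conn = C_STANDALONE } };

	/* not portable: the same raw word means different fields per arch */
	s.word = 0x12;
	(void)s;
}
#endif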
2958
2959void drbd_init_set_defaults(struct drbd_conf *mdev)
2960{
2961 /* the memset(,0,) did most of this.
2962 * note: only assignments, no allocation in here */
2963
2964 drbd_set_defaults(mdev);
2965
Philipp Reisnerb411b362009-09-25 16:07:19 -07002966 atomic_set(&mdev->ap_bio_cnt, 0);
2967 atomic_set(&mdev->ap_pending_cnt, 0);
2968 atomic_set(&mdev->rs_pending_cnt, 0);
2969 atomic_set(&mdev->unacked_cnt, 0);
2970 atomic_set(&mdev->local_cnt, 0);
2971 atomic_set(&mdev->net_cnt, 0);
2972 atomic_set(&mdev->packet_seq, 0);
2973 atomic_set(&mdev->pp_in_use, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02002974 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02002975 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002976 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02002977 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002978
2979 mutex_init(&mdev->md_io_mutex);
2980 mutex_init(&mdev->data.mutex);
2981 mutex_init(&mdev->meta.mutex);
2982 sema_init(&mdev->data.work.s, 0);
2983 sema_init(&mdev->meta.work.s, 0);
2984 mutex_init(&mdev->state_mutex);
2985
2986 spin_lock_init(&mdev->data.work.q_lock);
2987 spin_lock_init(&mdev->meta.work.q_lock);
2988
2989 spin_lock_init(&mdev->al_lock);
2990 spin_lock_init(&mdev->req_lock);
2991 spin_lock_init(&mdev->peer_seq_lock);
2992 spin_lock_init(&mdev->epoch_lock);
2993
2994 INIT_LIST_HEAD(&mdev->active_ee);
2995 INIT_LIST_HEAD(&mdev->sync_ee);
2996 INIT_LIST_HEAD(&mdev->done_ee);
2997 INIT_LIST_HEAD(&mdev->read_ee);
2998 INIT_LIST_HEAD(&mdev->net_ee);
2999 INIT_LIST_HEAD(&mdev->resync_reads);
3000 INIT_LIST_HEAD(&mdev->data.work.q);
3001 INIT_LIST_HEAD(&mdev->meta.work.q);
3002 INIT_LIST_HEAD(&mdev->resync_work.list);
3003 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003004 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003005 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003006 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003007 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02003008
Philipp Reisner794abb72010-12-27 11:51:23 +01003009 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003010 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003011 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003012 mdev->md_sync_work.cb = w_md_sync;
3013 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01003014 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003015 init_timer(&mdev->resync_timer);
3016 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01003017 init_timer(&mdev->start_resync_timer);
Philipp Reisner7fde2be2011-03-01 11:08:28 +01003018 init_timer(&mdev->request_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003019 mdev->resync_timer.function = resync_timer_fn;
3020 mdev->resync_timer.data = (unsigned long) mdev;
3021 mdev->md_sync_timer.function = md_sync_timer_fn;
3022 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01003023 mdev->start_resync_timer.function = start_resync_timer_fn;
3024 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisner7fde2be2011-03-01 11:08:28 +01003025 mdev->request_timer.function = request_timer_fn;
3026 mdev->request_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003027
3028 init_waitqueue_head(&mdev->misc_wait);
3029 init_waitqueue_head(&mdev->state_wait);
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003030 init_waitqueue_head(&mdev->net_cnt_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003031 init_waitqueue_head(&mdev->ee_wait);
3032 init_waitqueue_head(&mdev->al_wait);
3033 init_waitqueue_head(&mdev->seq_wait);
3034
3035 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3036 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3037 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3038
3039 mdev->agreed_pro_version = PRO_VERSION_MAX;
Philipp Reisner2451fc32010-08-24 13:43:11 +02003040 mdev->write_ordering = WO_bdev_flush;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003041 mdev->resync_wenr = LC_FREE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02003042 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3043 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003044}
3045
3046void drbd_mdev_cleanup(struct drbd_conf *mdev)
3047{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003048 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003049 if (mdev->receiver.t_state != None)
3050 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3051 mdev->receiver.t_state);
3052
3053 /* no need to lock it, I'm the only thread alive */
3054 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3055 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3056 mdev->al_writ_cnt =
3057 mdev->bm_writ_cnt =
3058 mdev->read_cnt =
3059 mdev->recv_cnt =
3060 mdev->send_cnt =
3061 mdev->writ_cnt =
3062 mdev->p_size =
3063 mdev->rs_start =
3064 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003065 mdev->rs_failed = 0;
3066 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02003067 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003068 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3069 mdev->rs_mark_left[i] = 0;
3070 mdev->rs_mark_time[i] = 0;
3071 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003072 D_ASSERT(mdev->net_conf == NULL);
3073
3074 drbd_set_my_capacity(mdev, 0);
3075 if (mdev->bitmap) {
3076 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01003077 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003078 drbd_bm_cleanup(mdev);
3079 }
3080
3081 drbd_free_resources(mdev);
Philipp Reisner07782862010-08-31 12:00:50 +02003082 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003083
3084 /*
3085 * currently we call drbd_init_ee only on module load, so
3086 * we may call drbd_release_ee only on module unload!
3087 */
3088 D_ASSERT(list_empty(&mdev->active_ee));
3089 D_ASSERT(list_empty(&mdev->sync_ee));
3090 D_ASSERT(list_empty(&mdev->done_ee));
3091 D_ASSERT(list_empty(&mdev->read_ee));
3092 D_ASSERT(list_empty(&mdev->net_ee));
3093 D_ASSERT(list_empty(&mdev->resync_reads));
3094 D_ASSERT(list_empty(&mdev->data.work.q));
3095 D_ASSERT(list_empty(&mdev->meta.work.q));
3096 D_ASSERT(list_empty(&mdev->resync_work.list));
3097 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003098 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01003099
3100 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003101}
3102
3103
3104static void drbd_destroy_mempools(void)
3105{
3106 struct page *page;
3107
3108 while (drbd_pp_pool) {
3109 page = drbd_pp_pool;
3110 drbd_pp_pool = (struct page *)page_private(page);
3111 __free_page(page);
3112 drbd_pp_vacant--;
3113 }
3114
3115 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3116
3117 if (drbd_ee_mempool)
3118 mempool_destroy(drbd_ee_mempool);
3119 if (drbd_request_mempool)
3120 mempool_destroy(drbd_request_mempool);
3121 if (drbd_ee_cache)
3122 kmem_cache_destroy(drbd_ee_cache);
3123 if (drbd_request_cache)
3124 kmem_cache_destroy(drbd_request_cache);
3125 if (drbd_bm_ext_cache)
3126 kmem_cache_destroy(drbd_bm_ext_cache);
3127 if (drbd_al_ext_cache)
3128 kmem_cache_destroy(drbd_al_ext_cache);
3129
3130 drbd_ee_mempool = NULL;
3131 drbd_request_mempool = NULL;
3132 drbd_ee_cache = NULL;
3133 drbd_request_cache = NULL;
3134 drbd_bm_ext_cache = NULL;
3135 drbd_al_ext_cache = NULL;
3136
3137 return;
3138}
3139
3140static int drbd_create_mempools(void)
3141{
3142 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01003143 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003144 int i;
3145
3146 /* prepare our caches and mempools */
3147 drbd_request_mempool = NULL;
3148 drbd_ee_cache = NULL;
3149 drbd_request_cache = NULL;
3150 drbd_bm_ext_cache = NULL;
3151 drbd_al_ext_cache = NULL;
3152 drbd_pp_pool = NULL;
3153
3154 /* caches */
3155 drbd_request_cache = kmem_cache_create(
3156 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3157 if (drbd_request_cache == NULL)
3158 goto Enomem;
3159
3160 drbd_ee_cache = kmem_cache_create(
3161 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3162 if (drbd_ee_cache == NULL)
3163 goto Enomem;
3164
3165 drbd_bm_ext_cache = kmem_cache_create(
3166 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3167 if (drbd_bm_ext_cache == NULL)
3168 goto Enomem;
3169
3170 drbd_al_ext_cache = kmem_cache_create(
3171 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3172 if (drbd_al_ext_cache == NULL)
3173 goto Enomem;
3174
3175 /* mempools */
3176 drbd_request_mempool = mempool_create(number,
3177 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3178 if (drbd_request_mempool == NULL)
3179 goto Enomem;
3180
3181 drbd_ee_mempool = mempool_create(number,
3182 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06003183 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003184 goto Enomem;
3185
3186 /* drbd's page pool */
3187 spin_lock_init(&drbd_pp_lock);
3188
3189 for (i = 0; i < number; i++) {
3190 page = alloc_page(GFP_HIGHUSER);
3191 if (!page)
3192 goto Enomem;
3193 set_page_private(page, (unsigned long)drbd_pp_pool);
3194 drbd_pp_pool = page;
3195 }
3196 drbd_pp_vacant = number;
3197
3198 return 0;
3199
3200Enomem:
3201 drbd_destroy_mempools(); /* in case we allocated some */
3202 return -ENOMEM;
3203}
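
/*
 * Illustrative only (not compiled): how the pools created above are consumed
 * elsewhere in the driver.  The helper names are made up; the real request
 * path wraps this in its own allocation routines.
 */
#if 0
static struct drbd_request *demo_req_alloc(void)
{
	/* GFP_NOIO: we may already be on the write-out path */
	struct drbd_request *req = mempool_alloc(drbd_request_mempool, GFP_NOIO);

	if (req)
		memset(req, 0, sizeof(*req));
	return req;
}

static void demo_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}
#endif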
3204
3205static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3206 void *unused)
3207{
3208 /* just so we have it. you never know what interesting things we
3209 * might want to do here some day...
3210 */
3211
3212 return NOTIFY_DONE;
3213}
3214
3215static struct notifier_block drbd_notifier = {
3216 .notifier_call = drbd_notify_sys,
3217};
3218
3219static void drbd_release_ee_lists(struct drbd_conf *mdev)
3220{
3221 int rr;
3222
3223 rr = drbd_release_ee(mdev, &mdev->active_ee);
3224 if (rr)
3225 dev_err(DEV, "%d EEs in active list found!\n", rr);
3226
3227 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3228 if (rr)
3229 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3230
3231 rr = drbd_release_ee(mdev, &mdev->read_ee);
3232 if (rr)
3233 dev_err(DEV, "%d EEs in read list found!\n", rr);
3234
3235 rr = drbd_release_ee(mdev, &mdev->done_ee);
3236 if (rr)
3237 dev_err(DEV, "%d EEs in done list found!\n", rr);
3238
3239 rr = drbd_release_ee(mdev, &mdev->net_ee);
3240 if (rr)
3241 dev_err(DEV, "%d EEs in net list found!\n", rr);
3242}
3243
3244/* caution. no locking.
3245 * currently only used from module cleanup code. */
3246static void drbd_delete_device(unsigned int minor)
3247{
3248 struct drbd_conf *mdev = minor_to_mdev(minor);
3249
3250 if (!mdev)
3251 return;
3252
3253 /* paranoia asserts */
3254 if (mdev->open_cnt != 0)
3255 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3256 __FILE__ , __LINE__);
3257
3258 ERR_IF (!list_empty(&mdev->data.work.q)) {
3259 struct list_head *lp;
3260 list_for_each(lp, &mdev->data.work.q) {
3261 dev_err(DEV, "lp = %p\n", lp);
3262 }
3263 };
3264 /* end paranoia asserts */
3265
3266 del_gendisk(mdev->vdisk);
3267
3268 /* cleanup stuff that may have been allocated during
3269 * device (re-)configuration or state changes */
3270
3271 if (mdev->this_bdev)
3272 bdput(mdev->this_bdev);
3273
3274 drbd_free_resources(mdev);
3275
3276 drbd_release_ee_lists(mdev);
3277
Philipp Reisnerb411b362009-09-25 16:07:19 -07003278 lc_destroy(mdev->act_log);
3279 lc_destroy(mdev->resync);
3280
3281 kfree(mdev->p_uuid);
3282 /* mdev->p_uuid = NULL; */
3283
3284 kfree(mdev->int_dig_out);
3285 kfree(mdev->int_dig_in);
3286 kfree(mdev->int_dig_vv);
3287
3288 /* cleanup the rest that has been
3289 * allocated from drbd_new_device
3290 * and actually free the mdev itself */
3291 drbd_free_mdev(mdev);
3292}
3293
3294static void drbd_cleanup(void)
3295{
3296 unsigned int i;
3297
3298 unregister_reboot_notifier(&drbd_notifier);
3299
Lars Ellenberg17a93f302010-11-24 10:37:35 +01003300 /* first remove proc,
3301 * drbdsetup uses its presence to detect
3302 * whether DRBD is loaded.
3303 * If we got stuck in proc removal,
3304 * but have netlink already deregistered,
3305 * some drbdsetup commands may wait forever
3306 * for an answer.
3307 */
3308 if (drbd_proc)
3309 remove_proc_entry("drbd", NULL);
3310
Philipp Reisnerb411b362009-09-25 16:07:19 -07003311 drbd_nl_cleanup();
3312
3313 if (minor_table) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003314 i = minor_count;
3315 while (i--)
3316 drbd_delete_device(i);
3317 drbd_destroy_mempools();
3318 }
3319
3320 kfree(minor_table);
3321
3322 unregister_blkdev(DRBD_MAJOR, "drbd");
3323
3324 printk(KERN_INFO "drbd: module cleanup done.\n");
3325}
3326
3327/**
3328 * drbd_congested() - Callback for pdflush
3329 * @congested_data: User data
3330 * @bdi_bits: Bits pdflush is currently interested in
3331 *
3332 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3333 */
3334static int drbd_congested(void *congested_data, int bdi_bits)
3335{
3336 struct drbd_conf *mdev = congested_data;
3337 struct request_queue *q;
3338 char reason = '-';
3339 int r = 0;
3340
Andreas Gruenbacher1b881ef2010-12-13 18:03:38 +01003341 if (!may_inc_ap_bio(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003342 /* DRBD has frozen IO */
3343 r = bdi_bits;
3344 reason = 'd';
3345 goto out;
3346 }
3347
3348 if (get_ldev(mdev)) {
3349 q = bdev_get_queue(mdev->ldev->backing_bdev);
3350 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3351 put_ldev(mdev);
3352 if (r)
3353 reason = 'b';
3354 }
3355
3356 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3357 r |= (1 << BDI_async_congested);
3358 reason = reason == 'b' ? 'a' : 'n';
3359 }
3360
3361out:
3362 mdev->congestion_reason = reason;
3363 return r;
3364}
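
/*
 * Illustrative only (not compiled): how a flusher-style caller interprets the
 * bits returned by drbd_congested() above.  The helper name is made up.
 */
#if 0
static int demo_write_congested(struct drbd_conf *mdev)
{
	int bits = drbd_congested(mdev, 1 << BDI_async_congested);

	return (bits & (1 << BDI_async_congested)) != 0;
}
#endif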
3365
3366struct drbd_conf *drbd_new_device(unsigned int minor)
3367{
3368 struct drbd_conf *mdev;
3369 struct gendisk *disk;
3370 struct request_queue *q;
3371
3372 /* GFP_KERNEL, we are outside of all write-out paths */
3373 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3374 if (!mdev)
3375 return NULL;
3376 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3377 goto out_no_cpumask;
3378
3379 mdev->minor = minor;
3380
3381 drbd_init_set_defaults(mdev);
3382
3383 q = blk_alloc_queue(GFP_KERNEL);
3384 if (!q)
3385 goto out_no_q;
3386 mdev->rq_queue = q;
3387 q->queuedata = mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003388
3389 disk = alloc_disk(1);
3390 if (!disk)
3391 goto out_no_disk;
3392 mdev->vdisk = disk;
3393
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003394 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003395
3396 disk->queue = q;
3397 disk->major = DRBD_MAJOR;
3398 disk->first_minor = minor;
3399 disk->fops = &drbd_ops;
3400 sprintf(disk->disk_name, "drbd%d", minor);
3401 disk->private_data = mdev;
3402
3403 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3404 /* we have no partitions. we contain only ourselves. */
3405 mdev->this_bdev->bd_contains = mdev->this_bdev;
3406
3407 q->backing_dev_info.congested_fn = drbd_congested;
3408 q->backing_dev_info.congested_data = mdev;
3409
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01003410 blk_queue_make_request(q, drbd_make_request);
Philipp Reisner99432fc2011-05-20 16:39:13 +02003411	/* Setting max_hw_sectors to an odd value of 8 KiB here
 3412	   triggers a max_bio_size message upon first attach or connect */
3413 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003414 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3415 blk_queue_merge_bvec(q, drbd_merge_bvec);
Jens Axboe7eaceac2011-03-10 08:52:07 +01003416 q->queue_lock = &mdev->req_lock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003417
3418 mdev->md_io_page = alloc_page(GFP_KERNEL);
3419 if (!mdev->md_io_page)
3420 goto out_no_io_page;
3421
3422 if (drbd_bm_init(mdev))
3423 goto out_no_bitmap;
3424 /* no need to lock access, we are still initializing this minor device. */
3425 if (!tl_init(mdev))
3426 goto out_no_tl;
Andreas Gruenbacherdac13892011-01-21 17:18:39 +01003427 mdev->read_requests = RB_ROOT;
Andreas Gruenbacherde696712011-01-20 15:00:24 +01003428 mdev->write_requests = RB_ROOT;
Andreas Gruenbacher8b946252011-01-20 15:23:07 +01003429 mdev->epoch_entries = RB_ROOT;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003430
Philipp Reisnerb411b362009-09-25 16:07:19 -07003431 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3432 if (!mdev->current_epoch)
3433 goto out_no_epoch;
3434
3435 INIT_LIST_HEAD(&mdev->current_epoch->list);
3436 mdev->epochs = 1;
3437
3438 return mdev;
3439
3440/* out_whatever_else:
3441 kfree(mdev->current_epoch); */
3442out_no_epoch:
Philipp Reisnerb411b362009-09-25 16:07:19 -07003443 tl_cleanup(mdev);
3444out_no_tl:
3445 drbd_bm_cleanup(mdev);
3446out_no_bitmap:
3447 __free_page(mdev->md_io_page);
3448out_no_io_page:
3449 put_disk(disk);
3450out_no_disk:
3451 blk_cleanup_queue(q);
3452out_no_q:
3453 free_cpumask_var(mdev->cpu_mask);
3454out_no_cpumask:
3455 kfree(mdev);
3456 return NULL;
3457}
3458
3459/* counterpart of drbd_new_device.
3460 * last part of drbd_delete_device. */
3461void drbd_free_mdev(struct drbd_conf *mdev)
3462{
3463 kfree(mdev->current_epoch);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003464 tl_cleanup(mdev);
3465 if (mdev->bitmap) /* should no longer be there. */
3466 drbd_bm_cleanup(mdev);
3467 __free_page(mdev->md_io_page);
3468 put_disk(mdev->vdisk);
3469 blk_cleanup_queue(mdev->rq_queue);
3470 free_cpumask_var(mdev->cpu_mask);
3471 kfree(mdev);
3472}
3473
3474
3475int __init drbd_init(void)
3476{
3477 int err;
3478
3479 if (sizeof(struct p_handshake) != 80) {
3480 printk(KERN_ERR
3481 "drbd: never change the size or layout "
3482 "of the HandShake packet.\n");
3483 return -EINVAL;
3484 }
3485
Philipp Reisner2b8a90b2011-01-10 11:15:17 +01003486 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003487 printk(KERN_ERR
3488 "drbd: invalid minor_count (%d)\n", minor_count);
3489#ifdef MODULE
3490 return -EINVAL;
3491#else
3492 minor_count = 8;
3493#endif
3494 }
3495
3496 err = drbd_nl_init();
3497 if (err)
3498 return err;
3499
3500 err = register_blkdev(DRBD_MAJOR, "drbd");
3501 if (err) {
3502 printk(KERN_ERR
3503 "drbd: unable to register block device major %d\n",
3504 DRBD_MAJOR);
3505 return err;
3506 }
3507
3508 register_reboot_notifier(&drbd_notifier);
3509
3510 /*
3511 * allocate all necessary structs
3512 */
3513 err = -ENOMEM;
3514
3515 init_waitqueue_head(&drbd_pp_wait);
3516
3517 drbd_proc = NULL; /* play safe for drbd_cleanup */
3518 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3519 GFP_KERNEL);
3520 if (!minor_table)
3521 goto Enomem;
3522
3523 err = drbd_create_mempools();
3524 if (err)
3525 goto Enomem;
3526
Lars Ellenberg8c484ee2010-03-11 16:47:58 +01003527 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003528 if (!drbd_proc) {
3529 printk(KERN_ERR "drbd: unable to register proc file\n");
3530 goto Enomem;
3531 }
3532
3533 rwlock_init(&global_state_lock);
3534
3535 printk(KERN_INFO "drbd: initialized. "
3536 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3537 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3538 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3539 printk(KERN_INFO "drbd: registered as block device major %d\n",
3540 DRBD_MAJOR);
3541 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3542
3543 return 0; /* Success! */
3544
3545Enomem:
3546 drbd_cleanup();
3547 if (err == -ENOMEM)
3548 /* currently always the case */
3549 printk(KERN_ERR "drbd: ran out of memory\n");
3550 else
3551 printk(KERN_ERR "drbd: initialization failure\n");
3552 return err;
3553}
3554
3555void drbd_free_bc(struct drbd_backing_dev *ldev)
3556{
3557 if (ldev == NULL)
3558 return;
3559
Tejun Heoe525fd82010-11-13 11:55:17 +01003560 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3561 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003562
3563 kfree(ldev);
3564}
3565
3566void drbd_free_sock(struct drbd_conf *mdev)
3567{
3568 if (mdev->data.socket) {
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003569 mutex_lock(&mdev->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003570 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3571 sock_release(mdev->data.socket);
3572 mdev->data.socket = NULL;
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003573 mutex_unlock(&mdev->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003574 }
3575 if (mdev->meta.socket) {
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003576 mutex_lock(&mdev->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003577 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3578 sock_release(mdev->meta.socket);
3579 mdev->meta.socket = NULL;
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003580 mutex_unlock(&mdev->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003581 }
3582}
3583
3584
3585void drbd_free_resources(struct drbd_conf *mdev)
3586{
3587 crypto_free_hash(mdev->csums_tfm);
3588 mdev->csums_tfm = NULL;
3589 crypto_free_hash(mdev->verify_tfm);
3590 mdev->verify_tfm = NULL;
3591 crypto_free_hash(mdev->cram_hmac_tfm);
3592 mdev->cram_hmac_tfm = NULL;
3593 crypto_free_hash(mdev->integrity_w_tfm);
3594 mdev->integrity_w_tfm = NULL;
3595 crypto_free_hash(mdev->integrity_r_tfm);
3596 mdev->integrity_r_tfm = NULL;
3597
3598 drbd_free_sock(mdev);
3599
3600 __no_warn(local,
3601 drbd_free_bc(mdev->ldev);
3602 mdev->ldev = NULL;);
3603}
3604
3605/* meta data management */
3606
3607struct meta_data_on_disk {
3608 u64 la_size; /* last agreed size. */
3609 u64 uuid[UI_SIZE]; /* UUIDs. */
3610 u64 device_uuid;
3611 u64 reserved_u64_1;
3612 u32 flags; /* MDF */
3613 u32 magic;
3614 u32 md_size_sect;
3615 u32 al_offset; /* offset to this block */
3616 u32 al_nr_extents; /* important for restoring the AL */
3617 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3618 u32 bm_offset; /* offset to the bitmap, from here */
3619 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
Philipp Reisner99432fc2011-05-20 16:39:13 +02003620 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3621 u32 reserved_u32[3];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003622
3623} __packed;
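
/*
 * Illustrative only (not compiled): every multi-byte field above is stored
 * big-endian on disk, so all accesses go through cpu_to_be*()/be*_to_cpu(),
 * exactly as drbd_md_sync()/drbd_md_read() below do.  The buffer argument
 * here is a stand-in for the page used by those functions.
 */
#if 0
static void demo_md_endianness(struct drbd_conf *mdev,
			       struct meta_data_on_disk *buffer)
{
	/* CPU -> disk: convert before the sector is written out */
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);

	/* disk -> CPU: convert after the sector was read back in */
	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE)
		dev_err(DEV, "unexpected bm_bytes_per_bit\n");
}
#endif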
3624
3625/**
3626 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3627 * @mdev: DRBD device.
3628 */
3629void drbd_md_sync(struct drbd_conf *mdev)
3630{
3631 struct meta_data_on_disk *buffer;
3632 sector_t sector;
3633 int i;
3634
Lars Ellenbergee15b032010-09-03 10:00:09 +02003635 del_timer(&mdev->md_sync_timer);
3636 /* timer may be rearmed by drbd_md_mark_dirty() now. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003637 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3638 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003639
3640	/* We use D_FAILED here and not D_ATTACHING because we try to write
3641 * metadata even if we detach due to a disk failure! */
3642 if (!get_ldev_if_state(mdev, D_FAILED))
3643 return;
3644
Philipp Reisnerb411b362009-09-25 16:07:19 -07003645 mutex_lock(&mdev->md_io_mutex);
3646 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3647 memset(buffer, 0, 512);
3648
3649 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3650 for (i = UI_CURRENT; i < UI_SIZE; i++)
3651 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3652 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3653 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3654
3655 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3656 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3657 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3658 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3659 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3660
3661 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
Philipp Reisner99432fc2011-05-20 16:39:13 +02003662 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003663
3664 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3665 sector = mdev->ldev->md.md_offset;
3666
Lars Ellenberg3f3a9b82010-09-01 15:12:12 +02003667 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003668 /* this was a try anyways ... */
3669 dev_err(DEV, "meta data update failed!\n");
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003670 drbd_chk_io_error(mdev, 1, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003671 }
3672
3673 /* Update mdev->ldev->md.la_size_sect,
3674	 * since we just updated it in the on-disk meta data. */
3675 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3676
3677 mutex_unlock(&mdev->md_io_mutex);
3678 put_ldev(mdev);
3679}
3680
3681/**
3682 * drbd_md_read() - Reads in the meta data super block
3683 * @mdev: DRBD device.
3684 * @bdev: Device from which the meta data should be read in.
3685 *
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01003686 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
Philipp Reisnerb411b362009-09-25 16:07:19 -07003687 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3688 */
3689int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3690{
3691 struct meta_data_on_disk *buffer;
3692 int i, rv = NO_ERROR;
3693
3694 if (!get_ldev_if_state(mdev, D_ATTACHING))
3695 return ERR_IO_MD_DISK;
3696
Philipp Reisnerb411b362009-09-25 16:07:19 -07003697 mutex_lock(&mdev->md_io_mutex);
3698 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3699
3700 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003701 /* NOTE: can't do normal error processing here as this is
Philipp Reisnerb411b362009-09-25 16:07:19 -07003702 called BEFORE disk is attached */
3703 dev_err(DEV, "Error while reading metadata.\n");
3704 rv = ERR_IO_MD_DISK;
3705 goto err;
3706 }
3707
Andreas Gruenbachere7fad8a2011-01-11 13:54:02 +01003708 if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003709 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3710 rv = ERR_MD_INVALID;
3711 goto err;
3712 }
3713 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3714 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3715 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3716 rv = ERR_MD_INVALID;
3717 goto err;
3718 }
3719 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3720 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3721 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3722 rv = ERR_MD_INVALID;
3723 goto err;
3724 }
3725 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3726 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3727 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3728 rv = ERR_MD_INVALID;
3729 goto err;
3730 }
3731
3732 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3733 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3734 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3735 rv = ERR_MD_INVALID;
3736 goto err;
3737 }
3738
3739 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3740 for (i = UI_CURRENT; i < UI_SIZE; i++)
3741 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3742 bdev->md.flags = be32_to_cpu(buffer->flags);
3743 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3744 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3745
Philipp Reisner99432fc2011-05-20 16:39:13 +02003746 spin_lock_irq(&mdev->req_lock);
3747 if (mdev->state.conn < C_CONNECTED) {
3748 int peer;
3749 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3750 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3751 mdev->peer_max_bio_size = peer;
3752 }
3753 spin_unlock_irq(&mdev->req_lock);
3754
Philipp Reisnerb411b362009-09-25 16:07:19 -07003755 if (mdev->sync_conf.al_extents < 7)
3756 mdev->sync_conf.al_extents = 127;
3757
3758 err:
3759 mutex_unlock(&mdev->md_io_mutex);
3760 put_ldev(mdev);
3761
3762 return rv;
3763}
3764
3765/**
3766 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3767 * @mdev: DRBD device.
3768 *
3769 * Call this function if you change anything that should be written to
3770 * the meta-data super block. This function sets MD_DIRTY, and starts a
3771 * timer that ensures drbd_md_sync() is called within five seconds.
3772 */
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003773#ifdef DEBUG
Lars Ellenbergee15b032010-09-03 10:00:09 +02003774void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3775{
3776 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3777 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3778 mdev->last_md_mark_dirty.line = line;
3779 mdev->last_md_mark_dirty.func = func;
3780 }
3781}
3782#else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003783void drbd_md_mark_dirty(struct drbd_conf *mdev)
3784{
Lars Ellenbergee15b032010-09-03 10:00:09 +02003785 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003786 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003787}
Lars Ellenbergee15b032010-09-03 10:00:09 +02003788#endif
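
/*
 * Illustrative only (not compiled): the intended pattern around
 * drbd_md_mark_dirty() -- change some in-core meta data, mark it dirty, and
 * either sync explicitly or let the timer armed above force the sync.
 * The helper name is made up; the flag manipulation mirrors drbd_md_set_flag().
 */
#if 0
static void demo_md_update(struct drbd_conf *mdev)
{
	mdev->ldev->md.flags |= MDF_FULL_SYNC;	/* some in-core md change */
	drbd_md_mark_dirty(mdev);		/* arms md_sync_timer */
	drbd_md_sync(mdev);			/* or rely on the timer */
}
#endif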
Philipp Reisnerb411b362009-09-25 16:07:19 -07003789
3790static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3791{
3792 int i;
3793
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003794 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003795 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003796}
3797
3798void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3799{
3800 if (idx == UI_CURRENT) {
3801 if (mdev->state.role == R_PRIMARY)
3802 val |= 1;
3803 else
3804 val &= ~((u64)1);
3805
3806 drbd_set_ed_uuid(mdev, val);
3807 }
3808
3809 mdev->ldev->md.uuid[idx] = val;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003810 drbd_md_mark_dirty(mdev);
3811}
3812
3813
3814void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3815{
3816 if (mdev->ldev->md.uuid[idx]) {
3817 drbd_uuid_move_history(mdev);
3818 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003819 }
3820 _drbd_uuid_set(mdev, idx, val);
3821}
3822
3823/**
3824 * drbd_uuid_new_current() - Creates a new current UUID
3825 * @mdev: DRBD device.
3826 *
3827 * Creates a new current UUID, and rotates the old current UUID into
3828 * the bitmap slot. Causes an incremental resync upon next connect.
3829 */
3830void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3831{
3832 u64 val;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003833 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003834
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003835 if (bm_uuid)
3836 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3837
Philipp Reisnerb411b362009-09-25 16:07:19 -07003838 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003839
3840 get_random_bytes(&val, sizeof(u64));
3841 _drbd_uuid_set(mdev, UI_CURRENT, val);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003842 drbd_print_uuids(mdev, "new current UUID");
Lars Ellenbergaaa8e2b2010-10-15 13:16:53 +02003843 /* get it to stable storage _now_ */
3844 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003845}
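
/*
 * Illustrative example of the rotation above (UUID values made up):
 *
 *                before                after
 *   UI_CURRENT   0x1111111111111110    <new random value>
 *   UI_BITMAP    0                     0x1111111111111110
 *
 * The old current UUID now sits in the bitmap slot, which causes the
 * incremental resync upon the next connect described in the kerneldoc above.
 */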
3846
3847void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3848{
3849 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3850 return;
3851
3852 if (val == 0) {
3853 drbd_uuid_move_history(mdev);
3854 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3855 mdev->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003856 } else {
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003857 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3858 if (bm_uuid)
3859 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003860
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003861 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003862 }
3863 drbd_md_mark_dirty(mdev);
3864}
3865
3866/**
3867 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3868 * @mdev: DRBD device.
3869 *
3870 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3871 */
3872int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3873{
3874 int rv = -EIO;
3875
3876 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3877 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3878 drbd_md_sync(mdev);
3879 drbd_bm_set_all(mdev);
3880
3881 rv = drbd_bm_write(mdev);
3882
3883 if (!rv) {
3884 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3885 drbd_md_sync(mdev);
3886 }
3887
3888 put_ldev(mdev);
3889 }
3890
3891 return rv;
3892}
3893
3894/**
3895 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3896 * @mdev: DRBD device.
3897 *
3898 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3899 */
3900int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3901{
3902 int rv = -EIO;
3903
Philipp Reisner07782862010-08-31 12:00:50 +02003904 drbd_resume_al(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003905 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3906 drbd_bm_clear_all(mdev);
3907 rv = drbd_bm_write(mdev);
3908 put_ldev(mdev);
3909 }
3910
3911 return rv;
3912}
3913
3914static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3915{
3916 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
Lars Ellenberg02851e92010-12-16 14:47:39 +01003917 int rv = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003918
3919 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3920
Lars Ellenberg02851e92010-12-16 14:47:39 +01003921 if (get_ldev(mdev)) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003922 drbd_bm_lock(mdev, work->why, work->flags);
Lars Ellenberg02851e92010-12-16 14:47:39 +01003923 rv = work->io_fn(mdev);
3924 drbd_bm_unlock(mdev);
3925 put_ldev(mdev);
3926 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003927
3928 clear_bit(BITMAP_IO, &mdev->flags);
Philipp Reisner127b3172010-11-16 10:07:53 +01003929 smp_mb__after_clear_bit();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003930 wake_up(&mdev->misc_wait);
3931
3932 if (work->done)
3933 work->done(mdev, rv);
3934
3935 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3936 work->why = NULL;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003937 work->flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003938
3939 return 1;
3940}
3941
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003942void drbd_ldev_destroy(struct drbd_conf *mdev)
3943{
3944 lc_destroy(mdev->resync);
3945 mdev->resync = NULL;
3946 lc_destroy(mdev->act_log);
3947 mdev->act_log = NULL;
3948 __no_warn(local,
3949 drbd_free_bc(mdev->ldev);
3950 mdev->ldev = NULL;);
3951
3952 if (mdev->md_io_tmpp) {
3953 __free_page(mdev->md_io_tmpp);
3954 mdev->md_io_tmpp = NULL;
3955 }
3956 clear_bit(GO_DISKLESS, &mdev->flags);
3957}
3958
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003959static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3960{
3961 D_ASSERT(mdev->state.disk == D_FAILED);
Lars Ellenberg9d282872010-10-14 13:57:07 +02003962 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3963 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003964 * the protected members anymore, though, so once put_ldev reaches zero
3965 * again, it will be safe to free them. */
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003966 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003967 return 1;
3968}
3969
3970void drbd_go_diskless(struct drbd_conf *mdev)
3971{
3972 D_ASSERT(mdev->state.disk == D_FAILED);
3973 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
Lars Ellenberg9d282872010-10-14 13:57:07 +02003974 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003975}
3976
Philipp Reisnerb411b362009-09-25 16:07:19 -07003977/**
3978 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3979 * @mdev: DRBD device.
3980 * @io_fn: IO callback to be called when bitmap IO is possible
3981 * @done: callback to be called after the bitmap IO was performed
 3982 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	how to lock the bitmap; handed through to drbd_bm_lock() (enum bm_flag)
 3983 *
 3984 * While IO on the bitmap happens we freeze application IO, thus ensuring
 3985 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
 3986 * called from worker context. It MUST NOT be used while a previous such
 3987 * work is still pending!
3988 */
3989void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3990 int (*io_fn)(struct drbd_conf *),
3991 void (*done)(struct drbd_conf *, int),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003992 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003993{
3994 D_ASSERT(current == mdev->worker.task);
3995
3996 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3997 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3998 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3999 if (mdev->bm_io_work.why)
4000 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4001 why, mdev->bm_io_work.why);
4002
4003 mdev->bm_io_work.io_fn = io_fn;
4004 mdev->bm_io_work.done = done;
4005 mdev->bm_io_work.why = why;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004006 mdev->bm_io_work.flags = flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004007
Philipp Reisner22afd7e2010-11-16 15:30:44 +01004008 spin_lock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004009 set_bit(BITMAP_IO, &mdev->flags);
4010 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
Philipp Reisner127b3172010-11-16 10:07:53 +01004011 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004012 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004013 }
Philipp Reisner22afd7e2010-11-16 15:30:44 +01004014 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004015}
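
/*
 * Illustrative only (not compiled): queueing a full-bitmap write from worker
 * context.  io_fn and the lock flag come from this file; the done callback,
 * the "why" string and the flag choice are only an example.
 */
#if 0
static void demo_bm_done(struct drbd_conf *mdev, int rv)
{
	if (rv)
		dev_err(DEV, "bitmap IO failed: %d\n", rv);
}

static void demo_queue_set_n_write(struct drbd_conf *mdev)
{
	/* worker context only; new application IO is held off while the
	 * bitmap IO runs (see the kerneldoc above) */
	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, demo_bm_done,
			     "demo set_n_write", BM_LOCKED_SET_ALLOWED);
}
#endif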
4016
4017/**
4018 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4019 * @mdev: DRBD device.
4020 * @io_fn: IO callback to be called when bitmap IO is possible
 4021 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	how to lock the bitmap; handed through to drbd_bm_lock() (enum bm_flag)
 4022 *
 4023 * Freezes application IO while the actual IO operation runs. This
 4024 * function MAY NOT be called from worker context.
4025 */
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004026int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
4027 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004028{
4029 int rv;
4030
4031 D_ASSERT(current != mdev->worker.task);
4032
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004033 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4034 drbd_suspend_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004035
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004036 drbd_bm_lock(mdev, why, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004037 rv = io_fn(mdev);
4038 drbd_bm_unlock(mdev);
4039
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004040 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4041 drbd_resume_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004042
4043 return rv;
4044}
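
/*
 * Illustrative only (not compiled): the synchronous counterpart for callers
 * outside the worker.  Passing BM_LOCKED_SET_ALLOWED skips the suspend/resume
 * of application IO (see the checks above); the "why" string is made up and
 * the flag choice is only an example.
 */
#if 0
static int demo_clear_bitmap(struct drbd_conf *mdev)
{
	return drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			      "demo clear_n_write", BM_LOCKED_SET_ALLOWED);
}
#endif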
4045
4046void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4047{
4048 if ((mdev->ldev->md.flags & flag) != flag) {
4049 drbd_md_mark_dirty(mdev);
4050 mdev->ldev->md.flags |= flag;
4051 }
4052}
4053
4054void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4055{
4056 if ((mdev->ldev->md.flags & flag) != 0) {
4057 drbd_md_mark_dirty(mdev);
4058 mdev->ldev->md.flags &= ~flag;
4059 }
4060}
4061int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4062{
4063 return (bdev->md.flags & flag) != 0;
4064}
4065
4066static void md_sync_timer_fn(unsigned long data)
4067{
4068 struct drbd_conf *mdev = (struct drbd_conf *) data;
4069
4070 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4071}
4072
4073static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4074{
4075 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
Lars Ellenbergee15b032010-09-03 10:00:09 +02004076#ifdef DEBUG
4077 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4078 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4079#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07004080 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004081 return 1;
4082}
4083
4084#ifdef CONFIG_DRBD_FAULT_INJECTION
4085/* Fault insertion support including random number generator shamelessly
4086 * stolen from kernel/rcutorture.c */
4087struct fault_random_state {
4088 unsigned long state;
4089 unsigned long count;
4090};
4091
4092#define FAULT_RANDOM_MULT 39916801 /* prime */
4093#define FAULT_RANDOM_ADD 479001701 /* prime */
4094#define FAULT_RANDOM_REFRESH 10000
4095
4096/*
4097 * Crude but fast random-number generator. Uses a linear congruential
4098 * generator, with occasional help from get_random_bytes().
4099 */
4100static unsigned long
4101_drbd_fault_random(struct fault_random_state *rsp)
4102{
4103 long refresh;
4104
Roel Kluin49829ea2009-12-15 22:55:44 +01004105 if (!rsp->count--) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004106 get_random_bytes(&refresh, sizeof(refresh));
4107 rsp->state += refresh;
4108 rsp->count = FAULT_RANDOM_REFRESH;
4109 }
4110 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4111 return swahw32(rsp->state);
4112}
4113
4114static char *
4115_drbd_fault_str(unsigned int type) {
4116 static char *_faults[] = {
4117 [DRBD_FAULT_MD_WR] = "Meta-data write",
4118 [DRBD_FAULT_MD_RD] = "Meta-data read",
4119 [DRBD_FAULT_RS_WR] = "Resync write",
4120 [DRBD_FAULT_RS_RD] = "Resync read",
4121 [DRBD_FAULT_DT_WR] = "Data write",
4122 [DRBD_FAULT_DT_RD] = "Data read",
4123 [DRBD_FAULT_DT_RA] = "Data read ahead",
4124 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
Philipp Reisner6b4388a2010-04-26 14:11:45 +02004125 [DRBD_FAULT_AL_EE] = "EE allocation",
4126 [DRBD_FAULT_RECEIVE] = "receive data corruption",
Philipp Reisnerb411b362009-09-25 16:07:19 -07004127 };
4128
4129 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4130}
4131
4132unsigned int
4133_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4134{
4135 static struct fault_random_state rrs = {0, 0};
4136
4137 unsigned int ret = (
4138 (fault_devs == 0 ||
4139 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4140 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4141
4142 if (ret) {
4143 fault_count++;
4144
Lars Ellenberg73835062010-05-27 11:51:56 +02004145 if (__ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004146 dev_warn(DEV, "***Simulating %s failure\n",
4147 _drbd_fault_str(type));
4148 }
4149
4150 return ret;
4151}
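
/*
 * Illustrative example of the parameter semantics implemented above (numbers
 * made up): fault_rate=10 fails roughly every tenth eligible operation;
 * fault_devs=0 makes every minor eligible, while e.g. fault_devs=0x5
 * restricts injection to minors 0 and 2, via (1 << minor) & fault_devs.
 */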
4152#endif
4153
4154const char *drbd_buildtag(void)
4155{
4156	/* A DRBD module built from external sources carries a reference to
 4157	   the git hash of its source code here. */
4158
4159 static char buildtag[38] = "\0uilt-in";
4160
4161 if (buildtag[0] == 0) {
4162#ifdef CONFIG_MODULES
4163 if (THIS_MODULE != NULL)
4164 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4165 else
4166#endif
4167 buildtag[0] = 'b';
4168 }
4169
4170 return buildtag;
4171}
4172
4173module_init(drbd_init)
4174module_exit(drbd_cleanup)
4175
Philipp Reisnerb411b362009-09-25 16:07:19 -07004176EXPORT_SYMBOL(drbd_conn_str);
4177EXPORT_SYMBOL(drbd_role_str);
4178EXPORT_SYMBOL(drbd_disk_str);
4179EXPORT_SYMBOL(drbd_set_st_err_str);