/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING. If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not as a module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);
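/*
 * Illustration only: when drbd is built as a module these knobs are set via
 * e.g. "modprobe drbd minor_count=16"; built into the kernel, the same knob
 * appears on the kernel command line as "drbd.minor_count=16".
 */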

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details;	/* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct drbd_conf **minor_table;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* epoch entries */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
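/*
 * Rough illustration of the pool chaining described above (the real
 * allocation helpers live elsewhere in the driver): pushing a page onto the
 * pool stores the old head in page->private and makes the page the new head,
 *
 *	set_page_private(page, (unsigned long)drbd_pp_pool);
 *	drbd_pp_pool = page;
 *	drbd_pp_vacant++;
 *
 * all under drbd_pp_lock; popping a page reverses these steps.
 */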

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular, doubly linked list of requests
 * attached.
 */
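/*
 * Illustration of the layout described above:
 *
 *   mdev->oldest_tle -> epoch -> epoch -> epoch <- mdev->newest_tle
 *                         |        |        |
 *                      requests requests requests   (circular lists)
 *
 * _tl_add_barrier() appends a new epoch at the newest end; tl_release()
 * retires the oldest epoch once the peer acknowledges its barrier.
 */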
static int tl_init(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	mdev->oldest_tle = b;
	mdev->newest_tle = b;
	INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
	INIT_LIST_HEAD(&mdev->barrier_acked_requests);

	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;

	return 1;
}

static void tl_cleanup(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
	kfree(mdev->oldest_tle);
	mdev->oldest_tle = NULL;
	kfree(mdev->unused_spare_tle);
	mdev->unused_spare_tle = NULL;
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @mdev:	DRBD device.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = mdev->newest_tle;
	new->br_number = newest_before->br_number+1;
	if (mdev->newest_tle != new) {
		mdev->newest_tle->next = new;
		mdev->newest_tle = new;
	}
}

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @mdev:	DRBD device.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	b = mdev->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
			barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, barrier_acked);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruption of the
	   slab's data structures we have to remove the list's head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(connection_lost_while_pending).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, barrier_acked) above.
	   */
	list_splice_init(&b->requests, &mdev->barrier_acked_requests);

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(mdev, b);
		if (nob)
			mdev->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore mdev->oldest_tle already points to b */
	} else {
		D_ASSERT(nob != NULL);
		mdev->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&mdev->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&mdev->req_lock);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @mdev:	DRBD device.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
 * restart_frozen_disk_io.
 */
static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = mdev->oldest_tle;
	pn = &mdev->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == resend) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(mdev);
					set_bit(CREATE_BARRIER, &mdev->flags);
				}

				drbd_queue_work(&mdev->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(mdev);

			if (b == mdev->newest_tle) {
				/* recycle, but reinit! */
				D_ASSERT(tmp == NULL);
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}

	/* Actions operating on the disk state, also want to work on
	   requests that got barrier acked. */
	switch (what) {
	case fail_frozen_disk_io:
	case restart_frozen_disk_io:
		list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			_req_mod(req, what);
		}
		/* fall through */

	case connection_lost_while_pending:
	case resend:
		break;
	default:
		dev_err(DEV, "what = %d in _tl_restart()\n", what);
	}
}
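/*
 * Orientation note: in the loop above, resend re-queues w_send_barrier for
 * epochs that still carry writes; the switch then additionally applies
 * fail_frozen_disk_io / restart_frozen_disk_io to requests that were already
 * barrier-acked, while connection_lost_while_pending and resend need no work
 * beyond the per-request _req_mod() calls in the loop.
 */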


/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @mdev:	DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_conf *mdev)
{
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	_tl_restart(mdev, connection_lost_while_pending);

	/* we expect this list to be empty. */
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, connection_lost_while_pending);
	}

	/* ensure bit indicating barrier is required is clear */
	clear_bit(CREATE_BARRIER, &mdev->flags);

	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));

	spin_unlock_irq(&mdev->req_lock);
}

void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	spin_lock_irq(&mdev->req_lock);
	_tl_restart(mdev, what);
	spin_unlock_irq(&mdev->req_lock);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
 * @mdev:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;
	struct list_head *le, *tle;
	struct drbd_request *req;

	spin_lock_irq(&mdev->req_lock);
	b = mdev->oldest_tle;
	while (b) {
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			if (!(req->rq_state & RQ_LOCAL_PENDING))
				continue;
			_req_mod(req, abort_disk_io);
		}
		b = b->next;
	}

	list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
		req = list_entry(le, struct drbd_request, tl_requests);
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		_req_mod(req, abort_disk_io);
	}

	spin_unlock_irq(&mdev->req_lock);
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
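/*
 * Reading the expression above: a state change is cluster wide when, while
 * staying connected, we become Primary, start a resync handshake, or fail
 * the local disk - or when we disconnect or start online verify. Such
 * changes are negotiated with the peer (see drbd_req_state()) before they
 * are committed locally.
 */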

enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	rv = _drbd_set_state(mdev, ns, f, NULL);
	ns = mdev->state;
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}

static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
						    union drbd_state,
						    union drbd_state);
enum sanitize_state_warnings {
	NO_WARNING,
	ABORTED_ONLINE_VERIFY,
	ABORTED_RESYNC,
	CONNECTION_LOST_NEGOTIATING,
	IMPLICITLY_UPGRADED_DISK,
	IMPLICITLY_UPGRADED_PDSK,
};
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, enum sanitize_state_warnings *warn);
int drbd_send_state_req(struct drbd_conf *,
			union drbd_state, union drbd_state);

static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	rv = 0;
	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (!rv) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_state_transition(mdev, ns, os);
			if (rv == SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}
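/*
 * Summary of the return values above: SS_CW_SUCCESS and SS_CW_FAILED_BY_PEER
 * report the peer's answer, SS_CW_NO_NEED means no cluster wide change is
 * required after all, and SS_UNKNOWN_ERROR ("cont waiting") keeps the
 * wait_event() in drbd_req_state() waiting until one of the flags is set.
 */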

/**
 * drbd_req_state() - Perform an eventually cluster wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(&mdev->state_mutex);

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_state_transition(mdev, ns, os);
		spin_unlock_irqrestore(&mdev->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		drbd_state_lock(mdev);
		if (!drbd_send_state_req(mdev, mask, val)) {
			drbd_state_unlock(mdev);
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			drbd_state_unlock(mdev);
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->req_lock, flags);
		os = mdev->state;
		ns.i = (os.i & ~mask.i) | val.i;
		rv = _drbd_set_state(mdev, ns, f, &done);
		drbd_state_unlock(mdev);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(&mdev->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}
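/*
 * Typical usage elsewhere in this file builds the mask/val pair with the
 * NS()/_NS() helpers, e.g.
 *
 *	_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
 *
 * to request a change of a single state field.
 */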

static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-'
	    );
}

void print_st_err(struct drbd_conf *mdev, union drbd_state os,
		  union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}


/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		if (!mdev->net_conf->two_primaries &&
		    ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
			rv = SS_TWO_PRIMARIES;
		put_net_conf(mdev);
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->sync_conf.verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

	return rv;
}
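/*
 * Example of the checks above: requesting R_PRIMARY while disconnected with
 * a local disk below D_UP_TO_DATE is refused with SS_NO_UP_TO_DATE_DISK.
 */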

/**
 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
			  union drbd_state os)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	return rv;
}

static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
{
	static const char *msg_table[] = {
		[NO_WARNING] = "",
		[ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
		[ABORTED_RESYNC] = "Resync aborted.",
		[CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
		[IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
		[IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
	};

	if (warn != NO_WARNING)
		dev_warn(DEV, "%s\n", msg_table[warn]);
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn:	optional pointer to a warning to be reported by the caller.
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, enum sanitize_state_warnings *warn)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	if (warn)
		*warn = NO_WARNING;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Disallow Network errors to configure a device's network part */
	if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
	    os.conn <= C_DISCONNECTING)
		ns.conn = os.conn;

	/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
	 * If you try to go into some Sync* state, that shall fail (elsewhere). */
	if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
		ns.conn = os.conn;

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		ns.disk = D_DISKLESS;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
		ns.conn = os.conn;

	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* Abort resync if a disk fails/detaches */
	if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
	    (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn)
			*warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
				ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			if (warn)
				*warn = CONNECTION_LOST_NEGOTIATING;
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_DISK;
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_PDSK;
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no accessible data is available */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}
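/*
 * Example of the fixups above: if the new connection state drops below
 * C_CONNECTED, the peer role is forced to R_UNKNOWN and an out-of-range
 * pdsk is forced to D_UNKNOWN, regardless of what the caller asked for.
 */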

/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (mdev->agreed_pro_version < 90)
		mdev->ov_start_sector = 0;
	mdev->rs_total = drbd_bm_bits(mdev);
	mdev->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total) {
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
			mdev->rs_total = 1;
		} else
			mdev->rs_total -= bit;
		mdev->ov_position = mdev->ov_start_sector;
	}
	mdev->ov_left = mdev->rs_total;
}

static void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}

/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after the after_state_ch() has finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
		 enum chg_state_flags flags, struct completion *done)
{
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	enum sanitize_state_warnings ssw;
	struct after_state_chg_work *ascw;

	os = mdev->state;

	ns = sanitize_state(mdev, os, ns, &ssw);

	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks ; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen...*/

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_state_transition(mdev, ns, os);
		} else
			rv = is_valid_state_transition(mdev, ns, os);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	print_sanitize_warnings(mdev, ssw);

	{
		char *pbp, pb[300];
		pbp = pb;
		*pbp = 0;
		if (ns.role != os.role)
			pbp += sprintf(pbp, "role( %s -> %s ) ",
				       drbd_role_str(os.role),
				       drbd_role_str(ns.role));
		if (ns.peer != os.peer)
			pbp += sprintf(pbp, "peer( %s -> %s ) ",
				       drbd_role_str(os.peer),
				       drbd_role_str(ns.peer));
		if (ns.conn != os.conn)
			pbp += sprintf(pbp, "conn( %s -> %s ) ",
				       drbd_conn_str(os.conn),
				       drbd_conn_str(ns.conn));
		if (ns.disk != os.disk)
			pbp += sprintf(pbp, "disk( %s -> %s ) ",
				       drbd_disk_str(os.disk),
				       drbd_disk_str(ns.disk));
		if (ns.pdsk != os.pdsk)
			pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
				       drbd_disk_str(os.pdsk),
				       drbd_disk_str(ns.pdsk));
		if (is_susp(ns) != is_susp(os))
			pbp += sprintf(pbp, "susp( %d -> %d ) ",
				       is_susp(os),
				       is_susp(ns));
		if (ns.aftr_isp != os.aftr_isp)
			pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
				       os.aftr_isp,
				       ns.aftr_isp);
		if (ns.peer_isp != os.peer_isp)
			pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
				       os.peer_isp,
				       ns.peer_isp);
		if (ns.user_isp != os.user_isp)
			pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
				       os.user_isp,
				       ns.user_isp);
		dev_info(DEV, "%s\n", pb);
	}

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
		set_bit(DEVICE_DYING, &mdev->flags);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	mdev->state = ns;

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(mdev, "attached to UUIDs");

	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(mdev, ns.conn);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->ov_left;
			mdev->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(mdev);

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_WF_CONNECTION &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->done = done;
		drbd_queue_work(&mdev->data.work, &ascw->w);
	} else {
		dev_warn(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}

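/* The after_state_chg_work allocated in __drbd_set_state() is queued to the
 * worker; w_after_state_ch() runs the potentially sleeping after_state_ch()
 * outside of req_lock and completes @done for CS_WAIT_COMPLETE callers. */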
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 1;
}

static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}

int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
			       int (*io_fn)(struct drbd_conf *),
			       char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current == mdev->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}

Philipp Reisnerb411b362009-09-25 16:07:19 -07001389/**
1390 * after_state_ch() - Perform after state change actions that may sleep
1391 * @mdev: DRBD device.
1392 * @os: old state.
1393 * @ns: new state.
1394 * @flags: Flags
1395 */
1396static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1397 union drbd_state ns, enum chg_state_flags flags)
1398{
1399 enum drbd_fencing_p fp;
Philipp Reisner67098932010-06-24 16:24:25 +02001400 enum drbd_req_event what = nothing;
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001401 union drbd_state nsm = (union drbd_state){ .i = -1 };
Philipp Reisnerb411b362009-09-25 16:07:19 -07001402
1403 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1404 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1405 if (mdev->p_uuid)
1406 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1407 }
1408
1409 fp = FP_DONT_CARE;
1410 if (get_ldev(mdev)) {
1411 fp = mdev->ldev->dc.fencing;
1412 put_ldev(mdev);
1413 }
1414
1415 /* Inform userspace about the change... */
1416 drbd_bcast_state(mdev, ns);
1417
1418 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1419 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1420 drbd_khelper(mdev, "pri-on-incon-degr");
1421
1422 /* Here we have the actions that are performed after a
1423 state change. This function might sleep */
1424
Philipp Reisnerdfa8bed2011-06-29 14:06:08 +02001425 if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
1426 mod_timer(&mdev->request_timer, jiffies + HZ);
1427
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001428 nsm.i = -1;
1429 if (ns.susp_nod) {
Philipp Reisner3f986882010-12-20 14:48:20 +01001430 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1431 what = resend;
Philipp Reisner265be2d2010-05-31 10:14:17 +02001432
Philipp Reisner79f16f52011-07-15 18:44:26 +02001433 if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
1434 ns.disk > D_NEGOTIATING)
Philipp Reisner3f986882010-12-20 14:48:20 +01001435 what = restart_frozen_disk_io;
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001436
Philipp Reisner3f986882010-12-20 14:48:20 +01001437 if (what != nothing)
1438 nsm.susp_nod = 0;
Philipp Reisner265be2d2010-05-31 10:14:17 +02001439 }
1440
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001441 if (ns.susp_fen) {
Philipp Reisner43a51822010-06-11 11:26:34 +02001442 /* case1: The outdate peer handler is successful: */
1443 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001444 tl_clear(mdev);
Philipp Reisner43a51822010-06-11 11:26:34 +02001445 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1446 drbd_uuid_new_current(mdev);
1447 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner43a51822010-06-11 11:26:34 +02001448 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001449 spin_lock_irq(&mdev->req_lock);
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001450 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001451 spin_unlock_irq(&mdev->req_lock);
1452 }
Philipp Reisner43a51822010-06-11 11:26:34 +02001453 /* case2: The connection was established again: */
1454 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1455 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner67098932010-06-24 16:24:25 +02001456 what = resend;
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001457 nsm.susp_fen = 0;
Philipp Reisner43a51822010-06-11 11:26:34 +02001458 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001459 }
Philipp Reisner67098932010-06-24 16:24:25 +02001460
1461 if (what != nothing) {
1462 spin_lock_irq(&mdev->req_lock);
1463 _tl_restart(mdev, what);
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001464 nsm.i &= mdev->state.i;
1465 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
Philipp Reisner67098932010-06-24 16:24:25 +02001466 spin_unlock_irq(&mdev->req_lock);
1467 }
1468
Lars Ellenberg5a22db82010-12-17 21:14:23 +01001469 /* Became sync source. With protocol >= 96, we still need to send out
1470 * the sync uuid now. Need to do that before any drbd_send_state, or
1471 * the other side may go "paused sync" before receiving the sync uuids,
1472 * which is unexpected. */
1473 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1474 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1475 mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1476 drbd_gen_and_send_sync_uuid(mdev);
1477 put_ldev(mdev);
1478 }
1479
Philipp Reisnerb411b362009-09-25 16:07:19 -07001480 /* Do not change the order of the if above and the two below... */
1481 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1482 drbd_send_uuids(mdev);
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001483 drbd_send_state(mdev, ns);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001484 }
Lars Ellenberg54b956a2011-01-20 10:47:53 +01001485 /* No point in queuing send_bitmap if we don't have a connection
1486 * anymore, so check also the _current_ state, not only the new state
1487 * at the time this work was queued. */
1488 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1489 mdev->state.conn == C_WF_BITMAP_S)
1490 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001491 "send_bitmap (WFBitMapS)",
1492 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001493
1494 /* Lost contact to peer's copy of the data */
1495 if ((os.pdsk >= D_INCONSISTENT &&
1496 os.pdsk != D_UNKNOWN &&
1497 os.pdsk != D_OUTDATED)
1498 && (ns.pdsk < D_INCONSISTENT ||
1499 ns.pdsk == D_UNKNOWN ||
1500 ns.pdsk == D_OUTDATED)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001501 if (get_ldev(mdev)) {
1502 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
Philipp Reisner2c8d1962010-05-25 14:32:03 +02001503 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001504 if (is_susp(mdev->state)) {
Philipp Reisner43a51822010-06-11 11:26:34 +02001505 set_bit(NEW_CUR_UUID, &mdev->flags);
1506 } else {
1507 drbd_uuid_new_current(mdev);
1508 drbd_send_uuids(mdev);
1509 }
Philipp Reisner2c8d1962010-05-25 14:32:03 +02001510 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001511 put_ldev(mdev);
1512 }
1513 }
1514
1515 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
Philipp Reisnerbca482e2011-07-15 12:14:27 +02001516 if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
1517 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
Philipp Reisner2c8d1962010-05-25 14:32:03 +02001518 drbd_uuid_new_current(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02001519 drbd_send_uuids(mdev);
1520 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001521 /* D_DISKLESS Peer becomes secondary */
1522 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001523 /* We may still be Primary ourselves.
1524 * No harm done if the bitmap still changes,
1525 * redirtied pages will follow later. */
1526 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1527 "demote diskless peer", BM_LOCKED_SET_ALLOWED);
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001528 put_ldev(mdev);
1529 }
1530
Lars Ellenberg06d33e92010-12-18 17:00:59 +01001531 /* Write out all changed bits on demote.
1532	 * Though, no need to do that just yet
1533 * if there is a resync going on still */
1534 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1535 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001536 /* No changes to the bitmap expected this time, so assert that,
1537 * even though no harm was done if it did change. */
1538 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1539 "demote", BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001540 put_ldev(mdev);
1541 }
1542
1543 /* Last part of the attaching process ... */
1544 if (ns.conn >= C_CONNECTED &&
1545 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
Philipp Reisnere89b5912010-03-24 17:11:33 +01001546 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001547 drbd_send_uuids(mdev);
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001548 drbd_send_state(mdev, ns);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001549 }
1550
1551 /* We want to pause/continue resync, tell peer. */
1552 if (ns.conn >= C_CONNECTED &&
1553 ((os.aftr_isp != ns.aftr_isp) ||
1554 (os.user_isp != ns.user_isp)))
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001555 drbd_send_state(mdev, ns);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001556
1557 /* In case one of the isp bits got set, suspend other devices. */
1558 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1559 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1560 suspend_other_sg(mdev);
1561
1562	/* Make sure the peer gets informed about any state
1563	   changes (ISP bits) that happened while we were in WFReportParams. */
1564 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001565 drbd_send_state(mdev, ns);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001566
Philipp Reisner67531712010-10-27 12:21:30 +02001567 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001568 drbd_send_state(mdev, ns);
Philipp Reisner67531712010-10-27 12:21:30 +02001569
Philipp Reisnerb411b362009-09-25 16:07:19 -07001570 /* We are in the progress to start a full sync... */
1571 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1572 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001573 /* no other bitmap changes expected during this phase */
1574 drbd_queue_bitmap_io(mdev,
1575 &drbd_bmio_set_n_write, &abw_start_sync,
1576 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001577
1578	/* We are invalidating ourselves... */
1579 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1580 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001581 /* other bitmap operation expected during this phase */
1582 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1583 "set_n_write from invalidate", BM_LOCKED_MASK);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001584
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001585 /* first half of local IO error, failure to attach,
1586 * or administrative detach */
1587 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
Philipp Reisner7caacb62011-12-14 18:01:21 +01001588 enum drbd_io_error_p eh = EP_PASS_ON;
1589 int was_io_error = 0;
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001590 /* corresponding get_ldev was in __drbd_set_state, to serialize
Philipp Reisner7caacb62011-12-14 18:01:21 +01001591 * our cleanup here with the transition to D_DISKLESS.
1592		 * But it is still not safe to dereference ldev here, since
1593		 * we might come from a failed Attach before ldev was set. */
1594 if (mdev->ldev) {
1595 eh = mdev->ldev->dc.on_io_error;
1596 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001597
Philipp Reisner7caacb62011-12-14 18:01:21 +01001598			/* Immediately allow completion of all application IO that waits
1599 for completion from the local disk. */
1600 tl_abort_disk_io(mdev);
Philipp Reisner2b4dd362011-03-14 13:01:50 +01001601
Philipp Reisner7caacb62011-12-14 18:01:21 +01001602 /* current state still has to be D_FAILED,
1603 * there is only one way out: to D_DISKLESS,
1604 * and that may only happen after our put_ldev below. */
1605 if (mdev->state.disk != D_FAILED)
1606 dev_err(DEV,
1607 "ASSERT FAILED: disk is %s during detach\n",
1608 drbd_disk_str(mdev->state.disk));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001609
Philipp Reisner7caacb62011-12-14 18:01:21 +01001610 if (ns.conn >= C_CONNECTED)
1611 drbd_send_state(mdev, ns);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001612
Philipp Reisner7caacb62011-12-14 18:01:21 +01001613 drbd_rs_cancel_all(mdev);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001614
Philipp Reisner7caacb62011-12-14 18:01:21 +01001615 /* In case we want to get something to stable storage still,
1616 * this may be the last chance.
1617 * Following put_ldev may transition to D_DISKLESS. */
1618 drbd_md_sync(mdev);
1619 }
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001620 put_ldev(mdev);
1621
1622 if (was_io_error && eh == EP_CALL_HELPER)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001623 drbd_khelper(mdev, "local-io-error");
1624 }
1625
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001626 /* second half of local IO error, failure to attach,
1627 * or administrative detach,
1628 * after local_cnt references have reached zero again */
1629 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1630 /* We must still be diskless,
1631 * re-attach has to be serialized with this! */
1632 if (mdev->state.disk != D_DISKLESS)
1633 dev_err(DEV,
1634 "ASSERT FAILED: disk is %s while going diskless\n",
1635 drbd_disk_str(mdev->state.disk));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001636
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001637 mdev->rs_total = 0;
1638 mdev->rs_failed = 0;
1639 atomic_set(&mdev->rs_pending_cnt, 0);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001640
Philipp Reisner4afc4332011-12-13 10:31:32 +01001641 if (ns.conn >= C_CONNECTED)
1642 drbd_send_state(mdev, ns);
1643
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001644 /* corresponding get_ldev in __drbd_set_state
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001645 * this may finally trigger drbd_ldev_destroy. */
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001646 put_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001647 }
1648
Philipp Reisner738a84b2011-03-03 00:21:30 +01001649	/* Notify peer that I had a local IO error, and did not detach. */
Philipp Reisner4afc4332011-12-13 10:31:32 +01001650 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001651 drbd_send_state(mdev, ns);
Philipp Reisner738a84b2011-03-03 00:21:30 +01001652
Philipp Reisnerb411b362009-09-25 16:07:19 -07001653 /* Disks got bigger while they were detached */
1654 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1655 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1656 if (ns.conn == C_CONNECTED)
1657 resync_after_online_grow(mdev);
1658 }
1659
1660 /* A resync finished or aborted, wake paused devices... */
1661 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1662 (os.peer_isp && !ns.peer_isp) ||
1663 (os.user_isp && !ns.user_isp))
1664 resume_next_sg(mdev);
1665
Lars Ellenbergaf85e8e2010-10-07 16:07:55 +02001666 /* sync target done with resync. Explicitly notify peer, even though
1667 * it should (at least for non-empty resyncs) already know itself. */
1668 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001669 drbd_send_state(mdev, ns);
Lars Ellenbergaf85e8e2010-10-07 16:07:55 +02001670
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001671 /* This triggers bitmap writeout of potentially still unwritten pages
1672 * if the resync finished cleanly, or aborted because of peer disk
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001673 * failure, or because of connection loss.
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001674 * For resync aborted because of local disk failure, we cannot do
1675 * any bitmap writeout anymore.
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001676 * No harm done if some bits change during this phase.
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001677 */
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001678 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1679 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1680 "write from resync_finished", BM_LOCKED_SET_ALLOWED);
Lars Ellenberg79a30d22011-01-20 10:32:05 +01001681 put_ldev(mdev);
1682 }
Lars Ellenberg02851e92010-12-16 14:47:39 +01001683
Philipp Reisnerf70b35112010-06-24 14:34:40 +02001684	/* free tl_hash if we got thawed and are C_STANDALONE */
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001685 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
Philipp Reisnerf70b35112010-06-24 14:34:40 +02001686 drbd_free_tl_hash(mdev);
1687
Philipp Reisnerb411b362009-09-25 16:07:19 -07001688 /* Upon network connection, we need to start the receiver */
1689 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1690 drbd_thread_start(&mdev->receiver);
1691
1692 /* Terminate worker thread if we are unconfigured - it will be
1693 restarted as needed... */
1694 if (ns.disk == D_DISKLESS &&
1695 ns.conn == C_STANDALONE &&
1696 ns.role == R_SECONDARY) {
1697 if (os.aftr_isp != ns.aftr_isp)
1698 resume_next_sg(mdev);
1699 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1700 if (test_bit(DEVICE_DYING, &mdev->flags))
1701 drbd_thread_stop_nowait(&mdev->worker);
1702 }
1703
1704 drbd_md_sync(mdev);
1705}
1706
1707
1708static int drbd_thread_setup(void *arg)
1709{
1710 struct drbd_thread *thi = (struct drbd_thread *) arg;
1711 struct drbd_conf *mdev = thi->mdev;
1712 unsigned long flags;
1713 int retval;
1714
1715restart:
1716 retval = thi->function(thi);
1717
1718 spin_lock_irqsave(&thi->t_lock, flags);
1719
1720 /* if the receiver has been "Exiting", the last thing it did
1721 * was set the conn state to "StandAlone",
1722 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1723 * and receiver thread will be "started".
1724 * drbd_thread_start needs to set "Restarting" in that case.
1725 * t_state check and assignment needs to be within the same spinlock,
1726 * so either thread_start sees Exiting, and can remap to Restarting,
1727	 * or thread_start sees None, and can proceed as normal.
1728 */
1729
1730 if (thi->t_state == Restarting) {
1731 dev_info(DEV, "Restarting %s\n", current->comm);
1732 thi->t_state = Running;
1733 spin_unlock_irqrestore(&thi->t_lock, flags);
1734 goto restart;
1735 }
1736
1737 thi->task = NULL;
1738 thi->t_state = None;
1739 smp_mb();
1740 complete(&thi->stop);
1741 spin_unlock_irqrestore(&thi->t_lock, flags);
1742
1743 dev_info(DEV, "Terminating %s\n", current->comm);
1744
1745 /* Release mod reference taken when thread was started */
1746 module_put(THIS_MODULE);
1747 return retval;
1748}
1749
1750static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1751 int (*func) (struct drbd_thread *))
1752{
1753 spin_lock_init(&thi->t_lock);
1754 thi->task = NULL;
1755 thi->t_state = None;
1756 thi->function = func;
1757 thi->mdev = mdev;
1758}
1759
1760int drbd_thread_start(struct drbd_thread *thi)
1761{
1762 struct drbd_conf *mdev = thi->mdev;
1763 struct task_struct *nt;
1764 unsigned long flags;
1765
1766 const char *me =
1767 thi == &mdev->receiver ? "receiver" :
1768 thi == &mdev->asender ? "asender" :
1769 thi == &mdev->worker ? "worker" : "NONSENSE";
1770
1771 /* is used from state engine doing drbd_thread_stop_nowait,
1772 * while holding the req lock irqsave */
1773 spin_lock_irqsave(&thi->t_lock, flags);
1774
1775 switch (thi->t_state) {
1776 case None:
1777 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1778 me, current->comm, current->pid);
1779
1780 /* Get ref on module for thread - this is released when thread exits */
1781 if (!try_module_get(THIS_MODULE)) {
1782 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1783 spin_unlock_irqrestore(&thi->t_lock, flags);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001784 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001785 }
1786
1787 init_completion(&thi->stop);
1788 D_ASSERT(thi->task == NULL);
1789 thi->reset_cpu_mask = 1;
1790 thi->t_state = Running;
1791 spin_unlock_irqrestore(&thi->t_lock, flags);
1792 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1793
1794 nt = kthread_create(drbd_thread_setup, (void *) thi,
1795 "drbd%d_%s", mdev_to_minor(mdev), me);
1796
1797 if (IS_ERR(nt)) {
1798 dev_err(DEV, "Couldn't start thread\n");
1799
1800 module_put(THIS_MODULE);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001801 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001802 }
1803 spin_lock_irqsave(&thi->t_lock, flags);
1804 thi->task = nt;
1805 thi->t_state = Running;
1806 spin_unlock_irqrestore(&thi->t_lock, flags);
1807 wake_up_process(nt);
1808 break;
1809 case Exiting:
1810 thi->t_state = Restarting;
1811 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1812 me, current->comm, current->pid);
1813 /* fall through */
1814 case Running:
1815 case Restarting:
1816 default:
1817 spin_unlock_irqrestore(&thi->t_lock, flags);
1818 break;
1819 }
1820
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001821 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001822}
1823
1824
1825void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1826{
1827 unsigned long flags;
1828
1829 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1830
1831 /* may be called from state engine, holding the req lock irqsave */
1832 spin_lock_irqsave(&thi->t_lock, flags);
1833
1834 if (thi->t_state == None) {
1835 spin_unlock_irqrestore(&thi->t_lock, flags);
1836 if (restart)
1837 drbd_thread_start(thi);
1838 return;
1839 }
1840
1841 if (thi->t_state != ns) {
1842 if (thi->task == NULL) {
1843 spin_unlock_irqrestore(&thi->t_lock, flags);
1844 return;
1845 }
1846
1847 thi->t_state = ns;
1848 smp_mb();
1849 init_completion(&thi->stop);
1850 if (thi->task != current)
1851 force_sig(DRBD_SIGKILL, thi->task);
1852
1853 }
1854
1855 spin_unlock_irqrestore(&thi->t_lock, flags);
1856
1857 if (wait)
1858 wait_for_completion(&thi->stop);
1859}
1860
1861#ifdef CONFIG_SMP
1862/**
1863 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1864 * @mdev: DRBD device.
1865 *
1866 * Forces all threads of a device onto the same CPU. This is beneficial for
1867 * DRBD's performance. May be overridden by the user's configuration.
1868 */
1869void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1870{
1871 int ord, cpu;
1872
1873 /* user override. */
1874 if (cpumask_weight(mdev->cpu_mask))
1875 return;
1876
1877 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1878 for_each_online_cpu(cpu) {
1879 if (ord-- == 0) {
1880 cpumask_set_cpu(cpu, mdev->cpu_mask);
1881 return;
1882 }
1883 }
1884 /* should not be reached */
1885 cpumask_setall(mdev->cpu_mask);
1886}
1887
1888/**
1889 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1890 * @mdev: DRBD device.
1891 *
1892 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1893 * prematurely.
1894 */
1895void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1896{
1897 struct task_struct *p = current;
1898 struct drbd_thread *thi =
1899 p == mdev->asender.task ? &mdev->asender :
1900 p == mdev->receiver.task ? &mdev->receiver :
1901 p == mdev->worker.task ? &mdev->worker :
1902 NULL;
1903 ERR_IF(thi == NULL)
1904 return;
1905 if (!thi->reset_cpu_mask)
1906 return;
1907 thi->reset_cpu_mask = 0;
1908 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1909}
1910#endif
1911
1912/* the appropriate socket mutex must be held already */
1913int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001914 enum drbd_packets cmd, struct p_header80 *h,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001915 size_t size, unsigned msg_flags)
1916{
1917 int sent, ok;
1918
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001919 ERR_IF(!h) return false;
1920 ERR_IF(!size) return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001921
1922 h->magic = BE_DRBD_MAGIC;
1923 h->command = cpu_to_be16(cmd);
Philipp Reisner0b70a132010-08-20 13:36:10 +02001924 h->length = cpu_to_be16(size-sizeof(struct p_header80));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001925
Philipp Reisnerb411b362009-09-25 16:07:19 -07001926 sent = drbd_send(mdev, sock, h, size, msg_flags);
1927
1928 ok = (sent == size);
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01001929 if (!ok && !signal_pending(current))
1930 dev_warn(DEV, "short sent %s size=%d sent=%d\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001931 cmdname(cmd), (int)size, sent);
1932 return ok;
1933}
1934
1935/* don't pass the socket. we may only look at it
1936 * when we hold the appropriate socket mutex.
1937 */
1938int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001939 enum drbd_packets cmd, struct p_header80 *h, size_t size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001940{
1941 int ok = 0;
1942 struct socket *sock;
1943
1944 if (use_data_socket) {
1945 mutex_lock(&mdev->data.mutex);
1946 sock = mdev->data.socket;
1947 } else {
1948 mutex_lock(&mdev->meta.mutex);
1949 sock = mdev->meta.socket;
1950 }
1951
1952 /* drbd_disconnect() could have called drbd_free_sock()
1953 * while we were waiting in down()... */
1954 if (likely(sock != NULL))
1955 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1956
1957 if (use_data_socket)
1958 mutex_unlock(&mdev->data.mutex);
1959 else
1960 mutex_unlock(&mdev->meta.mutex);
1961 return ok;
1962}
1963
1964int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1965 size_t size)
1966{
Philipp Reisner0b70a132010-08-20 13:36:10 +02001967 struct p_header80 h;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001968 int ok;
1969
1970 h.magic = BE_DRBD_MAGIC;
1971 h.command = cpu_to_be16(cmd);
1972 h.length = cpu_to_be16(size);
1973
1974 if (!drbd_get_data_sock(mdev))
1975 return 0;
1976
Philipp Reisnerb411b362009-09-25 16:07:19 -07001977 ok = (sizeof(h) ==
1978 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1979 ok = ok && (size ==
1980 drbd_send(mdev, mdev->data.socket, data, size, 0));
1981
1982 drbd_put_data_sock(mdev);
1983
1984 return ok;
1985}
1986
1987int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1988{
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001989 struct p_rs_param_95 *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001990 struct socket *sock;
1991 int size, rv;
1992 const int apv = mdev->agreed_pro_version;
1993
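	/* The parameter packet grows with the agreed protocol version:
	 * plain p_rs_param up to apv 87, the verify_alg string added for 88,
	 * p_rs_param_89 up to 94, and p_rs_param_95 (with the resync
	 * controller settings) from 95 on. */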
1994 size = apv <= 87 ? sizeof(struct p_rs_param)
1995 : apv == 88 ? sizeof(struct p_rs_param)
1996 + strlen(mdev->sync_conf.verify_alg) + 1
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001997 : apv <= 94 ? sizeof(struct p_rs_param_89)
1998 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001999
2000 /* used from admin command context and receiver/worker context.
2001 * to avoid kmalloc, grab the socket right here,
2002 * then use the pre-allocated sbuf there */
2003 mutex_lock(&mdev->data.mutex);
2004 sock = mdev->data.socket;
2005
2006 if (likely(sock != NULL)) {
2007 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
2008
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002009 p = &mdev->data.sbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002010
2011 /* initialize verify_alg and csums_alg */
2012 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2013
2014 p->rate = cpu_to_be32(sc->rate);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002015 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
2016 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
2017 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
2018 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002019
2020 if (apv >= 88)
2021 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
2022 if (apv >= 89)
2023 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
2024
2025 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
2026 } else
2027 rv = 0; /* not ok */
2028
2029 mutex_unlock(&mdev->data.mutex);
2030
2031 return rv;
2032}
2033
2034int drbd_send_protocol(struct drbd_conf *mdev)
2035{
2036 struct p_protocol *p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002037 int size, cf, rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002038
2039 size = sizeof(struct p_protocol);
2040
2041 if (mdev->agreed_pro_version >= 87)
2042 size += strlen(mdev->net_conf->integrity_alg) + 1;
2043
2044 /* we must not recurse into our own queue,
2045 * as that is blocked during handshake */
2046 p = kmalloc(size, GFP_NOIO);
2047 if (p == NULL)
2048 return 0;
2049
2050 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
2051 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
2052 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
2053 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002054 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
2055
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002056 cf = 0;
2057 if (mdev->net_conf->want_lose)
2058 cf |= CF_WANT_LOSE;
2059 if (mdev->net_conf->dry_run) {
2060 if (mdev->agreed_pro_version >= 92)
2061 cf |= CF_DRY_RUN;
2062 else {
2063 dev_err(DEV, "--dry-run is not supported by peer");
Dan Carpenter7ac314c2010-04-22 14:27:23 +02002064 kfree(p);
Philipp Reisner148efa12011-01-15 00:21:15 +01002065 return -1;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002066 }
2067 }
2068 p->conn_flags = cpu_to_be32(cf);
2069
Philipp Reisnerb411b362009-09-25 16:07:19 -07002070 if (mdev->agreed_pro_version >= 87)
2071 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
2072
2073 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002074 (struct p_header80 *)p, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002075 kfree(p);
2076 return rv;
2077}
2078
2079int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
2080{
2081 struct p_uuids p;
2082 int i;
2083
2084 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
2085 return 1;
2086
2087 for (i = UI_CURRENT; i < UI_SIZE; i++)
2088 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2089
2090 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2091 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
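	/* Flag bits sent in UI_FLAGS: 1 = want_lose, 2 = we were a crashed
	 * primary, 4 = disk was D_INCONSISTENT while negotiating; callers
	 * may additionally pass 8, see drbd_send_uuids_skip_initial_sync(). */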
2092 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2093 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2094 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2095 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2096
2097 put_ldev(mdev);
2098
2099 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002100 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002101}
2102
2103int drbd_send_uuids(struct drbd_conf *mdev)
2104{
2105 return _drbd_send_uuids(mdev, 0);
2106}
2107
2108int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2109{
2110 return _drbd_send_uuids(mdev, 8);
2111}
2112
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002113void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2114{
2115 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2116 u64 *uuid = mdev->ldev->md.uuid;
2117 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2118 text,
2119 (unsigned long long)uuid[UI_CURRENT],
2120 (unsigned long long)uuid[UI_BITMAP],
2121 (unsigned long long)uuid[UI_HISTORY_START],
2122 (unsigned long long)uuid[UI_HISTORY_END]);
2123 put_ldev(mdev);
2124 } else {
2125 dev_info(DEV, "%s effective data uuid: %016llX\n",
2126 text,
2127 (unsigned long long)mdev->ed_uuid);
2128 }
2129}
2130
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002131int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002132{
2133 struct p_rs_uuid p;
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002134 u64 uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002135
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002136 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
2137
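	/* Derive the new sync UUID from the current bitmap UUID if we have
	 * a usable one; otherwise fall back to a random value. */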
Philipp Reisner5ba3dac2011-10-05 15:54:18 +02002138 uuid = mdev->ldev->md.uuid[UI_BITMAP];
2139 if (uuid && uuid != UUID_JUST_CREATED)
2140 uuid = uuid + UUID_NEW_BM_OFFSET;
2141 else
2142 get_random_bytes(&uuid, sizeof(u64));
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002143 drbd_uuid_set(mdev, UI_BITMAP, uuid);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002144 drbd_print_uuids(mdev, "updated sync UUID");
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002145 drbd_md_sync(mdev);
2146 p.uuid = cpu_to_be64(uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002147
2148 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002149 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002150}
2151
Philipp Reisnere89b5912010-03-24 17:11:33 +01002152int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002153{
2154 struct p_sizes p;
2155 sector_t d_size, u_size;
Philipp Reisner99432fc2011-05-20 16:39:13 +02002156 int q_order_type, max_bio_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002157 int ok;
2158
2159 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2160 D_ASSERT(mdev->ldev->backing_bdev);
2161 d_size = drbd_get_max_capacity(mdev->ldev);
2162 u_size = mdev->ldev->dc.disk_size;
2163 q_order_type = drbd_queue_order_type(mdev);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002164 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2165 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002166 put_ldev(mdev);
2167 } else {
2168 d_size = 0;
2169 u_size = 0;
2170 q_order_type = QUEUE_ORDERED_NONE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02002171 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002172 }
2173
Philipp Reisner68093842011-06-30 15:43:06 +02002174 /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
2175 if (mdev->agreed_pro_version <= 94)
2176 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
2177
Philipp Reisnerb411b362009-09-25 16:07:19 -07002178 p.d_size = cpu_to_be64(d_size);
2179 p.u_size = cpu_to_be64(u_size);
2180 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
Philipp Reisner99432fc2011-05-20 16:39:13 +02002181 p.max_bio_size = cpu_to_be32(max_bio_size);
Philipp Reisnere89b5912010-03-24 17:11:33 +01002182 p.queue_order_type = cpu_to_be16(q_order_type);
2183 p.dds_flags = cpu_to_be16(flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002184
2185 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002186 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002187 return ok;
2188}
2189
2190/**
Lars Ellenbergf479ea02011-10-27 16:52:30 +02002191 * drbd_send_current_state() - Sends the drbd state to the peer
Philipp Reisnerb411b362009-09-25 16:07:19 -07002192 * @mdev: DRBD device.
2193 */
Lars Ellenbergf479ea02011-10-27 16:52:30 +02002194int drbd_send_current_state(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002195{
2196 struct socket *sock;
2197 struct p_state p;
2198 int ok = 0;
2199
2200	/* Grab state lock so we won't send state if we're in the middle
2201 * of a cluster wide state change on another thread */
2202 drbd_state_lock(mdev);
2203
2204 mutex_lock(&mdev->data.mutex);
2205
2206 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2207 sock = mdev->data.socket;
2208
2209 if (likely(sock != NULL)) {
2210 ok = _drbd_send_cmd(mdev, sock, P_STATE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002211 (struct p_header80 *)&p, sizeof(p), 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002212 }
2213
2214 mutex_unlock(&mdev->data.mutex);
2215
2216 drbd_state_unlock(mdev);
2217 return ok;
2218}
2219
Lars Ellenbergf479ea02011-10-27 16:52:30 +02002220/**
2221 * drbd_send_state() - After a state change, sends the new state to the peer
2222 * @mdev: DRBD device.
2223 * @state: the state to send, not necessarily the current state.
2224 *
2225 * Each state change queues an "after_state_ch" work, which will eventually
2226 * send the resulting new state to the peer. If more state changes happen
2227 * between queuing and processing of the after_state_ch work, we still
2228 * want to send each intermediary state in the order it occurred.
2229 */
2230int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
2231{
2232 struct socket *sock;
2233 struct p_state p;
2234 int ok = 0;
2235
2236 mutex_lock(&mdev->data.mutex);
2237
2238 p.state = cpu_to_be32(state.i);
2239 sock = mdev->data.socket;
2240
2241 if (likely(sock != NULL)) {
2242 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2243 (struct p_header80 *)&p, sizeof(p), 0);
2244 }
2245
2246 mutex_unlock(&mdev->data.mutex);
2247
2248 return ok;
2249}
2250
Philipp Reisnerb411b362009-09-25 16:07:19 -07002251int drbd_send_state_req(struct drbd_conf *mdev,
2252 union drbd_state mask, union drbd_state val)
2253{
2254 struct p_req_state p;
2255
2256 p.mask = cpu_to_be32(mask.i);
2257 p.val = cpu_to_be32(val.i);
2258
2259 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002260 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002261}
2262
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01002263int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002264{
2265 struct p_req_state_reply p;
2266
2267 p.retcode = cpu_to_be32(retcode);
2268
2269 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002270 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002271}
2272
2273int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2274 struct p_compressed_bm *p,
2275 struct bm_xfer_ctx *c)
2276{
2277 struct bitstream bs;
2278 unsigned long plain_bits;
2279 unsigned long tmp;
2280 unsigned long rl;
2281 unsigned len;
2282 unsigned toggle;
2283 int bits;
2284
2285 /* may we use this feature? */
2286 if ((mdev->sync_conf.use_rle == 0) ||
2287 (mdev->agreed_pro_version < 90))
2288 return 0;
2289
2290 if (c->bit_offset >= c->bm_bits)
2291 return 0; /* nothing to do. */
2292
2293 /* use at most thus many bytes */
2294 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2295 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2296 /* plain bits covered in this code string */
2297 plain_bits = 0;
2298
2299 /* p->encoding & 0x80 stores whether the first run length is set.
2300 * bit offset is implicit.
2301 * start with toggle == 2 to be able to tell the first iteration */
2302 toggle = 2;
2303
2304 /* see how much plain bits we can stuff into one packet
2305 * using RLE and VLI. */
2306 do {
2307 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2308 : _drbd_bm_find_next(mdev, c->bit_offset);
2309 if (tmp == -1UL)
2310 tmp = c->bm_bits;
2311 rl = tmp - c->bit_offset;
2312
2313 if (toggle == 2) { /* first iteration */
2314 if (rl == 0) {
2315 /* the first checked bit was set,
2316 * store start value, */
2317 DCBP_set_start(p, 1);
2318 /* but skip encoding of zero run length */
2319 toggle = !toggle;
2320 continue;
2321 }
2322 DCBP_set_start(p, 0);
2323 }
2324
2325 /* paranoia: catch zero runlength.
2326 * can only happen if bitmap is modified while we scan it. */
2327 if (rl == 0) {
2328 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2329 "t:%u bo:%lu\n", toggle, c->bit_offset);
2330 return -1;
2331 }
2332
2333 bits = vli_encode_bits(&bs, rl);
2334 if (bits == -ENOBUFS) /* buffer full */
2335 break;
2336 if (bits <= 0) {
2337 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2338 return 0;
2339 }
2340
2341 toggle = !toggle;
2342 plain_bits += rl;
2343 c->bit_offset = tmp;
2344 } while (c->bit_offset < c->bm_bits);
2345
2346 len = bs.cur.b - p->code + !!bs.cur.bit;
2347
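	/* len is the number of code bytes used; len << 3 is that in bits. */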
2348 if (plain_bits < (len << 3)) {
2349 /* incompressible with this method.
2350 * we need to rewind both word and bit position. */
2351 c->bit_offset -= plain_bits;
2352 bm_xfer_ctx_bit_to_word_offset(c);
2353 c->bit_offset = c->word_offset * BITS_PER_LONG;
2354 return 0;
2355 }
2356
2357 /* RLE + VLI was able to compress it just fine.
2358 * update c->word_offset. */
2359 bm_xfer_ctx_bit_to_word_offset(c);
2360
2361 /* store pad_bits */
2362 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2363
2364 return len;
2365}
2366
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002367/**
2368 * send_bitmap_rle_or_plain
2369 *
2370 * Return 0 when done, 1 when another iteration is needed, and a negative error
2371 * code upon failure.
2372 */
2373static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07002374send_bitmap_rle_or_plain(struct drbd_conf *mdev,
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002375 struct p_header80 *h, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002376{
2377 struct p_compressed_bm *p = (void*)h;
2378 unsigned long num_words;
2379 int len;
2380 int ok;
2381
2382 len = fill_bitmap_rle_bits(mdev, p, c);
2383
2384 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002385 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002386
2387 if (len) {
2388 DCBP_set_code(p, RLE_VLI_Bits);
2389 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2390 sizeof(*p) + len, 0);
2391
2392 c->packets[0]++;
2393 c->bytes[0] += sizeof(*p) + len;
2394
2395 if (c->bit_offset >= c->bm_bits)
2396 len = 0; /* DONE */
2397 } else {
2398 /* was not compressible.
2399 * send a buffer full of plain text bits instead. */
2400 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2401 len = num_words * sizeof(long);
2402 if (len)
2403 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2404 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002405 h, sizeof(struct p_header80) + len, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002406 c->word_offset += num_words;
2407 c->bit_offset = c->word_offset * BITS_PER_LONG;
2408
2409 c->packets[1]++;
Philipp Reisner0b70a132010-08-20 13:36:10 +02002410 c->bytes[1] += sizeof(struct p_header80) + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002411
2412 if (c->bit_offset > c->bm_bits)
2413 c->bit_offset = c->bm_bits;
2414 }
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002415 if (ok) {
2416 if (len == 0) {
2417 INFO_bm_xfer_stats(mdev, "send", c);
2418 return 0;
2419 } else
2420 return 1;
2421 }
2422 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002423}
2424
2425/* See the comment at receive_bitmap() */
2426int _drbd_send_bitmap(struct drbd_conf *mdev)
2427{
2428 struct bm_xfer_ctx c;
Philipp Reisner0b70a132010-08-20 13:36:10 +02002429 struct p_header80 *p;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002430 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002431
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002432 ERR_IF(!mdev->bitmap) return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002433
2434 /* maybe we should use some per thread scratch page,
2435 * and allocate that during initial device creation? */
Philipp Reisner0b70a132010-08-20 13:36:10 +02002436 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002437 if (!p) {
2438 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002439 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002440 }
2441
2442 if (get_ldev(mdev)) {
2443 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2444 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2445 drbd_bm_set_all(mdev);
2446 if (drbd_bm_write(mdev)) {
2447 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2448 * but otherwise process as per normal - need to tell other
2449 * side that a full resync is required! */
2450 dev_err(DEV, "Failed to write bitmap to disk!\n");
2451 } else {
2452 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2453 drbd_md_sync(mdev);
2454 }
2455 }
2456 put_ldev(mdev);
2457 }
2458
2459 c = (struct bm_xfer_ctx) {
2460 .bm_bits = drbd_bm_bits(mdev),
2461 .bm_words = drbd_bm_words(mdev),
2462 };
2463
2464 do {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002465 err = send_bitmap_rle_or_plain(mdev, p, &c);
2466 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002467
2468 free_page((unsigned long) p);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002469 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002470}
2471
2472int drbd_send_bitmap(struct drbd_conf *mdev)
2473{
2474 int err;
2475
2476 if (!drbd_get_data_sock(mdev))
2477 return -1;
2478 err = !_drbd_send_bitmap(mdev);
2479 drbd_put_data_sock(mdev);
2480 return err;
2481}
2482
2483int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2484{
2485 int ok;
2486 struct p_barrier_ack p;
2487
2488 p.barrier = barrier_nr;
2489 p.set_size = cpu_to_be32(set_size);
2490
2491 if (mdev->state.conn < C_CONNECTED)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002492 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002493 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002494 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002495 return ok;
2496}
2497
2498/**
2499 * _drbd_send_ack() - Sends an ack packet
2500 * @mdev: DRBD device.
2501 * @cmd: Packet command code.
2502 * @sector: sector, needs to be in big endian byte order
2503 * @blksize: size in byte, needs to be in big endian byte order
2504 * @block_id: Id, big endian byte order
2505 */
2506static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2507 u64 sector,
2508 u32 blksize,
2509 u64 block_id)
2510{
2511 int ok;
2512 struct p_block_ack p;
2513
2514 p.sector = sector;
2515 p.block_id = block_id;
2516 p.blksize = blksize;
2517 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2518
2519 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002520 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002521 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002522 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002523 return ok;
2524}
2525
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002526/* dp->sector and dp->block_id already/still in network byte order,
2527 * data_size is payload size according to dp->head,
2528 * and may need to be corrected for digest size. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002529int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002530 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002531{
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002532 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2533 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002534 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2535 dp->block_id);
2536}
2537
2538int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2539 struct p_block_req *rp)
2540{
2541 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2542}
2543
2544/**
2545 * drbd_send_ack() - Sends an ack packet
2546 * @mdev: DRBD device.
2547 * @cmd: Packet command code.
2548 * @e: Epoch entry.
2549 */
2550int drbd_send_ack(struct drbd_conf *mdev,
2551 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2552{
2553 return _drbd_send_ack(mdev, cmd,
2554 cpu_to_be64(e->sector),
2555 cpu_to_be32(e->size),
2556 e->block_id);
2557}
2558
2559/* This function misuses the block_id field to signal if the blocks
2560 * are in sync or not. */
2561int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2562 sector_t sector, int blksize, u64 block_id)
2563{
2564 return _drbd_send_ack(mdev, cmd,
2565 cpu_to_be64(sector),
2566 cpu_to_be32(blksize),
2567 cpu_to_be64(block_id));
2568}
2569
2570int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2571 sector_t sector, int size, u64 block_id)
2572{
2573 int ok;
2574 struct p_block_req p;
2575
2576 p.sector = cpu_to_be64(sector);
2577 p.block_id = block_id;
2578 p.blksize = cpu_to_be32(size);
2579
2580 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002581 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002582 return ok;
2583}
2584
2585int drbd_send_drequest_csum(struct drbd_conf *mdev,
2586 sector_t sector, int size,
2587 void *digest, int digest_size,
2588 enum drbd_packets cmd)
2589{
2590 int ok;
2591 struct p_block_req p;
2592
2593 p.sector = cpu_to_be64(sector);
2594 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2595 p.blksize = cpu_to_be32(size);
2596
2597 p.head.magic = BE_DRBD_MAGIC;
2598 p.head.command = cpu_to_be16(cmd);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002599 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002600
2601 mutex_lock(&mdev->data.mutex);
2602
2603 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2604 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2605
2606 mutex_unlock(&mdev->data.mutex);
2607
2608 return ok;
2609}
2610
2611int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2612{
2613 int ok;
2614 struct p_block_req p;
2615
2616 p.sector = cpu_to_be64(sector);
2617 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2618 p.blksize = cpu_to_be32(size);
2619
2620 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002621 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002622 return ok;
2623}
2624
2625/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002626 * returns false if we should retry,
2627 * true if we think connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07002628 */
2629static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2630{
2631 int drop_it;
2632 /* long elapsed = (long)(jiffies - mdev->last_received); */
2633
2634 drop_it = mdev->meta.socket == sock
2635 || !mdev->asender.task
2636 || get_t_state(&mdev->asender) != Running
2637 || mdev->state.conn < C_CONNECTED;
2638
2639 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002640 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002641
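	/* Not an obviously dead connection: count ko_count down once per
	 * send timeout, ask the peer for a ping, and only give up once
	 * ko_count reaches zero. */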
2642 drop_it = !--mdev->ko_count;
2643 if (!drop_it) {
2644 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2645 current->comm, current->pid, mdev->ko_count);
2646 request_ping(mdev);
2647 }
2648
2649 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2650}
2651
2652/* The idea of sendpage seems to be to put some kind of reference
2653 * to the page into the skb, and to hand it over to the NIC. In
2654 * this process get_page() gets called.
2655 *
2656 * As soon as the page was really sent over the network put_page()
2657 * gets called by some part of the network layer. [ NIC driver? ]
2658 *
2659 * [ get_page() / put_page() increment/decrement the count. If count
2660 * reaches 0 the page will be freed. ]
2661 *
2662 * This works nicely with pages from FSs.
2663 * But this means that in protocol A we might signal IO completion too early!
2664 *
2665 * In order not to corrupt data during a resync we must make sure
2666 * that we do not reuse our own buffer pages (EEs) too early, therefore
2667 * we have the net_ee list.
2668 *
2669 * XFS seems to have problems, still, it submits pages with page_count == 0!
2670 * As a workaround, we disable sendpage on pages
2671 * with page_count == 0 or PageSlab.
2672 */
2673static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002674 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002675{
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002676 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002677 kunmap(page);
2678 if (sent == size)
2679 mdev->send_cnt += size>>9;
2680 return sent == size;
2681}
2682
2683static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002684 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002685{
2686 mm_segment_t oldfs = get_fs();
2687 int sent, ok;
2688 int len = size;
2689
2690 /* e.g. XFS meta- & log-data is in slab pages, which have a
2691 * page_count of 0 and/or have PageSlab() set.
2692 * we cannot use send_page for those, as that does get_page();
2693 * put_page(); and would cause either a VM_BUG directly, or
2694 * __page_cache_release a page that would actually still be referenced
2695 * by someone, leading to some obscure delayed Oops somewhere else. */
2696 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002697 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002698
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002699 msg_flags |= MSG_NOSIGNAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002700 drbd_update_congested(mdev);
2701 set_fs(KERNEL_DS);
2702 do {
2703 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2704 offset, len,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002705 msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002706 if (sent == -EAGAIN) {
2707 if (we_should_drop_the_connection(mdev,
2708 mdev->data.socket))
2709 break;
2710 else
2711 continue;
2712 }
2713 if (sent <= 0) {
2714 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2715 __func__, (int)size, len, sent);
2716 break;
2717 }
2718 len -= sent;
2719 offset += sent;
2720 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2721 set_fs(oldfs);
2722 clear_bit(NET_CONGESTED, &mdev->flags);
2723
2724 ok = (len == 0);
2725 if (likely(ok))
2726 mdev->send_cnt += size>>9;
2727 return ok;
2728}
2729
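/* Copying variant: pushes each bio_vec through _drbd_no_send_page(), so
 * the payload is copied into the socket buffers. Used when the pages may
 * still be modified or released by upper layers (see drbd_send_dblock). */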
2730static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2731{
2732 struct bio_vec *bvec;
2733 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002734 /* hint all but last page with MSG_MORE */
Lars Ellenberg001a8862012-03-08 16:43:45 +01002735 bio_for_each_segment(bvec, bio, i) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002736 if (!_drbd_no_send_page(mdev, bvec->bv_page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002737 bvec->bv_offset, bvec->bv_len,
2738 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002739 return 0;
2740 }
2741 return 1;
2742}
2743
2744static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2745{
2746 struct bio_vec *bvec;
2747 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002748 /* hint all but last page with MSG_MORE */
Lars Ellenberg001a8862012-03-08 16:43:45 +01002749 bio_for_each_segment(bvec, bio, i) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002750 if (!_drbd_send_page(mdev, bvec->bv_page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002751 bvec->bv_offset, bvec->bv_len,
2752 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002753 return 0;
2754 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002755 return 1;
2756}
2757
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002758static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2759{
2760 struct page *page = e->pages;
2761 unsigned len = e->size;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002762 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002763 page_chain_for_each(page) {
2764 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002765 if (!_drbd_send_page(mdev, page, 0, l,
2766 page_chain_next(page) ? MSG_MORE : 0))
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002767 return 0;
2768 len -= l;
2769 }
2770 return 1;
2771}
2772
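/* Translate the request's bio flags (REQ_*) into DRBD wire flags (DP_*).
 * Peers speaking a protocol version before 95 only understand the SYNC hint. */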
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002773static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2774{
2775 if (mdev->agreed_pro_version >= 95)
2776 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002777 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2778 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2779 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2780 else
Jens Axboe721a9602011-03-09 11:56:30 +01002781 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002782}
2783
Philipp Reisnerb411b362009-09-25 16:07:19 -07002784/* Used to send write requests
2785 * R_PRIMARY -> Peer (P_DATA)
2786 */
2787int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2788{
2789 int ok = 1;
2790 struct p_data p;
2791 unsigned int dp_flags = 0;
2792 void *dgb;
2793 int dgs;
2794
2795 if (!drbd_get_data_sock(mdev))
2796 return 0;
2797
2798 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2799 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2800
Philipp Reisnerd5373382010-08-23 15:18:33 +02002801 if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
Philipp Reisner0b70a132010-08-20 13:36:10 +02002802 p.head.h80.magic = BE_DRBD_MAGIC;
2803 p.head.h80.command = cpu_to_be16(P_DATA);
2804 p.head.h80.length =
2805 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2806 } else {
2807 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2808 p.head.h95.command = cpu_to_be16(P_DATA);
2809 p.head.h95.length =
2810 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2811 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002812
2813 p.sector = cpu_to_be64(req->sector);
2814 p.block_id = (unsigned long)req;
Lars Ellenberg671a74e2012-03-08 11:45:57 +01002815 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002816
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002817 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2818
Philipp Reisnerb411b362009-09-25 16:07:19 -07002819 if (mdev->state.conn >= C_SYNC_SOURCE &&
2820 mdev->state.conn <= C_PAUSED_SYNC_T)
2821 dp_flags |= DP_MAY_SET_IN_SYNC;
2822
2823 p.dp_flags = cpu_to_be32(dp_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002824 set_bit(UNPLUG_REMOTE, &mdev->flags);
2825 ok = (sizeof(p) ==
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002826 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002827 if (ok && dgs) {
2828 dgb = mdev->int_dig_out;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002829 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
Andreas Gruenbachercab2f742010-12-09 16:08:46 +01002830 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002831 }
2832 if (ok) {
Lars Ellenberg470be442010-11-10 10:36:52 +01002833 /* For protocol A, we have to memcpy the payload into
2834 * socket buffers, as we may complete right away
2835 * as soon as we handed it over to tcp, at which point the data
2836 * pages may become invalid.
2837 *
2838 * For data-integrity enabled, we copy it as well, so we can be
2839 * sure that even if the bio pages may still be modified, it
2840 * won't change the data on the wire, thus if the digest checks
2841 * out ok after sending on this side, but does not fit on the
2842 * receiving side, we sure have detected corruption elsewhere.
2843 */
2844 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002845 ok = _drbd_send_bio(mdev, req->master_bio);
2846 else
2847 ok = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01002848
2849 /* double check digest, sometimes buffers have been modified in flight. */
2850 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02002851 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01002852 * currently supported in kernel crypto. */
2853 unsigned char digest[64];
2854 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2855 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2856 dev_warn(DEV,
2857 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2858 (unsigned long long)req->sector, req->size);
2859 }
2860 } /* else if (dgs > 64) {
2861 ... Be noisy about digest too large ...
2862 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002863 }
2864
2865 drbd_put_data_sock(mdev);
Philipp Reisnerbd26bfc2010-05-04 12:33:58 +02002866
Philipp Reisnerb411b362009-09-25 16:07:19 -07002867 return ok;
2868}
2869
2870/* answer packet, used to send data back for read requests:
2871 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2872 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2873 */
2874int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2875 struct drbd_epoch_entry *e)
2876{
2877 int ok;
2878 struct p_data p;
2879 void *dgb;
2880 int dgs;
2881
2882 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2883 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2884
Philipp Reisnerd5373382010-08-23 15:18:33 +02002885 if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
Philipp Reisner0b70a132010-08-20 13:36:10 +02002886 p.head.h80.magic = BE_DRBD_MAGIC;
2887 p.head.h80.command = cpu_to_be16(cmd);
2888 p.head.h80.length =
2889 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2890 } else {
2891 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2892 p.head.h95.command = cpu_to_be16(cmd);
2893 p.head.h95.length =
2894 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2895 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002896
2897 p.sector = cpu_to_be64(e->sector);
2898 p.block_id = e->block_id;
2899 /* p.seq_num = 0; No sequence numbers here. */
2900
2901 /* Only called by our kernel thread.
2902 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2903 * in response to admin command or module unload.
2904 */
2905 if (!drbd_get_data_sock(mdev))
2906 return 0;
2907
Philipp Reisner0b70a132010-08-20 13:36:10 +02002908 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002909 if (ok && dgs) {
2910 dgb = mdev->int_dig_out;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002911 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
Andreas Gruenbachercab2f742010-12-09 16:08:46 +01002912 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002913 }
2914 if (ok)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002915 ok = _drbd_send_zc_ee(mdev, e);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002916
2917 drbd_put_data_sock(mdev);
Philipp Reisnerbd26bfc2010-05-04 12:33:58 +02002918
Philipp Reisnerb411b362009-09-25 16:07:19 -07002919 return ok;
2920}
2921
Philipp Reisner73a01a12010-10-27 14:33:00 +02002922int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2923{
2924 struct p_block_desc p;
2925
2926 p.sector = cpu_to_be64(req->sector);
2927 p.blksize = cpu_to_be32(req->size);
2928
2929 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2930}
2931
Philipp Reisnerb411b362009-09-25 16:07:19 -07002932/*
2933 drbd_send distinguishes two cases:
2934
2935 Packets sent via the data socket "sock"
2936 and packets sent via the meta data socket "msock"
2937
2938 sock msock
2939 -----------------+-------------------------+------------------------------
2940 timeout conf.timeout / 2 conf.timeout / 2
2941 timeout action send a ping via msock Abort communication
2942 and close all sockets
2943*/
2944
2945/*
2946 * you must have locked the appropriate [m]sock_mutex elsewhere!
2947 */
2948int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2949 void *buf, size_t size, unsigned msg_flags)
2950{
2951 struct kvec iov;
2952 struct msghdr msg;
2953 int rv, sent = 0;
2954
2955 if (!sock)
2956 return -1000;
2957
2958 /* THINK if (signal_pending) return ... ? */
2959
2960 iov.iov_base = buf;
2961 iov.iov_len = size;
2962
2963 msg.msg_name = NULL;
2964 msg.msg_namelen = 0;
2965 msg.msg_control = NULL;
2966 msg.msg_controllen = 0;
2967 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2968
2969 if (sock == mdev->data.socket) {
2970 mdev->ko_count = mdev->net_conf->ko_count;
2971 drbd_update_congested(mdev);
2972 }
2973 do {
2974 /* STRANGE
2975 * tcp_sendmsg does _not_ use its size parameter at all ?
2976 *
2977 * -EAGAIN on timeout, -EINTR on signal.
2978 */
2979/* THINK
2980 * do we need to block DRBD_SIG if sock == &meta.socket ??
2981 * otherwise wake_asender() might interrupt some send_*Ack !
2982 */
2983 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2984 if (rv == -EAGAIN) {
2985 if (we_should_drop_the_connection(mdev, sock))
2986 break;
2987 else
2988 continue;
2989 }
2990 D_ASSERT(rv != 0);
2991 if (rv == -EINTR) {
2992 flush_signals(current);
2993 rv = 0;
2994 }
2995 if (rv < 0)
2996 break;
2997 sent += rv;
2998 iov.iov_base += rv;
2999 iov.iov_len -= rv;
3000 } while (sent < size);
3001
3002 if (sock == mdev->data.socket)
3003 clear_bit(NET_CONGESTED, &mdev->flags);
3004
3005 if (rv <= 0) {
3006 if (rv != -EAGAIN) {
3007 dev_err(DEV, "%s_sendmsg returned %d\n",
3008 sock == mdev->meta.socket ? "msock" : "sock",
3009 rv);
3010 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
3011 } else
3012 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
3013 }
3014
3015 return sent;
3016}
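/* Illustrative sketch only, not part of the driver: drbd_send() returns the
 * number of bytes handed to the socket, which is less than the requested size
 * if an error occurred, so callers in this file treat it as "all or nothing"
 * by comparing the return value against the requested size, as done for the
 * p_data header above.  The helper name is made up for the example. */
static int __maybe_unused example_send_header(struct drbd_conf *mdev, struct p_data *p)
{
	int sent = drbd_send(mdev, mdev->data.socket, p, sizeof(*p), 0);

	return sent == sizeof(*p);	/* 1 on complete send, 0 otherwise */
}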
3017
3018static int drbd_open(struct block_device *bdev, fmode_t mode)
3019{
3020 struct drbd_conf *mdev = bdev->bd_disk->private_data;
3021 unsigned long flags;
3022 int rv = 0;
3023
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02003024 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003025 spin_lock_irqsave(&mdev->req_lock, flags);
3026 /* to have a stable mdev->state.role
3027 * and no race with updating open_cnt */
3028
3029 if (mdev->state.role != R_PRIMARY) {
3030 if (mode & FMODE_WRITE)
3031 rv = -EROFS;
3032 else if (!allow_oos)
3033 rv = -EMEDIUMTYPE;
3034 }
3035
3036 if (!rv)
3037 mdev->open_cnt++;
3038 spin_unlock_irqrestore(&mdev->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02003039 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003040
3041 return rv;
3042}
3043
3044static int drbd_release(struct gendisk *gd, fmode_t mode)
3045{
3046 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02003047 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003048 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02003049 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003050 return 0;
3051}
3052
Philipp Reisnerb411b362009-09-25 16:07:19 -07003053static void drbd_set_defaults(struct drbd_conf *mdev)
3054{
Philipp Reisner85f4cc12010-06-29 17:35:34 +02003055 /* This way we get a compile error when sync_conf grows
3056 and we forget to initialize the new member here */
3057 mdev->sync_conf = (struct syncer_conf) {
3058 /* .rate = */ DRBD_RATE_DEF,
3059 /* .after = */ DRBD_AFTER_DEF,
3060 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
Philipp Reisner85f4cc12010-06-29 17:35:34 +02003061 /* .verify_alg = */ {}, 0,
3062 /* .cpu_mask = */ {}, 0,
3063 /* .csums_alg = */ {}, 0,
Philipp Reisnere7564142010-06-29 17:35:34 +02003064 /* .use_rle = */ 0,
Philipp Reisner9a31d712010-07-05 13:42:03 +02003065 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
3066 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
3067 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
3068 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02003069 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
3070 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
Philipp Reisner85f4cc12010-06-29 17:35:34 +02003071 };
3072
3073 /* Have to initialize it this way, because the bitfield layout differs
3074 between big endian and little endian machines */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003075 mdev->state = (union drbd_state) {
3076 { .role = R_SECONDARY,
3077 .peer = R_UNKNOWN,
3078 .conn = C_STANDALONE,
3079 .disk = D_DISKLESS,
3080 .pdsk = D_UNKNOWN,
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003081 .susp = 0,
3082 .susp_nod = 0,
3083 .susp_fen = 0
Philipp Reisnerb411b362009-09-25 16:07:19 -07003084 } };
3085}
3086
3087void drbd_init_set_defaults(struct drbd_conf *mdev)
3088{
3089 /* the memset(,0,) did most of this.
3090 * note: only assignments, no allocation in here */
3091
3092 drbd_set_defaults(mdev);
3093
Philipp Reisnerb411b362009-09-25 16:07:19 -07003094 atomic_set(&mdev->ap_bio_cnt, 0);
3095 atomic_set(&mdev->ap_pending_cnt, 0);
3096 atomic_set(&mdev->rs_pending_cnt, 0);
3097 atomic_set(&mdev->unacked_cnt, 0);
3098 atomic_set(&mdev->local_cnt, 0);
3099 atomic_set(&mdev->net_cnt, 0);
3100 atomic_set(&mdev->packet_seq, 0);
3101 atomic_set(&mdev->pp_in_use, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003102 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02003103 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02003104 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02003105 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnere1711732011-06-27 11:51:46 +02003106 atomic_set(&mdev->md_io_in_use, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003107
Philipp Reisnerb411b362009-09-25 16:07:19 -07003108 mutex_init(&mdev->data.mutex);
3109 mutex_init(&mdev->meta.mutex);
3110 sema_init(&mdev->data.work.s, 0);
3111 sema_init(&mdev->meta.work.s, 0);
3112 mutex_init(&mdev->state_mutex);
3113
3114 spin_lock_init(&mdev->data.work.q_lock);
3115 spin_lock_init(&mdev->meta.work.q_lock);
3116
3117 spin_lock_init(&mdev->al_lock);
3118 spin_lock_init(&mdev->req_lock);
3119 spin_lock_init(&mdev->peer_seq_lock);
3120 spin_lock_init(&mdev->epoch_lock);
3121
3122 INIT_LIST_HEAD(&mdev->active_ee);
3123 INIT_LIST_HEAD(&mdev->sync_ee);
3124 INIT_LIST_HEAD(&mdev->done_ee);
3125 INIT_LIST_HEAD(&mdev->read_ee);
3126 INIT_LIST_HEAD(&mdev->net_ee);
3127 INIT_LIST_HEAD(&mdev->resync_reads);
3128 INIT_LIST_HEAD(&mdev->data.work.q);
3129 INIT_LIST_HEAD(&mdev->meta.work.q);
3130 INIT_LIST_HEAD(&mdev->resync_work.list);
3131 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003132 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003133 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003134 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003135 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02003136
Philipp Reisner794abb72010-12-27 11:51:23 +01003137 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003138 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003139 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003140 mdev->md_sync_work.cb = w_md_sync;
3141 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01003142 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003143 init_timer(&mdev->resync_timer);
3144 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01003145 init_timer(&mdev->start_resync_timer);
Philipp Reisner7fde2be2011-03-01 11:08:28 +01003146 init_timer(&mdev->request_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003147 mdev->resync_timer.function = resync_timer_fn;
3148 mdev->resync_timer.data = (unsigned long) mdev;
3149 mdev->md_sync_timer.function = md_sync_timer_fn;
3150 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01003151 mdev->start_resync_timer.function = start_resync_timer_fn;
3152 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisner7fde2be2011-03-01 11:08:28 +01003153 mdev->request_timer.function = request_timer_fn;
3154 mdev->request_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003155
3156 init_waitqueue_head(&mdev->misc_wait);
3157 init_waitqueue_head(&mdev->state_wait);
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003158 init_waitqueue_head(&mdev->net_cnt_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003159 init_waitqueue_head(&mdev->ee_wait);
3160 init_waitqueue_head(&mdev->al_wait);
3161 init_waitqueue_head(&mdev->seq_wait);
3162
3163 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3164 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3165 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3166
3167 mdev->agreed_pro_version = PRO_VERSION_MAX;
Philipp Reisner2451fc32010-08-24 13:43:11 +02003168 mdev->write_ordering = WO_bdev_flush;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003169 mdev->resync_wenr = LC_FREE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02003170 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3171 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003172}
3173
3174void drbd_mdev_cleanup(struct drbd_conf *mdev)
3175{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003176 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003177 if (mdev->receiver.t_state != None)
3178 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3179 mdev->receiver.t_state);
3180
3181 /* no need to lock it, I'm the only thread alive */
3182 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3183 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3184 mdev->al_writ_cnt =
3185 mdev->bm_writ_cnt =
3186 mdev->read_cnt =
3187 mdev->recv_cnt =
3188 mdev->send_cnt =
3189 mdev->writ_cnt =
3190 mdev->p_size =
3191 mdev->rs_start =
3192 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003193 mdev->rs_failed = 0;
3194 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02003195 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003196 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3197 mdev->rs_mark_left[i] = 0;
3198 mdev->rs_mark_time[i] = 0;
3199 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003200 D_ASSERT(mdev->net_conf == NULL);
3201
3202 drbd_set_my_capacity(mdev, 0);
3203 if (mdev->bitmap) {
3204 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01003205 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003206 drbd_bm_cleanup(mdev);
3207 }
3208
3209 drbd_free_resources(mdev);
Philipp Reisner07782862010-08-31 12:00:50 +02003210 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003211
3212 /*
3213 * currently we call drbd_init_ee only on module load, so
3214 * we may call drbd_release_ee only on module unload!
3215 */
3216 D_ASSERT(list_empty(&mdev->active_ee));
3217 D_ASSERT(list_empty(&mdev->sync_ee));
3218 D_ASSERT(list_empty(&mdev->done_ee));
3219 D_ASSERT(list_empty(&mdev->read_ee));
3220 D_ASSERT(list_empty(&mdev->net_ee));
3221 D_ASSERT(list_empty(&mdev->resync_reads));
3222 D_ASSERT(list_empty(&mdev->data.work.q));
3223 D_ASSERT(list_empty(&mdev->meta.work.q));
3224 D_ASSERT(list_empty(&mdev->resync_work.list));
3225 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003226 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01003227
3228 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003229}
3230
3231
3232static void drbd_destroy_mempools(void)
3233{
3234 struct page *page;
3235
3236 while (drbd_pp_pool) {
3237 page = drbd_pp_pool;
3238 drbd_pp_pool = (struct page *)page_private(page);
3239 __free_page(page);
3240 drbd_pp_vacant--;
3241 }
3242
3243 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3244
3245 if (drbd_ee_mempool)
3246 mempool_destroy(drbd_ee_mempool);
3247 if (drbd_request_mempool)
3248 mempool_destroy(drbd_request_mempool);
3249 if (drbd_ee_cache)
3250 kmem_cache_destroy(drbd_ee_cache);
3251 if (drbd_request_cache)
3252 kmem_cache_destroy(drbd_request_cache);
3253 if (drbd_bm_ext_cache)
3254 kmem_cache_destroy(drbd_bm_ext_cache);
3255 if (drbd_al_ext_cache)
3256 kmem_cache_destroy(drbd_al_ext_cache);
3257
3258 drbd_ee_mempool = NULL;
3259 drbd_request_mempool = NULL;
3260 drbd_ee_cache = NULL;
3261 drbd_request_cache = NULL;
3262 drbd_bm_ext_cache = NULL;
3263 drbd_al_ext_cache = NULL;
3264
3265 return;
3266}
3267
3268static int drbd_create_mempools(void)
3269{
3270 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01003271 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003272 int i;
3273
3274 /* prepare our caches and mempools */
3275 drbd_request_mempool = NULL;
3276 drbd_ee_cache = NULL;
3277 drbd_request_cache = NULL;
3278 drbd_bm_ext_cache = NULL;
3279 drbd_al_ext_cache = NULL;
3280 drbd_pp_pool = NULL;
3281
3282 /* caches */
3283 drbd_request_cache = kmem_cache_create(
3284 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3285 if (drbd_request_cache == NULL)
3286 goto Enomem;
3287
3288 drbd_ee_cache = kmem_cache_create(
3289 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3290 if (drbd_ee_cache == NULL)
3291 goto Enomem;
3292
3293 drbd_bm_ext_cache = kmem_cache_create(
3294 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3295 if (drbd_bm_ext_cache == NULL)
3296 goto Enomem;
3297
3298 drbd_al_ext_cache = kmem_cache_create(
3299 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3300 if (drbd_al_ext_cache == NULL)
3301 goto Enomem;
3302
3303 /* mempools */
3304 drbd_request_mempool = mempool_create(number,
3305 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3306 if (drbd_request_mempool == NULL)
3307 goto Enomem;
3308
3309 drbd_ee_mempool = mempool_create(number,
3310 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06003311 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003312 goto Enomem;
3313
3314 /* drbd's page pool */
3315 spin_lock_init(&drbd_pp_lock);
3316
3317 for (i = 0; i < number; i++) {
3318 page = alloc_page(GFP_HIGHUSER);
3319 if (!page)
3320 goto Enomem;
3321 set_page_private(page, (unsigned long)drbd_pp_pool);
3322 drbd_pp_pool = page;
3323 }
3324 drbd_pp_vacant = number;
3325
3326 return 0;
3327
3328Enomem:
3329 drbd_destroy_mempools(); /* in case we allocated some */
3330 return -ENOMEM;
3331}
3332
3333static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3334 void *unused)
3335{
3336 /* just so we have it. you never know what interesting things we
3337 * might want to do here some day...
3338 */
3339
3340 return NOTIFY_DONE;
3341}
3342
3343static struct notifier_block drbd_notifier = {
3344 .notifier_call = drbd_notify_sys,
3345};
3346
3347static void drbd_release_ee_lists(struct drbd_conf *mdev)
3348{
3349 int rr;
3350
3351 rr = drbd_release_ee(mdev, &mdev->active_ee);
3352 if (rr)
3353 dev_err(DEV, "%d EEs in active list found!\n", rr);
3354
3355 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3356 if (rr)
3357 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3358
3359 rr = drbd_release_ee(mdev, &mdev->read_ee);
3360 if (rr)
3361 dev_err(DEV, "%d EEs in read list found!\n", rr);
3362
3363 rr = drbd_release_ee(mdev, &mdev->done_ee);
3364 if (rr)
3365 dev_err(DEV, "%d EEs in done list found!\n", rr);
3366
3367 rr = drbd_release_ee(mdev, &mdev->net_ee);
3368 if (rr)
3369 dev_err(DEV, "%d EEs in net list found!\n", rr);
3370}
3371
3372/* caution. no locking.
3373 * currently only used from module cleanup code. */
3374static void drbd_delete_device(unsigned int minor)
3375{
3376 struct drbd_conf *mdev = minor_to_mdev(minor);
3377
3378 if (!mdev)
3379 return;
3380
Philipp Reisnerdfa8bed2011-06-29 14:06:08 +02003381 del_timer_sync(&mdev->request_timer);
3382
Philipp Reisnerb411b362009-09-25 16:07:19 -07003383 /* paranoia asserts */
3384 if (mdev->open_cnt != 0)
3385 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3386 __FILE__ , __LINE__);
3387
3388 ERR_IF (!list_empty(&mdev->data.work.q)) {
3389 struct list_head *lp;
3390 list_for_each(lp, &mdev->data.work.q) {
3391 dev_err(DEV, "lp = %p\n", lp);
3392 }
3393 };
3394 /* end paranoia asserts */
3395
3396 del_gendisk(mdev->vdisk);
3397
3398 /* cleanup stuff that may have been allocated during
3399 * device (re-)configuration or state changes */
3400
3401 if (mdev->this_bdev)
3402 bdput(mdev->this_bdev);
3403
3404 drbd_free_resources(mdev);
3405
3406 drbd_release_ee_lists(mdev);
3407
Bart Van Assche24c48302011-05-21 18:32:29 +02003408 /* should be freed on disconnect? */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003409 kfree(mdev->ee_hash);
3410 /*
3411 mdev->ee_hash_s = 0;
3412 mdev->ee_hash = NULL;
3413 */
3414
3415 lc_destroy(mdev->act_log);
3416 lc_destroy(mdev->resync);
3417
3418 kfree(mdev->p_uuid);
3419 /* mdev->p_uuid = NULL; */
3420
3421 kfree(mdev->int_dig_out);
3422 kfree(mdev->int_dig_in);
3423 kfree(mdev->int_dig_vv);
3424
3425 /* cleanup the rest that has been
3426 * allocated from drbd_new_device
3427 * and actually free the mdev itself */
3428 drbd_free_mdev(mdev);
3429}
3430
3431static void drbd_cleanup(void)
3432{
3433 unsigned int i;
3434
3435 unregister_reboot_notifier(&drbd_notifier);
3436
Lars Ellenberg17a93f302010-11-24 10:37:35 +01003437 /* first remove proc,
3438 * drbdsetup uses its presence to detect
3439 * whether DRBD is loaded.
3440 * If we got stuck in proc removal,
3441 * but had netlink already deregistered,
3442 * some drbdsetup commands might wait forever
3443 * for an answer.
3444 */
3445 if (drbd_proc)
3446 remove_proc_entry("drbd", NULL);
3447
Philipp Reisnerb411b362009-09-25 16:07:19 -07003448 drbd_nl_cleanup();
3449
3450 if (minor_table) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003451 i = minor_count;
3452 while (i--)
3453 drbd_delete_device(i);
3454 drbd_destroy_mempools();
3455 }
3456
3457 kfree(minor_table);
3458
3459 unregister_blkdev(DRBD_MAJOR, "drbd");
3460
3461 printk(KERN_INFO "drbd: module cleanup done.\n");
3462}
3463
3464/**
3465 * drbd_congested() - Callback for pdflush
3466 * @congested_data: User data
3467 * @bdi_bits: Bits pdflush is currently interested in
3468 *
3469 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3470 */
3471static int drbd_congested(void *congested_data, int bdi_bits)
3472{
3473 struct drbd_conf *mdev = congested_data;
3474 struct request_queue *q;
3475 char reason = '-';
3476 int r = 0;
3477
Andreas Gruenbacher1b881ef2010-12-13 18:03:38 +01003478 if (!may_inc_ap_bio(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003479 /* DRBD has frozen IO */
3480 r = bdi_bits;
3481 reason = 'd';
3482 goto out;
3483 }
3484
3485 if (get_ldev(mdev)) {
3486 q = bdev_get_queue(mdev->ldev->backing_bdev);
3487 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3488 put_ldev(mdev);
3489 if (r)
3490 reason = 'b';
3491 }
3492
3493 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3494 r |= (1 << BDI_async_congested);
3495 reason = reason == 'b' ? 'a' : 'n';
3496 }
3497
3498out:
3499 mdev->congestion_reason = reason;
3500 return r;
3501}
3502
3503struct drbd_conf *drbd_new_device(unsigned int minor)
3504{
3505 struct drbd_conf *mdev;
3506 struct gendisk *disk;
3507 struct request_queue *q;
3508
3509 /* GFP_KERNEL, we are outside of all write-out paths */
3510 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3511 if (!mdev)
3512 return NULL;
3513 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3514 goto out_no_cpumask;
3515
3516 mdev->minor = minor;
3517
3518 drbd_init_set_defaults(mdev);
3519
3520 q = blk_alloc_queue(GFP_KERNEL);
3521 if (!q)
3522 goto out_no_q;
3523 mdev->rq_queue = q;
3524 q->queuedata = mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003525
3526 disk = alloc_disk(1);
3527 if (!disk)
3528 goto out_no_disk;
3529 mdev->vdisk = disk;
3530
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003531 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003532
3533 disk->queue = q;
3534 disk->major = DRBD_MAJOR;
3535 disk->first_minor = minor;
3536 disk->fops = &drbd_ops;
3537 sprintf(disk->disk_name, "drbd%d", minor);
3538 disk->private_data = mdev;
3539
3540 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3541 /* we have no partitions. we contain only ourselves. */
3542 mdev->this_bdev->bd_contains = mdev->this_bdev;
3543
3544 q->backing_dev_info.congested_fn = drbd_congested;
3545 q->backing_dev_info.congested_data = mdev;
3546
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01003547 blk_queue_make_request(q, drbd_make_request);
Philipp Reisner99432fc2011-05-20 16:39:13 +02003548 /* Setting the max_hw_sectors to an odd value of 8 KiB here;
3549 this triggers a max_bio_size message upon first attach or connect */
3550 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003551 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3552 blk_queue_merge_bvec(q, drbd_merge_bvec);
Jens Axboe7eaceac2011-03-10 08:52:07 +01003553 q->queue_lock = &mdev->req_lock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003554
3555 mdev->md_io_page = alloc_page(GFP_KERNEL);
3556 if (!mdev->md_io_page)
3557 goto out_no_io_page;
3558
3559 if (drbd_bm_init(mdev))
3560 goto out_no_bitmap;
3561 /* no need to lock access, we are still initializing this minor device. */
3562 if (!tl_init(mdev))
3563 goto out_no_tl;
3564
3565 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3566 if (!mdev->app_reads_hash)
3567 goto out_no_app_reads;
3568
3569 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3570 if (!mdev->current_epoch)
3571 goto out_no_epoch;
3572
3573 INIT_LIST_HEAD(&mdev->current_epoch->list);
3574 mdev->epochs = 1;
3575
3576 return mdev;
3577
3578/* out_whatever_else:
3579 kfree(mdev->current_epoch); */
3580out_no_epoch:
3581 kfree(mdev->app_reads_hash);
3582out_no_app_reads:
3583 tl_cleanup(mdev);
3584out_no_tl:
3585 drbd_bm_cleanup(mdev);
3586out_no_bitmap:
3587 __free_page(mdev->md_io_page);
3588out_no_io_page:
3589 put_disk(disk);
3590out_no_disk:
3591 blk_cleanup_queue(q);
3592out_no_q:
3593 free_cpumask_var(mdev->cpu_mask);
3594out_no_cpumask:
3595 kfree(mdev);
3596 return NULL;
3597}
3598
3599/* counterpart of drbd_new_device.
3600 * last part of drbd_delete_device. */
3601void drbd_free_mdev(struct drbd_conf *mdev)
3602{
3603 kfree(mdev->current_epoch);
3604 kfree(mdev->app_reads_hash);
3605 tl_cleanup(mdev);
3606 if (mdev->bitmap) /* should no longer be there. */
3607 drbd_bm_cleanup(mdev);
3608 __free_page(mdev->md_io_page);
3609 put_disk(mdev->vdisk);
3610 blk_cleanup_queue(mdev->rq_queue);
3611 free_cpumask_var(mdev->cpu_mask);
Philipp Reisner37190942010-11-10 12:08:37 +01003612 drbd_free_tl_hash(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003613 kfree(mdev);
3614}
3615
3616
3617int __init drbd_init(void)
3618{
3619 int err;
3620
3621 if (sizeof(struct p_handshake) != 80) {
3622 printk(KERN_ERR
3623 "drbd: never change the size or layout "
3624 "of the HandShake packet.\n");
3625 return -EINVAL;
3626 }
3627
Philipp Reisner2b8a90b2011-01-10 11:15:17 +01003628 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003629 printk(KERN_ERR
3630 "drbd: invalid minor_count (%d)\n", minor_count);
3631#ifdef MODULE
3632 return -EINVAL;
3633#else
3634 minor_count = 8;
3635#endif
3636 }
3637
3638 err = drbd_nl_init();
3639 if (err)
3640 return err;
3641
3642 err = register_blkdev(DRBD_MAJOR, "drbd");
3643 if (err) {
3644 printk(KERN_ERR
3645 "drbd: unable to register block device major %d\n",
3646 DRBD_MAJOR);
3647 return err;
3648 }
3649
3650 register_reboot_notifier(&drbd_notifier);
3651
3652 /*
3653 * allocate all necessary structs
3654 */
3655 err = -ENOMEM;
3656
3657 init_waitqueue_head(&drbd_pp_wait);
3658
3659 drbd_proc = NULL; /* play safe for drbd_cleanup */
3660 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3661 GFP_KERNEL);
3662 if (!minor_table)
3663 goto Enomem;
3664
3665 err = drbd_create_mempools();
3666 if (err)
3667 goto Enomem;
3668
Lars Ellenberg8c484ee2010-03-11 16:47:58 +01003669 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003670 if (!drbd_proc) {
3671 printk(KERN_ERR "drbd: unable to register proc file\n");
3672 goto Enomem;
3673 }
3674
3675 rwlock_init(&global_state_lock);
3676
3677 printk(KERN_INFO "drbd: initialized. "
3678 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3679 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3680 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3681 printk(KERN_INFO "drbd: registered as block device major %d\n",
3682 DRBD_MAJOR);
3683 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3684
3685 return 0; /* Success! */
3686
3687Enomem:
3688 drbd_cleanup();
3689 if (err == -ENOMEM)
3690 /* currently always the case */
3691 printk(KERN_ERR "drbd: ran out of memory\n");
3692 else
3693 printk(KERN_ERR "drbd: initialization failure\n");
3694 return err;
3695}
3696
3697void drbd_free_bc(struct drbd_backing_dev *ldev)
3698{
3699 if (ldev == NULL)
3700 return;
3701
Tejun Heoe525fd82010-11-13 11:55:17 +01003702 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3703 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003704
3705 kfree(ldev);
3706}
3707
3708void drbd_free_sock(struct drbd_conf *mdev)
3709{
3710 if (mdev->data.socket) {
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003711 mutex_lock(&mdev->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003712 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3713 sock_release(mdev->data.socket);
3714 mdev->data.socket = NULL;
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003715 mutex_unlock(&mdev->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003716 }
3717 if (mdev->meta.socket) {
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003718 mutex_lock(&mdev->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003719 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3720 sock_release(mdev->meta.socket);
3721 mdev->meta.socket = NULL;
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003722 mutex_unlock(&mdev->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003723 }
3724}
3725
3726
3727void drbd_free_resources(struct drbd_conf *mdev)
3728{
3729 crypto_free_hash(mdev->csums_tfm);
3730 mdev->csums_tfm = NULL;
3731 crypto_free_hash(mdev->verify_tfm);
3732 mdev->verify_tfm = NULL;
3733 crypto_free_hash(mdev->cram_hmac_tfm);
3734 mdev->cram_hmac_tfm = NULL;
3735 crypto_free_hash(mdev->integrity_w_tfm);
3736 mdev->integrity_w_tfm = NULL;
3737 crypto_free_hash(mdev->integrity_r_tfm);
3738 mdev->integrity_r_tfm = NULL;
3739
3740 drbd_free_sock(mdev);
3741
3742 __no_warn(local,
3743 drbd_free_bc(mdev->ldev);
3744 mdev->ldev = NULL;);
3745}
3746
3747/* meta data management */
3748
3749struct meta_data_on_disk {
3750 u64 la_size; /* last agreed size. */
3751 u64 uuid[UI_SIZE]; /* UUIDs. */
3752 u64 device_uuid;
3753 u64 reserved_u64_1;
3754 u32 flags; /* MDF */
3755 u32 magic;
3756 u32 md_size_sect;
3757 u32 al_offset; /* offset to this block */
3758 u32 al_nr_extents; /* important for restoring the AL */
3759 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3760 u32 bm_offset; /* offset to the bitmap, from here */
3761 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
Philipp Reisner99432fc2011-05-20 16:39:13 +02003762 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3763 u32 reserved_u32[3];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003764
3765} __packed;
3766
3767/**
3768 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3769 * @mdev: DRBD device.
3770 */
3771void drbd_md_sync(struct drbd_conf *mdev)
3772{
3773 struct meta_data_on_disk *buffer;
3774 sector_t sector;
3775 int i;
3776
Lars Ellenbergee15b032010-09-03 10:00:09 +02003777 del_timer(&mdev->md_sync_timer);
3778 /* timer may be rearmed by drbd_md_mark_dirty() now. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003779 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3780 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003781
3782 /* We use D_FAILED here and not D_ATTACHING because we try to write
3783 * metadata even if we detach due to a disk failure! */
3784 if (!get_ldev_if_state(mdev, D_FAILED))
3785 return;
3786
Philipp Reisnere1711732011-06-27 11:51:46 +02003787 buffer = drbd_md_get_buffer(mdev);
3788 if (!buffer)
3789 goto out;
3790
Philipp Reisnerb411b362009-09-25 16:07:19 -07003791 memset(buffer, 0, 512);
3792
3793 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3794 for (i = UI_CURRENT; i < UI_SIZE; i++)
3795 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3796 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3797 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3798
3799 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3800 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3801 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3802 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3803 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3804
3805 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
Philipp Reisner99432fc2011-05-20 16:39:13 +02003806 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003807
3808 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3809 sector = mdev->ldev->md.md_offset;
3810
Lars Ellenberg3f3a9b82010-09-01 15:12:12 +02003811 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003812 /* this was a try anyway ... */
3813 dev_err(DEV, "meta data update failed!\n");
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003814 drbd_chk_io_error(mdev, 1, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003815 }
3816
3817 /* Update mdev->ldev->md.la_size_sect,
3818 * since we just updated it in the on-disk metadata. */
3819 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3820
Philipp Reisnere1711732011-06-27 11:51:46 +02003821 drbd_md_put_buffer(mdev);
3822out:
Philipp Reisnerb411b362009-09-25 16:07:19 -07003823 put_ldev(mdev);
3824}
3825
3826/**
3827 * drbd_md_read() - Reads in the meta data super block
3828 * @mdev: DRBD device.
3829 * @bdev: Device from which the meta data should be read in.
3830 *
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01003831 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
Philipp Reisnerb411b362009-09-25 16:07:19 -07003832 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3833 */
3834int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3835{
3836 struct meta_data_on_disk *buffer;
3837 int i, rv = NO_ERROR;
3838
3839 if (!get_ldev_if_state(mdev, D_ATTACHING))
3840 return ERR_IO_MD_DISK;
3841
Philipp Reisnere1711732011-06-27 11:51:46 +02003842 buffer = drbd_md_get_buffer(mdev);
3843 if (!buffer)
3844 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003845
3846 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003847 /* NOTE: can't do normal error processing here as this is
Philipp Reisnerb411b362009-09-25 16:07:19 -07003848 called BEFORE disk is attached */
3849 dev_err(DEV, "Error while reading metadata.\n");
3850 rv = ERR_IO_MD_DISK;
3851 goto err;
3852 }
3853
3854 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3855 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3856 rv = ERR_MD_INVALID;
3857 goto err;
3858 }
3859 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3860 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3861 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3862 rv = ERR_MD_INVALID;
3863 goto err;
3864 }
3865 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3866 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3867 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3868 rv = ERR_MD_INVALID;
3869 goto err;
3870 }
3871 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3872 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3873 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3874 rv = ERR_MD_INVALID;
3875 goto err;
3876 }
3877
3878 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3879 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3880 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3881 rv = ERR_MD_INVALID;
3882 goto err;
3883 }
3884
3885 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3886 for (i = UI_CURRENT; i < UI_SIZE; i++)
3887 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3888 bdev->md.flags = be32_to_cpu(buffer->flags);
3889 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3890 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3891
Philipp Reisner99432fc2011-05-20 16:39:13 +02003892 spin_lock_irq(&mdev->req_lock);
3893 if (mdev->state.conn < C_CONNECTED) {
3894 int peer;
3895 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3896 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3897 mdev->peer_max_bio_size = peer;
3898 }
3899 spin_unlock_irq(&mdev->req_lock);
3900
Philipp Reisnerb411b362009-09-25 16:07:19 -07003901 if (mdev->sync_conf.al_extents < 7)
3902 mdev->sync_conf.al_extents = 127;
3903
3904 err:
Philipp Reisnere1711732011-06-27 11:51:46 +02003905 drbd_md_put_buffer(mdev);
3906 out:
Philipp Reisnerb411b362009-09-25 16:07:19 -07003907 put_ldev(mdev);
3908
3909 return rv;
3910}
3911
3912/**
3913 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3914 * @mdev: DRBD device.
3915 *
3916 * Call this function if you change anything that should be written to
3917 * the meta-data super block. This function sets MD_DIRTY, and starts a
3918 * timer that ensures drbd_md_sync() gets called within five seconds.
3919 */
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003920#ifdef DEBUG
Lars Ellenbergee15b032010-09-03 10:00:09 +02003921void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3922{
3923 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3924 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3925 mdev->last_md_mark_dirty.line = line;
3926 mdev->last_md_mark_dirty.func = func;
3927 }
3928}
3929#else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003930void drbd_md_mark_dirty(struct drbd_conf *mdev)
3931{
Lars Ellenbergee15b032010-09-03 10:00:09 +02003932 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003933 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003934}
Lars Ellenbergee15b032010-09-03 10:00:09 +02003935#endif
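/* Illustrative sketch only, not part of the driver: a typical caller changes
 * the in-core metadata, marks it dirty, and then either relies on the
 * md_sync_timer armed above or forces the write-out immediately.  The helper
 * name is made up for the example. */
static void __maybe_unused example_set_device_uuid(struct drbd_conf *mdev, u64 uuid) __must_hold(local)
{
	mdev->ldev->md.device_uuid = uuid;
	drbd_md_mark_dirty(mdev);	/* sets MD_DIRTY and arms md_sync_timer */
	drbd_md_sync(mdev);		/* optional: write the super block right now */
}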
Philipp Reisnerb411b362009-09-25 16:07:19 -07003936
3937static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3938{
3939 int i;
3940
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003941 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003942 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003943}
3944
3945void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3946{
3947 if (idx == UI_CURRENT) {
3948 if (mdev->state.role == R_PRIMARY)
3949 val |= 1;
3950 else
3951 val &= ~((u64)1);
3952
3953 drbd_set_ed_uuid(mdev, val);
3954 }
3955
3956 mdev->ldev->md.uuid[idx] = val;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003957 drbd_md_mark_dirty(mdev);
3958}
3959
3960
3961void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3962{
3963 if (mdev->ldev->md.uuid[idx]) {
3964 drbd_uuid_move_history(mdev);
3965 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003966 }
3967 _drbd_uuid_set(mdev, idx, val);
3968}
3969
3970/**
3971 * drbd_uuid_new_current() - Creates a new current UUID
3972 * @mdev: DRBD device.
3973 *
3974 * Creates a new current UUID, and rotates the old current UUID into
3975 * the bitmap slot. Causes an incremental resync upon next connect.
3976 */
3977void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3978{
3979 u64 val;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003980 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003981
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003982 if (bm_uuid)
3983 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3984
Philipp Reisnerb411b362009-09-25 16:07:19 -07003985 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003986
3987 get_random_bytes(&val, sizeof(u64));
3988 _drbd_uuid_set(mdev, UI_CURRENT, val);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003989 drbd_print_uuids(mdev, "new current UUID");
Lars Ellenbergaaa8e2b2010-10-15 13:16:53 +02003990 /* get it to stable storage _now_ */
3991 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003992}
3993
3994void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3995{
3996 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3997 return;
3998
3999 if (val == 0) {
4000 drbd_uuid_move_history(mdev);
4001 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
4002 mdev->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004003 } else {
Lars Ellenberg62b0da32011-01-20 13:25:21 +01004004 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
4005 if (bm_uuid)
4006 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004007
Lars Ellenberg62b0da32011-01-20 13:25:21 +01004008 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004009 }
4010 drbd_md_mark_dirty(mdev);
4011}
4012
4013/**
4014 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
4015 * @mdev: DRBD device.
4016 *
4017 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
4018 */
4019int drbd_bmio_set_n_write(struct drbd_conf *mdev)
4020{
4021 int rv = -EIO;
4022
4023 if (get_ldev_if_state(mdev, D_ATTACHING)) {
4024 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
4025 drbd_md_sync(mdev);
4026 drbd_bm_set_all(mdev);
4027
4028 rv = drbd_bm_write(mdev);
4029
4030 if (!rv) {
4031 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
4032 drbd_md_sync(mdev);
4033 }
4034
4035 put_ldev(mdev);
4036 }
4037
4038 return rv;
4039}
4040
4041/**
4042 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
4043 * @mdev: DRBD device.
4044 *
4045 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
4046 */
4047int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
4048{
4049 int rv = -EIO;
4050
Philipp Reisner07782862010-08-31 12:00:50 +02004051 drbd_resume_al(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004052 if (get_ldev_if_state(mdev, D_ATTACHING)) {
4053 drbd_bm_clear_all(mdev);
4054 rv = drbd_bm_write(mdev);
4055 put_ldev(mdev);
4056 }
4057
4058 return rv;
4059}
4060
4061static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4062{
4063 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
Lars Ellenberg02851e92010-12-16 14:47:39 +01004064 int rv = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004065
4066 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
4067
Lars Ellenberg02851e92010-12-16 14:47:39 +01004068 if (get_ldev(mdev)) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004069 drbd_bm_lock(mdev, work->why, work->flags);
Lars Ellenberg02851e92010-12-16 14:47:39 +01004070 rv = work->io_fn(mdev);
4071 drbd_bm_unlock(mdev);
4072 put_ldev(mdev);
4073 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004074
4075 clear_bit(BITMAP_IO, &mdev->flags);
Philipp Reisner127b3172010-11-16 10:07:53 +01004076 smp_mb__after_clear_bit();
Philipp Reisnerb411b362009-09-25 16:07:19 -07004077 wake_up(&mdev->misc_wait);
4078
4079 if (work->done)
4080 work->done(mdev, rv);
4081
4082 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
4083 work->why = NULL;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004084 work->flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004085
4086 return 1;
4087}
4088
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02004089void drbd_ldev_destroy(struct drbd_conf *mdev)
4090{
4091 lc_destroy(mdev->resync);
4092 mdev->resync = NULL;
4093 lc_destroy(mdev->act_log);
4094 mdev->act_log = NULL;
4095 __no_warn(local,
4096 drbd_free_bc(mdev->ldev);
4097 mdev->ldev = NULL;);
4098
4099 if (mdev->md_io_tmpp) {
4100 __free_page(mdev->md_io_tmpp);
4101 mdev->md_io_tmpp = NULL;
4102 }
4103 clear_bit(GO_DISKLESS, &mdev->flags);
4104}
4105
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02004106static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4107{
4108 D_ASSERT(mdev->state.disk == D_FAILED);
Lars Ellenberg9d282872010-10-14 13:57:07 +02004109 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
4110 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02004111 * the protected members anymore, though, so once local_cnt reaches zero
4112 * again (via put_ldev), it will be safe to free them. */
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02004113 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02004114 return 1;
4115}
4116
4117void drbd_go_diskless(struct drbd_conf *mdev)
4118{
4119 D_ASSERT(mdev->state.disk == D_FAILED);
4120 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
Lars Ellenberg9d282872010-10-14 13:57:07 +02004121 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02004122}
4123
Philipp Reisnerb411b362009-09-25 16:07:19 -07004124/**
4125 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
4126 * @mdev: DRBD device.
4127 * @io_fn: IO callback to be called when bitmap IO is possible
4128 * @done: callback to be called after the bitmap IO was performed
4129 * @why: Descriptive text of the reason for doing the IO
4130 *
4131 * While IO on the bitmap happens we freeze application IO thus we ensure
4132 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
4133 * called from worker context. It MUST NOT be used while a previous such
4134 * work is still pending!
4135 */
4136void drbd_queue_bitmap_io(struct drbd_conf *mdev,
4137 int (*io_fn)(struct drbd_conf *),
4138 void (*done)(struct drbd_conf *, int),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004139 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004140{
4141 D_ASSERT(current == mdev->worker.task);
4142
4143 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4144 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4145 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4146 if (mdev->bm_io_work.why)
4147 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4148 why, mdev->bm_io_work.why);
4149
4150 mdev->bm_io_work.io_fn = io_fn;
4151 mdev->bm_io_work.done = done;
4152 mdev->bm_io_work.why = why;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004153 mdev->bm_io_work.flags = flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004154
Philipp Reisner22afd7e2010-11-16 15:30:44 +01004155 spin_lock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004156 set_bit(BITMAP_IO, &mdev->flags);
4157 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
Philipp Reisner127b3172010-11-16 10:07:53 +01004158 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004159 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004160 }
Philipp Reisner22afd7e2010-11-16 15:30:44 +01004161 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004162}
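/* Illustrative sketch only, not part of the driver: queueing bitmap IO from
 * worker context with a completion callback.  The callback, the helper name
 * and the flag choice are assumptions made for the example. */
static void example_bitmap_io_done(struct drbd_conf *mdev, int rv)
{
	if (rv)
		dev_err(DEV, "example bitmap IO failed: %d\n", rv);
}

static void __maybe_unused example_queue_set_n_write(struct drbd_conf *mdev)
{
	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, example_bitmap_io_done,
			     "example set_n_write", BM_LOCKED_SET_ALLOWED);
}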
4163
4164/**
4165 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4166 * @mdev: DRBD device.
4167 * @io_fn: IO callback to be called when bitmap IO is possible
4168 * @why: Descriptive text of the reason for doing the IO
4169 *
4170 * Freezes application IO while the actual IO operation runs. This
4171 * function MAY NOT be called from worker context.
4172 */
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004173int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
4174 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004175{
4176 int rv;
4177
4178 D_ASSERT(current != mdev->worker.task);
4179
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004180 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4181 drbd_suspend_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004182
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004183 drbd_bm_lock(mdev, why, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004184 rv = io_fn(mdev);
4185 drbd_bm_unlock(mdev);
4186
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01004187 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4188 drbd_resume_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004189
4190 return rv;
4191}
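/* Illustrative sketch only, not part of the driver: the synchronous variant
 * above, called from outside the worker, combined with one of the io_fn
 * helpers further up.  The helper name is made up, and passing 0 for the
 * flags (i.e. suspend application IO around the operation) is an assumption
 * for the example. */
static int __maybe_unused example_clear_bitmap_sync(struct drbd_conf *mdev)
{
	return drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			      "example clear_n_write", 0);
}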
4192
4193void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4194{
4195 if ((mdev->ldev->md.flags & flag) != flag) {
4196 drbd_md_mark_dirty(mdev);
4197 mdev->ldev->md.flags |= flag;
4198 }
4199}
4200
4201void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4202{
4203 if ((mdev->ldev->md.flags & flag) != 0) {
4204 drbd_md_mark_dirty(mdev);
4205 mdev->ldev->md.flags &= ~flag;
4206 }
4207}
4208int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4209{
4210 return (bdev->md.flags & flag) != 0;
4211}
4212
4213static void md_sync_timer_fn(unsigned long data)
4214{
4215 struct drbd_conf *mdev = (struct drbd_conf *) data;
4216
4217 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4218}
4219
4220static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4221{
4222 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
Lars Ellenbergee15b032010-09-03 10:00:09 +02004223#ifdef DEBUG
4224 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4225 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4226#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07004227 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004228 return 1;
4229}
4230
4231#ifdef CONFIG_DRBD_FAULT_INJECTION
4232/* Fault insertion support including random number generator shamelessly
4233 * stolen from kernel/rcutorture.c */
4234struct fault_random_state {
4235 unsigned long state;
4236 unsigned long count;
4237};
4238
4239#define FAULT_RANDOM_MULT 39916801 /* prime */
4240#define FAULT_RANDOM_ADD 479001701 /* prime */
4241#define FAULT_RANDOM_REFRESH 10000
4242
4243/*
4244 * Crude but fast random-number generator. Uses a linear congruential
4245 * generator, with occasional help from get_random_bytes().
4246 */
4247static unsigned long
4248_drbd_fault_random(struct fault_random_state *rsp)
4249{
4250 long refresh;
4251
Roel Kluin49829ea2009-12-15 22:55:44 +01004252 if (!rsp->count--) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004253 get_random_bytes(&refresh, sizeof(refresh));
4254 rsp->state += refresh;
4255 rsp->count = FAULT_RANDOM_REFRESH;
4256 }
4257 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4258 return swahw32(rsp->state);
4259}
4260
4261static char *
4262_drbd_fault_str(unsigned int type) {
4263 static char *_faults[] = {
4264 [DRBD_FAULT_MD_WR] = "Meta-data write",
4265 [DRBD_FAULT_MD_RD] = "Meta-data read",
4266 [DRBD_FAULT_RS_WR] = "Resync write",
4267 [DRBD_FAULT_RS_RD] = "Resync read",
4268 [DRBD_FAULT_DT_WR] = "Data write",
4269 [DRBD_FAULT_DT_RD] = "Data read",
4270 [DRBD_FAULT_DT_RA] = "Data read ahead",
4271 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
Philipp Reisner6b4388a2010-04-26 14:11:45 +02004272 [DRBD_FAULT_AL_EE] = "EE allocation",
4273 [DRBD_FAULT_RECEIVE] = "receive data corruption",
Philipp Reisnerb411b362009-09-25 16:07:19 -07004274 };
4275
4276 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4277}
4278
4279unsigned int
4280_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4281{
4282 static struct fault_random_state rrs = {0, 0};
4283
4284 unsigned int ret = (
4285 (fault_devs == 0 ||
4286 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4287 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4288
4289 if (ret) {
4290 fault_count++;
4291
Lars Ellenberg73835062010-05-27 11:51:56 +02004292 if (__ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004293 dev_warn(DEV, "***Simulating %s failure\n",
4294 _drbd_fault_str(type));
4295 }
4296
4297 return ret;
4298}
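/* Illustrative sketch only, not part of the driver: an IO submission path
 * would consult the fault injector before issuing real IO, keyed by one of
 * the fault types listed above.  The helper name and the fault_rate gating
 * are assumptions for the example; _drbd_insert_fault() itself already logs
 * the simulated failure. */
static int __maybe_unused example_should_fail_md_write(struct drbd_conf *mdev)
{
	return fault_rate && _drbd_insert_fault(mdev, DRBD_FAULT_MD_WR);
}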
4299#endif
4300
4301const char *drbd_buildtag(void)
4302{
4303 /* DRBD built from external sources has a reference here to the
4304 git hash of the source code. */
4305
4306 static char buildtag[38] = "\0uilt-in";
4307
4308 if (buildtag[0] == 0) {
4309#ifdef CONFIG_MODULES
4310 if (THIS_MODULE != NULL)
4311 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4312 else
4313#endif
4314 buildtag[0] = 'b';
4315 }
4316
4317 return buildtag;
4318}
4319
4320module_init(drbd_init)
4321module_exit(drbd_cleanup)
4322
Philipp Reisnerb411b362009-09-25 16:07:19 -07004323EXPORT_SYMBOL(drbd_conn_str);
4324EXPORT_SYMBOL(drbd_role_str);
4325EXPORT_SYMBOL(drbd_disk_str);
4326EXPORT_SYMBOL(drbd_set_st_err_str);