/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)       __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

/* module parameters, defined in drbd_main.c */
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
void tl_abort_disk_io(struct drbd_device *device);

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];


/* I don't remember why XCPU ...
 * This is used to wake the asender,
 * and to interrupt the sending task
 * on disconnect.
 */
#define DRBD_SIG SIGXCPU

/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I chose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

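/* Illustrative usage of the drbd_printk() family (a sketch, not part of the
 * interface): the static type of the object argument selects the log prefix
 * at compile time, e.g.
 *	drbd_warn(device, "local disk flush failed\n");
 *	drbd_info(connection, "handshake complete\n");
 * Any other pointer type falls through to drbd_printk_with_wrong_object_type(),
 * which is declared but never defined, so a misuse fails at link time. */
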
#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 */
#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
				#exp, __func__); \
		_bool; \
	})

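/* Note that expect() expands to a reference to a variable named "device",
 * so such a variable must be in scope at the call site.  Illustrative,
 * hypothetical use:
 *	if (!expect(size <= PAGE_SIZE))
 *		return -EINVAL;
 */
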
/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9,	/* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}

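/* Illustrative call site (a sketch): with CONFIG_DRBD_FAULT_INJECTION set,
 * a non-zero fault_rate, and the DRBD_FAULT_MD_WR bit set in enable_faults,
 * a meta-data write path could fail itself on purpose:
 *	if (drbd_insert_fault(device, DRBD_FAULT_MD_WR))
 *		bio_io_error(bio);	// hypothetical error path
 * With fault injection compiled out, drbd_insert_fault() is constant 0
 * and the branch is optimized away. */
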
/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

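/* For example, div_ceil(5, 4) == 2 while div_floor(5, 4) == 1.  Note that
 * both macros evaluate their arguments more than once, so A and B must be
 * free of side effects. */
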
extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient. */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}

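/* Worked example (illustrative): assume bit_offset == 130.
 * On BITS_PER_LONG == 64: word_offset = 130 >> 6 = 2, the 64 bit word
 * covering bits 128..191.
 * On BITS_PER_LONG == 32: 130 >> 5 = 4, and clearing the low bit keeps 4,
 * the even 32 bit word starting at bit 128 -- the same 64 bit boundary
 * on both word sizes. */
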
extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a p_barrier packet to be sent, starting a new epoch.
	 *
	 * This corresponds to "barrier" in struct p_barrier[_ack],
	 * and to "barrier_nr" in struct drbd_epoch (and various
	 * comments/function parameters/local variable names).
	 */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* Minimal set of time stamps to determine if we wait for activity log
	 * transactions, local disk or peer.  32 bit "jiffies" are good enough,
	 * we don't expect a DRBD request to be stalled for several months.
	 */

	/* before actual request processing */
	unsigned long in_actlog_jif;

	/* local disk */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* Possibly even more detail to track each phase:
	 *  master_completion_jif
	 *      how long did it take to complete the master bio
	 *      (application visible latency)
	 *  allocated_jif
	 *      how long the master bio was blocked until we finally allocated
	 *      a tracking struct
	 *  in_actlog_jif
	 *      how long did we wait for activity log transactions
	 *
	 *  net_queued_jif
	 *      when did we finally queue it for sending
	 *  pre_send_jif
	 *      when did we start sending it
	 *  post_send_jif
	 *      how long did we block in the network stack trying to send it
	 *  acked_jif
	 *      when did we receive (or fake, in protocol A) a remote ACK
	 *  net_done_jif
	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
	 *      or decide, e.g. on connection loss, that we no longer expect
	 *      anything from this peer for this request.
	 *
	 *  pre_submit_jif
	 *  post_sub_jif
	 *      when did we start submitting to the lower level device,
	 *      and how long did we block in that submit function
	 *  local_completion_jif
	 *      how long did it take the lower level device to complete this request
	 */

	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* Prototype declarations of functions defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_DISCARD? */
	__EE_IS_TRIM,
	/* our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_IS_TRIM_USE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: */
	/* has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,
};
#define EE_CALL_AL_COMPLETE_IO	(1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC	(1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_TRIM		(1<<__EE_IS_TRIM)
#define EE_IS_TRIM_USE_ZEROOUT	(1<<__EE_IS_TRIM_USE_ZEROOUT)
#define EE_RESUBMITTED		(1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR		(1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST		(1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_APPLICATION		(1<<__EE_APPLICATION)

/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us to not use FUA/FLUSH on meta data dev */

	SUSPEND_IO,		/* suspend application io */
	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* do we need to kfree, or vfree bm_pages? */
	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */

	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET = 0x2,
	BM_DONT_TEST = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

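/* Illustrative use (a sketch): a bulk operation such as sending the whole
 * bitmap would take the lock in the matching mode, e.g.
 *	drbd_bm_lock(device, "send bitmap", BM_LOCKED_TEST_ALLOWED);
 * which flags unexpected concurrent set/clear attempts while still allowing
 * the common test/count accesses. */
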
struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;  /* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

enum write_ordering_e {
	WO_none,
	WO_drain_io,
	WO_bdev_flush,
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[0];
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);

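/* values[0] is the old-style flexible array member idiom: the elements are
 * stored directly behind the struct header.  fifo_alloc() is expected to
 * reserve both in a single allocation, roughly (a sketch; the real
 * allocator lives in drbd_nl.c):
 *	fb = kzalloc(sizeof(struct fifo_buffer) + fifo_size * sizeof(int), GFP_NOIO);
 *	fb->size = fifo_size;
 */
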
/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,		/* whether asender should send a ping asap */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 * If set, bdi_write_congested() returns true,
				 * so shrink_page_list() would not recurse into,
				 * and potentially deadlock on, this drbd worker.
				 */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for ready-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
	struct crypto_hash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread */
	struct crypto_hash *csums_tfm;
	struct crypto_hash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread asender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next; /* DRBD 9: todo.req_next */
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* protected by ..->resource->req_lock */
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
#endif

	unsigned int vnr;	/* volume number within the connection */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_actlog_cnt;	 /* Requests waiting for activity log */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */

	/* Interval tree of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics and timeouts */
	/* [0] read, [1] write */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* marks' time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* need to send P_WRITE_ACK */
	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;    /* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)

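/* Illustrative traversal (a sketch; the _rcu variants require the caller to
 * hold rcu_read_lock()):
 *	struct drbd_resource *resource;
 *	struct drbd_connection *connection;
 *
 *	rcu_read_lock();
 *	for_each_resource_rcu(resource, &drbd_resources)
 *		for_each_connection_rcu(connection, resource)
 *			drbd_info(connection, "cstate %d\n", connection->cstate);
 *	rcu_read_unlock();
 */
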
static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern void drbd_free_ldev(struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001133extern void drbd_md_mark_dirty(struct drbd_device *device);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001134extern void drbd_queue_bitmap_io(struct drbd_device *device,
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001135 int (*io_fn)(struct drbd_device *),
1136 void (*done)(struct drbd_device *, int),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001137 char *why, enum bm_flag flags);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001138extern int drbd_bitmap_io(struct drbd_device *device,
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001139 int (*io_fn)(struct drbd_device *),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001140 char *why, enum bm_flag flags);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001141extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001142 int (*io_fn)(struct drbd_device *),
Lars Ellenbergedc9f5e2012-09-27 15:18:21 +02001143 char *why, enum bm_flag flags);
Philipp Reisner8fe39aa2013-11-22 13:22:13 +01001144extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
1145extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001146
Philipp Reisnerb411b362009-09-25 16:07:19 -07001147/* Meta data layout
Lars Ellenbergae8bf312013-03-19 18:16:43 +01001148 *
1149 * We currently have two possible layouts.
1150 * Offsets in (512 byte) sectors.
1151 * external:
1152 * |----------- md_size_sect ------------------|
1153 * [ 4k superblock ][ activity log ][ Bitmap ]
1154 * | al_offset == 8 |
1155 * | bm_offset = al_offset + X |
1156 * ==> bitmap sectors = md_size_sect - bm_offset
1157 *
1158 * Variants:
1159 * old, indexed fixed size meta data:
1160 *
1161 * internal:
1162 * |----------- md_size_sect ------------------|
1163 * [data.....][ Bitmap ][ activity log ][ 4k superblock ][padding*]
1164 * | al_offset < 0 |
1165 * | bm_offset = al_offset - Y |
1166 * ==> bitmap sectors = Y = al_offset - bm_offset
1167 *
1168 * [padding*] are zero or up to 7 unused 512 Byte sectors to the
1169 * end of the device, so that the [4k superblock] will be 4k aligned.
1170 *
1171 * The activity log consists of 4k transaction blocks,
1172 * which are written in a ring-buffer (or striped ring-buffer) fashion.
1173 * Their total on-disk size used to be fixed at 32kB,
1174 * but is about to become configurable.
1175 */
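/* Worked example (a sketch; md_size_sect chosen for illustration only):
 * external meta data, md_size_sect = 262144 (128 MiB):
 *	al_offset = 8 (the 4k superblock occupies sectors 0..7),
 *	bm_offset = 8 + 64 = 72 (after a 32kB activity log),
 *	==> bitmap sectors = 262144 - 72 = 262072.
 * internal meta data: both offsets are negative, relative to the
 * superblock at the end of the device, e.g. al_offset = -64
 * (a 32kB activity log right before the superblock) and
 * bm_offset = -64 - Y for a bitmap of Y = al_offset - bm_offset sectors.
 */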
Philipp Reisnerb411b362009-09-25 16:07:19 -07001176
Lars Ellenbergae8bf312013-03-19 18:16:43 +01001177/* Our old fixed size meta data layout
1178 * allows up to about 3.8TB, so if you want more,
Lars Ellenberg7ad651b2011-02-21 13:21:03 +01001179 * you need to use the "flexible" meta data format. */
Lars Ellenbergae8bf312013-03-19 18:16:43 +01001180#define MD_128MB_SECT (128LLU << 11) /* 128 MB, unit sectors */
1181#define MD_4kB_SECT 8
1182#define MD_32kB_SECT 64
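/* Unit check (a sketch; sizes in 512 byte sectors):
 *	MD_128MB_SECT = 128 << 11 = 262144 sectors = 128 MiB
 *	MD_4kB_SECT   =   8 sectors = the 4k superblock
 *	MD_32kB_SECT  =  64 sectors = the fixed size 32kB activity log
 */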
Philipp Reisnerb411b362009-09-25 16:07:19 -07001183
Lars Ellenberg7ad651b2011-02-21 13:21:03 +01001184/* One activity log extent represents 4M of storage */
1185#define AL_EXTENT_SHIFT 22
Philipp Reisnerb411b362009-09-25 16:07:19 -07001186#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1187
Lars Ellenberg7ad651b2011-02-21 13:21:03 +01001188/* We could make these currently hardcoded constants configurable
1189 * variables at create-md time (or even re-configurable at runtime?).
1190 * Which will require some more changes to the DRBD "super block"
1191 * and attach code.
1192 *
1193 * updates per transaction:
1194 * This many changes to the active set can be logged with one transaction.
1195 * This number is arbitrary.
1196 * context per transaction:
1197 * This many context extent numbers are logged with each transaction.
1198 * This number is resulting from the transaction block size (4k), the layout
1199 * of the transaction header, and the number of updates per transaction.
1200 * See drbd_actlog.c:struct al_transaction_on_disk
1201 * */
1202#define AL_UPDATES_PER_TRANSACTION 64 /* arbitrary */
1203#define AL_CONTEXT_PER_TRANSACTION 919 /* (4096 - 36 - 6*64)/4 */
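/* Where the 919 comes from (a sketch; see
 * drbd_actlog.c:struct al_transaction_on_disk for the authoritative layout):
 *	4096 byte transaction block
 *	-  36 byte transaction header
 *	- 384 byte = 6*64 byte for the 64 update slots
 *	  (per update: 2 byte slot number + 4 byte extent number)
 *	= 3676 byte left, / 4 byte per context extent number = 919
 */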
1204
Philipp Reisnerb411b362009-09-25 16:07:19 -07001205#if BITS_PER_LONG == 32
1206#define LN2_BPL 5
1207#define cpu_to_lel(A) cpu_to_le32(A)
1208#define lel_to_cpu(A) le32_to_cpu(A)
1209#elif BITS_PER_LONG == 64
1210#define LN2_BPL 6
1211#define cpu_to_lel(A) cpu_to_le64(A)
1212#define lel_to_cpu(A) le64_to_cpu(A)
1213#else
1214#error "LN2 of BITS_PER_LONG unknown!"
1215#endif
1216
1217/* resync bitmap */
1218/* 16MB sized 'bitmap extent' to track syncer usage */
1219struct bm_extent {
1220 int rs_left; /* number of bits set (out of sync) in this extent. */
1221 int rs_failed; /* number of failed resync requests in this extent. */
1222 unsigned long flags;
1223 struct lc_element lce;
1224};
1225
1226#define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */
1227#define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */
Philipp Reisnere3555d82010-11-07 15:56:29 +01001228#define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001229
1230/* drbd_bitmap.c */
1231/*
1232 * We need to store one bit for a block.
1233 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
1234 * Bit 0 ==> local node thinks this block is binary identical on both nodes
1235 * Bit 1 ==> local node thinks this block needs to be synced.
1236 */
1237
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001238#define SLEEP_TIME (HZ/10)
1239
Lars Ellenberg45dfffe2011-02-21 13:21:00 +01001240/* We do bitmap IO in units of 4k blocks.
1241 * We also still have a hardcoded 4k per bit relation. */
1242#define BM_BLOCK_SHIFT 12 /* 4k per bit */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001243#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
Lars Ellenberg45dfffe2011-02-21 13:21:00 +01001244/* mostly arbitrarily set the represented size of one bitmap extent,
1245 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
1246 * at 4k per bit resolution) */
1247#define BM_EXT_SHIFT 24 /* 16 MiB per resync extent */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001248#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
1249
1250#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1251#error "HAVE YOU FIXED drbdmeta AS WELL??"
1252#endif
1253
1254/* thus many _storage_ sectors are described by one bit */
1255#define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
1256#define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1257#define BM_SECT_PER_BIT BM_BIT_TO_SECT(1)
1258
1259/* bit to represented kilo byte conversion */
1260#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
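/* Plugging in numbers (illustration only, not used by the code):
 *	BM_SECT_PER_BIT    = 1 << (12-9)  = 8 sectors (4 KiB) per bit
 *	BM_SECT_TO_BIT(17) = 17 >> 3      = bit 2 (covers sectors 16..23)
 *	BM_BIT_TO_SECT(2)  = 2 << 3       = sector 16
 *	Bit2KB(1)          = 1 << (12-10) = 4 (KiB tracked per bit)
 */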
1261
1262/* in which _bitmap_ extent (resp. sector) the bit for a certain
1263 * _storage_ sector is located in */
1264#define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
Lars Ellenberg5ab7d2c2014-01-27 15:58:22 +01001265#define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001266
Lars Ellenberg5ab7d2c2014-01-27 15:58:22 +01001267/* first storage sector a bitmap extent corresponds to */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001268#define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
Lars Ellenberg5ab7d2c2014-01-27 15:58:22 +01001269/* how much _storage_ sectors we have per bitmap extent */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001270#define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)
Lars Ellenberg5ab7d2c2014-01-27 15:58:22 +01001271/* how many bits are covered by one bitmap extent (resync extent) */
1272#define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1273
1274#define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1)
1275
Philipp Reisnerb411b362009-09-25 16:07:19 -07001276
1277/* in one sector of the bitmap, we have this many activity_log extents. */
1278#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
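/* Plugging in numbers (illustration only, not used by the code):
 *	BM_BITS_PER_EXT    = 1 << (24-12) = 4096 bits per 16 MiB resync extent
 *	BM_SECT_PER_EXT    = 1 << (24-9)  = 32768 sectors per resync extent
 *	AL_EXT_PER_BM_SECT = 1 << (24-22) = 4 AL extents (4 MiB each) per
 *	                     512 byte on-disk bitmap sector
 */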
Philipp Reisnerb411b362009-09-25 16:07:19 -07001279
Philipp Reisnerb411b362009-09-25 16:07:19 -07001280/* the extent in "PER_EXTENT" below is an activity log extent
1281 * we need that many (long words/bytes) to store the bitmap
1282 * of one AL_EXTENT_SIZE chunk of storage.
1283 * we can store the bitmap for that many AL_EXTENTS within
1284 * one sector of the _on_disk_ bitmap:
1285 * bit 0 bit 37 bit 38 bit (512*8)-1
1286 * ...|........|........|.. // ..|........|
1287 * sect. 0 `296 `304 ^(512*8*8)-1
1288 *
1289#define BM_WORDS_PER_EXT ( (AL_EXTENT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
1290#define BM_BYTES_PER_EXT ( (AL_EXTENT_SIZE/BM_BLOCK_SIZE) / 8 ) // 128
1291#define BM_EXT_PER_SECT ( 512 / BM_BYTES_PER_EXT ) // 4
1292 */
1293
1294#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
Lars Ellenbergae8bf312013-03-19 18:16:43 +01001295/* we have a certain meta data variant that has a fixed on-disk size of 128
1296 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
1297 * log, leaving this many sectors for the bitmap.
1298 */
1299
1300#define DRBD_MAX_SECTORS_FIXED_BM \
1301 ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
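/* Plugging in the numbers (a sketch):
 * 262144 - 64 - 8 = 262072 sectors remain for the on-disk bitmap;
 * each 512 byte bitmap sector holds 4096 bits at 4 KiB per bit,
 * i.e. describes 16 MiB = 1LL << (BM_EXT_SHIFT-9) sectors of storage.
 */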
1302#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
Philipp Reisnerb411b362009-09-25 16:07:19 -07001303#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
1304#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
1305#else
Lars Ellenbergae8bf312013-03-19 18:16:43 +01001306#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
Philipp Reisnerb411b362009-09-25 16:07:19 -07001307/* 16 TB in units of sectors */
1308#if BITS_PER_LONG == 32
1309/* adjust by one page worth of bitmap,
1310 * so we won't wrap around in drbd_bm_find_next_bit.
1311 * you should use 64bit OS for that much storage, anyways. */
1312#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1313#else
Lars Ellenberg4b0715f2010-12-14 15:13:04 +01001314/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
1315#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
1316/* corresponds to (1UL << 38) bits right now. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001317#endif
1318#endif
1319
Lars Ellenberg23361cf2011-03-31 16:36:43 +02001320/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
1321 * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
1322 * Since we may live in a mixed-platform cluster,
1323 * we limit ourselves to a platform agnostic constant here for now.
1324 * A followup commit may allow even bigger BIO sizes,
1325 * once we thought that through. */
Philipp Reisner98683652012-11-09 14:18:43 +01001326#define DRBD_MAX_BIO_SIZE (1U << 20)
Lars Ellenberg23361cf2011-03-31 16:36:43 +02001327#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
1328#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
1329#endif
Lars Ellenbergdb141b22012-06-25 19:15:58 +02001330#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001331
Philipp Reisner98683652012-11-09 14:18:43 +01001332#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
1333#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001334
Lars Ellenberga0fb3c42014-04-28 18:43:23 +02001335/* For now, don't allow more than one activity log extent worth of data
1336 * to be discarded in one go. We may need to rework drbd_al_begin_io()
1337 * to allow for even larger discard ranges */
1338#define DRBD_MAX_DISCARD_SIZE AL_EXTENT_SIZE
1339#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9)
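/* With AL_EXTENT_SIZE = 4 MiB, that is at most 8192 sectors per discard. */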
1340
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001341extern int drbd_bm_init(struct drbd_device *device);
1342extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1343extern void drbd_bm_cleanup(struct drbd_device *device);
1344extern void drbd_bm_set_all(struct drbd_device *device);
1345extern void drbd_bm_clear_all(struct drbd_device *device);
Lars Ellenberg4b0715f2010-12-14 15:13:04 +01001346/* set/clear/test only a few bits at a time */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001347extern int drbd_bm_set_bits(
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001348 struct drbd_device *device, unsigned long s, unsigned long e);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001349extern int drbd_bm_clear_bits(
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001350 struct drbd_device *device, unsigned long s, unsigned long e);
Lars Ellenberg4b0715f2010-12-14 15:13:04 +01001351extern int drbd_bm_count_bits(
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001352 struct drbd_device *device, const unsigned long s, const unsigned long e);
Lars Ellenberg4b0715f2010-12-14 15:13:04 +01001353/* bm_set_bits variant for use while holding drbd_bm_lock,
1354 * may process the whole bitmap in one go */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001355extern void _drbd_bm_set_bits(struct drbd_device *device,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001356 const unsigned long s, const unsigned long e);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001357extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1358extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001359extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
1360extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1361extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
1362extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
Lars Ellenbergc7a58db2013-12-20 11:39:48 +01001363extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001364extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
1365extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
1366extern size_t drbd_bm_words(struct drbd_device *device);
1367extern unsigned long drbd_bm_bits(struct drbd_device *device);
1368extern sector_t drbd_bm_capacity(struct drbd_device *device);
Lars Ellenberg4b0715f2010-12-14 15:13:04 +01001369
1370#define DRBD_END_OF_BITMAP (~(unsigned long)0)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001371extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001372/* bm_find_next variants for use while you hold drbd_bm_lock() */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001373extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1374extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1375extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1376extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001377/* for receive_bitmap */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001378extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001379 size_t number, unsigned long *buffer);
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001380/* for _drbd_send_bitmap */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001381extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001382 size_t number, unsigned long *buffer);
1383
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001384extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1385extern void drbd_bm_unlock(struct drbd_device *device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001386/* drbd_main.c */
1387
1388extern struct kmem_cache *drbd_request_cache;
Andreas Gruenbacher6c852be2011-02-04 15:38:52 +01001389extern struct kmem_cache *drbd_ee_cache; /* peer requests */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001390extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
1391extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
1392extern mempool_t *drbd_request_mempool;
1393extern mempool_t *drbd_ee_mempool;
1394
Lars Ellenberg42818082011-02-23 12:39:46 +01001395/* drbd's page pool, used to buffer data received from the peer,
1396 * or data requested by the peer.
1397 *
1398 * This does not have an emergency reserve.
1399 *
1400 * When allocating from this pool, it first takes pages from the pool.
1401 * Only if the pool is depleted will it try to allocate from the system.
1402 *
1403 * The assumption is that pages taken from this pool will be processed,
1404 * and given back, "quickly", and then can be recycled, so we can avoid
1405 * frequent calls to alloc_page(), and still will be able to make progress even
1406 * under memory pressure.
1407 */
1408extern struct page *drbd_pp_pool;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001409extern spinlock_t drbd_pp_lock;
1410extern int drbd_pp_vacant;
1411extern wait_queue_head_t drbd_pp_wait;
1412
Lars Ellenberg42818082011-02-23 12:39:46 +01001413/* We also need a standard (emergency-reserve backed) page pool
1414 * for meta data IO (activity log, bitmap).
1415 * We can keep it global, as long as it is used as "N pages at a time".
1416 * 128 should be plenty, currently we probably can get away with as few as 1.
1417 */
1418#define DRBD_MIN_POOL_PAGES 128
1419extern mempool_t *drbd_md_io_page_pool;
1420
Lars Ellenberg9476f392011-02-23 17:02:01 +01001421/* We also need to make sure we get a bio
1422 * when we need it for housekeeping purposes */
1423extern struct bio_set *drbd_md_io_bio_set;
1424/* to allocate from that set */
1425extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1426
Philipp Reisnerb411b362009-09-25 16:07:19 -07001427extern rwlock_t global_state_lock;
1428
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001429extern int conn_lowest_minor(struct drbd_connection *connection);
Lars Ellenberga910b122014-04-28 18:43:21 +02001430extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02001431extern void drbd_destroy_device(struct kref *kref);
Lars Ellenberga910b122014-04-28 18:43:21 +02001432extern void drbd_delete_device(struct drbd_device *device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001433
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02001434extern struct drbd_resource *drbd_create_resource(const char *name);
1435extern void drbd_free_resource(struct drbd_resource *resource);
1436
Andreas Gruenbachereb6bea62011-06-21 16:11:28 +02001437extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001438extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02001439extern void drbd_destroy_connection(struct kref *kref);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001440extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02001441 void *peer_addr, int peer_addr_len);
Andreas Gruenbacher4bc76042011-06-13 14:27:45 +02001442extern struct drbd_resource *drbd_find_resource(const char *name);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02001443extern void drbd_destroy_resource(struct kref *kref);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001444extern void conn_free_crypto(struct drbd_connection *connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001445
1446extern int proc_details;
1447
1448/* drbd_req */
Lars Ellenberg113fef92013-03-22 18:14:40 -06001449extern void do_submit(struct work_struct *ws);
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001450extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001451extern void drbd_make_request(struct request_queue *q, struct bio *bio);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001452extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001453extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
1454extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1455
1456
1457/* drbd_nl.c */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001458extern void drbd_suspend_io(struct drbd_device *device);
1459extern void drbd_resume_io(struct drbd_device *device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001460extern char *ppsize(char *buf, unsigned long long size);
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001461extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
Philipp Reisnere96c9632013-06-25 16:50:07 +02001462enum determine_dev_size {
Philipp Reisnerd752b262013-06-25 16:50:08 +02001463 DS_ERROR_SHRINK = -3,
1464 DS_ERROR_SPACE_MD = -2,
Philipp Reisnere96c9632013-06-25 16:50:07 +02001465 DS_ERROR = -1,
1466 DS_UNCHANGED = 0,
1467 DS_SHRUNK = 1,
Philipp Reisner57737ad2013-10-23 10:59:17 +02001468 DS_GREW = 2,
1469 DS_GREW_FROM_ZERO = 3,
Philipp Reisnere96c9632013-06-25 16:50:07 +02001470};
Philipp Reisnerd752b262013-06-25 16:50:08 +02001471extern enum determine_dev_size
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001472drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
1473extern void resync_after_online_grow(struct drbd_device *);
Philipp Reisner8fe39aa2013-11-22 13:22:13 +01001474extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001475extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01001476 enum drbd_role new_role,
1477 int force);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001478extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1479extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001480extern int drbd_khelper(struct drbd_device *device, char *cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001481
1482/* drbd_worker.c */
Philipp Reisnerd40e5672014-04-28 18:43:14 +02001483/* bi_end_io handlers */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001484extern void drbd_md_endio(struct bio *bio);
1485extern void drbd_peer_request_endio(struct bio *bio);
1486extern void drbd_request_endio(struct bio *bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001487extern int drbd_worker(struct drbd_thread *thi);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001488enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1489void drbd_resync_after_changed(struct drbd_device *device);
1490extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1491extern void resume_next_sg(struct drbd_device *device);
1492extern void suspend_other_sg(struct drbd_device *device);
1493extern int drbd_resync_finished(struct drbd_device *device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001494/* maybe rather drbd_main.c ? */
Lars Ellenberge37d2432014-04-01 23:53:30 +02001495extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001496extern void drbd_md_put_buffer(struct drbd_device *device);
1497extern int drbd_md_sync_page_io(struct drbd_device *device,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001498 struct drbd_backing_dev *bdev, sector_t sector, int rw);
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001499extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001500extern void wait_until_done_or_force_detached(struct drbd_device *device,
Lars Ellenberg44edfb02012-09-27 13:03:45 +02001501 struct drbd_backing_dev *bdev, unsigned int *done);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001502extern void drbd_rs_controller_reset(struct drbd_device *device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001503
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001504static inline void ov_out_of_sync_print(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001505{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001506 if (device->ov_last_oos_size) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001507 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001508 (unsigned long long)device->ov_last_oos_start,
1509 (unsigned long)device->ov_last_oos_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001510 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001511 device->ov_last_oos_size = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001512}
1513
1514
Andreas Gruenbacher79a3c8d2011-08-09 02:49:01 +02001515extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
1516extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001517/* worker callbacks */
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001518extern int w_e_end_data_req(struct drbd_work *, int);
1519extern int w_e_end_rsdata_req(struct drbd_work *, int);
1520extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1521extern int w_e_end_ov_reply(struct drbd_work *, int);
1522extern int w_e_end_ov_req(struct drbd_work *, int);
1523extern int w_ov_finished(struct drbd_work *, int);
1524extern int w_resync_timer(struct drbd_work *, int);
1525extern int w_send_write_hint(struct drbd_work *, int);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001526extern int w_send_dblock(struct drbd_work *, int);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001527extern int w_send_read_req(struct drbd_work *, int);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001528extern int w_e_reissue(struct drbd_work *, int);
1529extern int w_restart_disk_io(struct drbd_work *, int);
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01001530extern int w_send_out_of_sync(struct drbd_work *, int);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01001531extern int w_start_resync(struct drbd_work *, int);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001532
1533extern void resync_timer_fn(unsigned long data);
Philipp Reisner370a43e2011-01-14 16:03:11 +01001534extern void start_resync_timer_fn(unsigned long data);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001535
Lars Ellenberga0fb3c42014-04-28 18:43:23 +02001536extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1537
Philipp Reisnerb411b362009-09-25 16:07:19 -07001538/* drbd_receiver.c */
Andreas Gruenbacher753c61912011-07-22 11:14:41 +02001539extern int drbd_receiver(struct drbd_thread *thi);
1540extern int drbd_asender(struct drbd_thread *thi);
Lars Ellenberge8299872014-04-28 18:43:19 +02001541extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
Lars Ellenbergad3fee72013-12-20 11:22:13 +01001542extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1543 bool throttle_if_app_is_waiting);
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001544extern int drbd_submit_peer_request(struct drbd_device *,
Andreas Gruenbacherfbe29de2011-02-17 16:38:35 +01001545 struct drbd_peer_request *, const unsigned,
1546 const int);
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001547extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001548extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02001549 sector_t, unsigned int,
Lars Ellenberga0fb3c42014-04-28 18:43:23 +02001550 bool,
Andreas Gruenbacher0db55362011-04-06 16:09:15 +02001551 gfp_t) __must_hold(local);
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001552extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
Andreas Gruenbacher3967deb2011-04-06 16:16:56 +02001553 int);
1554#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1555#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001556extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001557extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1558extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001559extern int drbd_connected(struct drbd_peer_device *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001560
Philipp Reisnerb411b362009-09-25 16:07:19 -07001561static inline void drbd_tcp_cork(struct socket *sock)
1562{
Lars Ellenberged439842011-04-23 14:45:14 +02001563 int val = 1;
Philipp Reisnere805b982014-11-10 17:21:15 +01001564 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
Lars Ellenberged439842011-04-23 14:45:14 +02001565 (char*)&val, sizeof(val));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001566}
1567
1568static inline void drbd_tcp_uncork(struct socket *sock)
1569{
Lars Ellenberged439842011-04-23 14:45:14 +02001570 int val = 0;
Philipp Reisnere805b982014-11-10 17:21:15 +01001571 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
Lars Ellenberged439842011-04-23 14:45:14 +02001572 (char*)&val, sizeof(val));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001573}
1574
1575static inline void drbd_tcp_nodelay(struct socket *sock)
1576{
Lars Ellenberged439842011-04-23 14:45:14 +02001577 int val = 1;
Philipp Reisnere805b982014-11-10 17:21:15 +01001578 (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
Lars Ellenberged439842011-04-23 14:45:14 +02001579 (char*)&val, sizeof(val));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001580}
1581
1582static inline void drbd_tcp_quickack(struct socket *sock)
1583{
Lars Ellenberged439842011-04-23 14:45:14 +02001584 int val = 2;
Philipp Reisnere805b982014-11-10 17:21:15 +01001585 (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
Lars Ellenberged439842011-04-23 14:45:14 +02001586 (char*)&val, sizeof(val));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001587}
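/* Typical pattern (a sketch, illustrative only): cork the data socket,
 * queue several packets, then uncork to push them out in few segments:
 *
 *	drbd_tcp_cork(connection->data.socket);
 *	... send a batch of packets ...
 *	drbd_tcp_uncork(connection->data.socket);
 */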
1588
Philipp Reisnerd40e5672014-04-28 18:43:14 +02001589/* sets the number of 512 byte sectors of our virtual device */
1590static inline void drbd_set_my_capacity(struct drbd_device *device,
1591 sector_t size)
1592{
1593 /* set_capacity(device->this_bdev->bd_disk, size); */
1594 set_capacity(device->vdisk, size);
1595 device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
1596}
1597
1598/*
1599 * used to submit our private bio
1600 */
1601static inline void drbd_generic_make_request(struct drbd_device *device,
1602 int fault_type, struct bio *bio)
1603{
1604 __release(local);
1605 if (!bio->bi_bdev) {
Lars Ellenbergf88c5d92014-03-27 14:10:55 +01001606 drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001607 bio->bi_error = -ENODEV;
1608 bio_endio(bio);
Philipp Reisnerd40e5672014-04-28 18:43:14 +02001609 return;
1610 }
1611
1612 if (drbd_insert_fault(device, fault_type))
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001613 bio_io_error(bio);
Philipp Reisnerd40e5672014-04-28 18:43:14 +02001614 else
1615 generic_make_request(bio);
1616}
1617
Philipp Reisner8fe39aa2013-11-22 13:22:13 +01001618void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1619 enum write_ordering_e wo);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001620
1621/* drbd_proc.c */
1622extern struct proc_dir_entry *drbd_proc;
Emese Revfy7d4e9d02009-12-14 00:59:30 +01001623extern const struct file_operations drbd_proc_fops;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001624extern const char *drbd_conn_str(enum drbd_conns s);
1625extern const char *drbd_role_str(enum drbd_role s);
1626
1627/* drbd_actlog.c */
Lars Ellenberge4d7d6f2014-04-28 18:43:28 +02001628extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001629extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
Lars Ellenberg4dd726f2014-02-11 11:15:36 +01001630extern void drbd_al_begin_io_commit(struct drbd_device *device);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001631extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
Lars Ellenberg4dd726f2014-02-11 11:15:36 +01001632extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001633extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1634extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1635extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1636extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1637extern void drbd_rs_cancel_all(struct drbd_device *device);
1638extern int drbd_rs_del_all(struct drbd_device *device);
1639extern void drbd_rs_failed_io(struct drbd_device *device,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001640 sector_t sector, int size);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001641extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
Lars Ellenberg5ab7d2c2014-01-27 15:58:22 +01001642
1643enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1644extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
Andreas Gruenbacher179e20b82014-11-10 17:21:09 +01001645 enum update_sync_bits_mode mode);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001646#define drbd_set_in_sync(device, sector, size) \
Andreas Gruenbacher179e20b82014-11-10 17:21:09 +01001647 __drbd_change_sync(device, sector, size, SET_IN_SYNC)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001648#define drbd_set_out_of_sync(device, sector, size) \
Andreas Gruenbacher179e20b82014-11-10 17:21:09 +01001649 __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
Lars Ellenberg5ab7d2c2014-01-27 15:58:22 +01001650#define drbd_rs_failed_io(device, sector, size) \
Andreas Gruenbacher179e20b82014-11-10 17:21:09 +01001651 __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001652extern void drbd_al_shrink(struct drbd_device *device);
Andreas Gruenbacher54761692011-05-30 16:15:21 +02001653extern int drbd_initialize_al(struct drbd_device *, void *);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001654
Philipp Reisnerb411b362009-09-25 16:07:19 -07001655/* drbd_nl.c */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001656/* state info broadcast */
1657struct sib_info {
1658 enum drbd_state_info_bcast_reason sib_reason;
1659 union {
1660 struct {
1661 char *helper_name;
1662 unsigned helper_exit_code;
1663 };
1664 struct {
1665 union drbd_state os;
1666 union drbd_state ns;
1667 };
1668 };
1669};
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001670void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
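/* Sketch of a call site (illustrative only):
 *
 *	struct sib_info sib = {
 *		.sib_reason = SIB_STATE_CHANGE,
 *		.os = os,
 *		.ns = ns,
 *	};
 *	drbd_bcast_event(device, &sib);
 */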
Philipp Reisnerb411b362009-09-25 16:07:19 -07001671
1672/*
1673 * inline helper functions
1674 *************************/
1675
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001676/* see also page_chain_add and friends in drbd_receiver.c */
1677static inline struct page *page_chain_next(struct page *page)
1678{
1679 return (struct page *)page_private(page);
1680}
1681#define page_chain_for_each(page) \
1682 for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1683 page = page_chain_next(page))
1684#define page_chain_for_each_safe(page, n) \
1685 for (; page && ({ n = page_chain_next(page); 1; }); page = n)
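
/* Minimal sketch of the intended use (hypothetical helper, not part of
 * DRBD).  Use the _safe variant whenever the loop body may free or
 * unlink the current page. */
static inline int page_chain_count(struct page *page)
{
	int n = 0;
	page_chain_for_each(page)
		n++;
	return n;
}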
1686
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001687
Andreas Gruenbacher045417f2011-04-07 21:34:24 +02001688static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001689{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001690 struct page *page = peer_req->pages;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001691 page_chain_for_each(page) {
1692 if (page_count(page) > 1)
1693 return 1;
1694 }
1695 return 0;
1696}
1697
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01001698static inline enum drbd_state_rv
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001699_drbd_set_state(struct drbd_device *device, union drbd_state ns,
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01001700 enum chg_state_flags flags, struct completion *done)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001701{
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01001702 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001703
1704 read_lock(&global_state_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001705 rv = __drbd_set_state(device, ns, flags, done);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001706 read_unlock(&global_state_lock);
1707
1708 return rv;
1709}
1710
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001711static inline union drbd_state drbd_read_state(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001712{
Andreas Gruenbacher6bbf53c2011-07-08 01:19:44 +02001713 struct drbd_resource *resource = device->resource;
Philipp Reisner78bae592011-03-28 15:40:12 +02001714 union drbd_state rv;
1715
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001716 rv.i = device->state.i;
Andreas Gruenbacher6bbf53c2011-07-08 01:19:44 +02001717 rv.susp = resource->susp;
1718 rv.susp_nod = resource->susp_nod;
1719 rv.susp_fen = resource->susp_fen;
Philipp Reisner78bae592011-03-28 15:40:12 +02001720
1721 return rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001722}
1723
Lars Ellenberg383606e2012-06-14 14:21:32 +02001724enum drbd_force_detach_flags {
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02001725 DRBD_READ_ERROR,
1726 DRBD_WRITE_ERROR,
Lars Ellenberg383606e2012-06-14 14:21:32 +02001727 DRBD_META_IO_ERROR,
1728 DRBD_FORCE_DETACH,
1729};
1730
Philipp Reisnerb411b362009-09-25 16:07:19 -07001731#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001732static inline void __drbd_chk_io_error_(struct drbd_device *device,
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02001733 enum drbd_force_detach_flags df,
Lars Ellenberg383606e2012-06-14 14:21:32 +02001734 const char *where)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001735{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001736 enum drbd_io_error_p ep;
1737
1738 rcu_read_lock();
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001739 ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001740 rcu_read_unlock();
1741 switch (ep) {
1742 case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02001743 if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
Lars Ellenberg73835062010-05-27 11:51:56 +02001744 if (__ratelimit(&drbd_ratelimit_state))
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001745 drbd_err(device, "Local IO failed in %s.\n", where);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001746 if (device->state.disk > D_INCONSISTENT)
1747 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001748 break;
1749 }
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02001750 /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001751 case EP_DETACH:
1752 case EP_CALL_HELPER:
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02001753 /* Remember whether we saw a READ or WRITE error.
1754 *
1755 * Recovery of the affected area for WRITE failure is covered
1756 * by the activity log.
1757 * READ errors may fall outside that area though. Certain READ
1758 * errors can be "healed" by writing good data to the affected
1759 * blocks, which triggers block re-allocation in lower layers.
1760 *
1761 * If we can not write the bitmap after a READ error,
1762 * we may need to trigger a full sync (see w_go_diskless()).
1763 *
1764 * Force-detach is not really an IO error, but rather a
1765 * desperate measure to try to deal with a completely
1766 * unresponsive lower level IO stack.
1767 * Still it should be treated as a WRITE error.
1768 *
1769 * Meta IO error is always WRITE error:
1770 * we read meta data only once during attach,
1771 * which will fail in case of errors.
1772 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001773 set_bit(WAS_IO_ERROR, &device->flags);
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02001774 if (df == DRBD_READ_ERROR)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001775 set_bit(WAS_READ_ERROR, &device->flags);
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02001776 if (df == DRBD_FORCE_DETACH)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001777 set_bit(FORCE_DETACH, &device->flags);
1778 if (device->state.disk > D_FAILED) {
1779 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001780 drbd_err(device,
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001781 "Local IO failed in %s. Detaching...\n", where);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001782 }
1783 break;
1784 }
1785}
1786
1787/**
1788 * drbd_chk_io_error() - Handle the on_io_error setting, should be called from all io completion handlers
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001789 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001790 * @error: Error code passed to the IO completion callback
1791 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
1792 *
1793 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1794 */
1795#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001796static inline void drbd_chk_io_error_(struct drbd_device *device,
Lars Ellenberg383606e2012-06-14 14:21:32 +02001797 int error, enum drbd_force_detach_flags forcedetach, const char *where)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001798{
1799 if (error) {
1800 unsigned long flags;
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001801 spin_lock_irqsave(&device->resource->req_lock, flags);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001802 __drbd_chk_io_error_(device, forcedetach, where);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001803 spin_unlock_irqrestore(&device->resource->req_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001804 }
1805}
1806
1807
1808/**
1809 * drbd_md_first_sector() - Returns the first sector number of the meta data area
1810 * @bdev: Meta data block device.
1811 *
1812 * BTW, for internal meta data, this happens to be the maximum capacity
1813 * we could agree upon with our peer node.
1814 */
Lars Ellenberg68e41a42013-03-19 18:16:45 +01001815static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001816{
Lars Ellenberg68e41a42013-03-19 18:16:45 +01001817 switch (bdev->md.meta_dev_idx) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001818 case DRBD_MD_INDEX_INTERNAL:
1819 case DRBD_MD_INDEX_FLEX_INT:
1820 return bdev->md.md_offset + bdev->md.bm_offset;
1821 case DRBD_MD_INDEX_FLEX_EXT:
1822 default:
1823 return bdev->md.md_offset;
1824 }
1825}
1826
1827/**
1828 * drbd_md_last_sector() - Return the last sector number of the meta data area
1829 * @bdev: Meta data block device.
1830 */
1831static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1832{
Lars Ellenberg68e41a42013-03-19 18:16:45 +01001833 switch (bdev->md.meta_dev_idx) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001834 case DRBD_MD_INDEX_INTERNAL:
1835 case DRBD_MD_INDEX_FLEX_INT:
Lars Ellenbergae8bf312013-03-19 18:16:43 +01001836 return bdev->md.md_offset + MD_4kB_SECT -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001837 case DRBD_MD_INDEX_FLEX_EXT:
1838 default:
Lars Ellenbergae8bf312013-03-19 18:16:43 +01001839 return bdev->md.md_offset + bdev->md.md_size_sect -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001840 }
1841}
1842
1843/* Returns the number of 512 byte sectors of the device */
1844static inline sector_t drbd_get_capacity(struct block_device *bdev)
1845{
1846 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
Mike Snitzer77304d22010-11-08 14:39:12 +01001847 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001848}
1849
1850/**
1851 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1852 * @bdev: Meta data block device.
1853 *
1854 * returns the capacity we announce to our peer. We clip ourselves at the
1855 * various MAX_SECTORS, because if we don't, the current implementation will
1856 * oops sooner or later.
1857 */
1858static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1859{
1860 sector_t s;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001861
Lars Ellenberg68e41a42013-03-19 18:16:45 +01001862 switch (bdev->md.meta_dev_idx) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001863 case DRBD_MD_INDEX_INTERNAL:
1864 case DRBD_MD_INDEX_FLEX_INT:
1865 s = drbd_get_capacity(bdev->backing_bdev)
1866 ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
Lars Ellenberg68e41a42013-03-19 18:16:45 +01001867 drbd_md_first_sector(bdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001868 : 0;
1869 break;
1870 case DRBD_MD_INDEX_FLEX_EXT:
1871 s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1872 drbd_get_capacity(bdev->backing_bdev));
1873 /* clip at maximum size the meta device can support */
1874 s = min_t(sector_t, s,
1875 BM_EXT_TO_SECT(bdev->md.md_size_sect
1876 - bdev->md.bm_offset));
1877 break;
1878 default:
1879 s = min_t(sector_t, DRBD_MAX_SECTORS,
1880 drbd_get_capacity(bdev->backing_bdev));
1881 }
1882 return s;
1883}
1884
1885/**
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01001886 * drbd_md_ss() - Return the sector number of our meta data super block
Philipp Reisnerb411b362009-09-25 16:07:19 -07001887 * @bdev: Meta data block device.
1888 */
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01001889static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001890{
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01001891 const int meta_dev_idx = bdev->md.meta_dev_idx;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001892
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01001893 if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001894 return 0;
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01001895
1896 /* Since drbd08, internal meta data is always "flexible".
1897 * position: last 4k aligned block of 4k size */
1898 if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1899 meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1900 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1901
1902 /* external, some index; this is the old fixed size layout */
1903 return MD_128MB_SECT * meta_dev_idx;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001904}
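/* Worked example for the internal case (numbers for illustration only):
 * a backing device of 1000005 sectors: (1000005 & ~7ULL) - 8 = 999992,
 * so the superblock occupies sectors 999992..999999, the last fully
 * contained, 4k aligned, block of 4k size. */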
1905
1906static inline void
Philipp Reisnerb411b362009-09-25 16:07:19 -07001907drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1908{
1909 unsigned long flags;
1910 spin_lock_irqsave(&q->q_lock, flags);
1911 list_add_tail(&w->list, &q->q);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001912 spin_unlock_irqrestore(&q->q_lock, flags);
Lars Ellenberg8c0785a2011-10-19 11:50:57 +02001913 wake_up(&q->q_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001914}
1915
Lars Ellenberge334f552014-02-11 09:30:49 +01001916static inline void
Lars Ellenberg15e26f62014-04-28 11:43:21 +02001917drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1918{
1919 unsigned long flags;
1920 spin_lock_irqsave(&q->q_lock, flags);
1921 if (list_empty_careful(&w->list))
1922 list_add_tail(&w->list, &q->q);
1923 spin_unlock_irqrestore(&q->q_lock, flags);
1924 wake_up(&q->q_wait);
1925}
1926
1927static inline void
Lars Ellenberge334f552014-02-11 09:30:49 +01001928drbd_device_post_work(struct drbd_device *device, int work_bit)
1929{
1930 if (!test_and_set_bit(work_bit, &device->flags)) {
1931 struct drbd_connection *connection =
1932 first_peer_device(device)->connection;
1933 struct drbd_work_queue *q = &connection->sender_work;
1934 if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1935 wake_up(&q->q_wait);
1936 }
1937}
1938
Andreas Gruenbacherb5043c52011-07-28 15:56:02 +02001939extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1940
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001941static inline void wake_asender(struct drbd_connection *connection)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001942{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001943 if (test_bit(SIGNAL_ASENDER, &connection->flags))
1944 force_sig(DRBD_SIG, connection->asender.task);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001945}
1946
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001947static inline void request_ping(struct drbd_connection *connection)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001948{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001949 set_bit(SEND_PING, &connection->flags);
1950 wake_asender(connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001951}
1952
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001953extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001954extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001955extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
Andreas Gruenbacherdba58582011-03-29 16:55:40 +02001956 enum drbd_packet, unsigned int, void *,
1957 unsigned int);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001958extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
Andreas Gruenbacherdba58582011-03-29 16:55:40 +02001959 enum drbd_packet, unsigned int, void *,
1960 unsigned int);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001961
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001962extern int drbd_send_ping(struct drbd_connection *connection);
1963extern int drbd_send_ping_ack(struct drbd_connection *connection);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001964extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001965extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001966
1967static inline void drbd_thread_stop(struct drbd_thread *thi)
1968{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001969 _drbd_thread_stop(thi, false, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001970}
1971
1972static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
1973{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001974 _drbd_thread_stop(thi, false, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001975}
1976
1977static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
1978{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001979 _drbd_thread_stop(thi, true, false);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001980}
1981
1982/* counts how many answer packets we expect from our peer,
1983 * for either explicit application requests,
1984 * or implicit barrier packets as necessary.
1985 * increased:
1986 * w_send_barrier
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001987 * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001988 * it is much easier and equally valid to count what we queue for the
1989 * worker, even before it actually was queued or sent.
1990 * (drbd_make_request_common; recovery path on read io-error)
1991 * decreased:
1992 * got_BarrierAck (respective tl_clear, tl_clear_barrier)
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001993 * _req_mod(req, DATA_RECEIVED)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001994 * [from receive_DataReply]
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01001995 * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001996 * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
1997 * for some reason it is NOT decreased in got_NegAck,
1998 * but in the resulting cleanup code from report_params.
1999 * we should try to remember the reason for that...
Andreas Gruenbacher8554df12011-01-25 15:37:43 +01002000 * _req_mod(req, SEND_FAILED or SEND_CANCELED)
2001 * _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002002 * [from tl_clear_barrier]
2003 */
static inline void inc_ap_pending(struct drbd_device *device)
{
	atomic_inc(&device->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	if (atomic_read(&device->which) < 0)				\
		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
			func, line,					\
			atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
	if (atomic_dec_and_test(&device->ap_pending_cnt))
		wake_up(&device->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}
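
/* Usage sketch (illustrative only, not a call sequence taken verbatim from
 * the driver): the counter brackets the lifetime of an answer we expect
 * from the peer; the request state machine does the actual pairing.
 *
 *	inc_ap_pending(device);		// when the request is queued for the
 *					// worker (QUEUE_FOR_NET_WRITE/_READ)
 *	...				// request travels the network
 *	dec_ap_pending(device);		// on the matching ack (DATA_RECEIVED,
 *					// WRITE_ACKED_BY_PEER, ...) or on the
 *					// SEND_FAILED / CONNECTION_LOST paths
 */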

/* counts how many resync-related answers we still expect from the peer
 *		     increase			decrease
 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
 *					 (or P_NEG_ACK with ID_SYNCER)
 */
static inline void inc_rs_pending(struct drbd_device *device)
{
	atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}
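
/* Usage sketch (illustrative; the real call sites live in the resync path):
 * on the C_SYNC_TARGET side each request sent to the sync source is
 * announced here, and retired when the matching reply or (neg-)ack arrives.
 *
 *	inc_rs_pending(device);		// before sending P_RS_DATA_REQUEST
 *	...
 *	dec_rs_pending(device);		// when P_RS_DATA_REPLY (or a NegAck)
 *					// has been processed
 */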

/* counts how many answers we still need to send to the peer.
 * increased on
 *  receive_Data	unless protocol A;
 *			we need to send a P_RECV_ACK (proto B)
 *			or P_WRITE_ACK (proto C)
 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
 *  receive_Barrier_*	we need to send a P_BARRIER_ACK
 */
static inline void inc_unacked(struct drbd_device *device)
{
	atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
	atomic_sub(n, &device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
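
/* Usage sketch (illustrative, receive side): for a protocol C write the
 * receiver owes the peer a P_WRITE_ACK, so it announces the debt when the
 * data packet arrives and settles it after the local write has completed.
 *
 *	inc_unacked(device);		// in receive_Data()
 *	...				// local disk write completes
 *	// send the P_WRITE_ACK (e.g. via drbd_send_ack()), then:
 *	dec_unacked(device);
 */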

static inline bool is_sync_state(enum drbd_conns connection_state)
{
	return
	   (connection_state == C_SYNC_SOURCE
	||  connection_state == C_SYNC_TARGET
	||  connection_state == C_PAUSED_SYNC_S
	||  connection_state == C_PAUSED_SYNC_T);
}

/**
 * get_ldev_if_state() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
 * @_device:	DRBD device.
 * @_min_state:	Minimum device state required for success.
 *
 * You have to call put_ldev() when finished working with device->ldev.
 */
#define get_ldev_if_state(_device, _min_state) \
	(_get_ldev_if_state((_device), (_min_state)) ? \
	 ({ __acquire(x); true; }) : false)
#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
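
/* Canonical pairing (sketch; this pattern is used throughout the driver):
 * a successful get_ldev()/get_ldev_if_state() must always be balanced by
 * put_ldev() once device->ldev is no longer accessed.
 *
 *	if (get_ldev_if_state(device, D_UP_TO_DATE)) {
 *		// safe to dereference device->ldev here
 *		...
 *		put_ldev(device);
 *	} else {
 *		// treat the device as diskless
 *	}
 */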

static inline void put_ldev(struct drbd_device *device)
{
	enum drbd_disk_state disk_state = device->state.disk;
	/* We must check the state *before* the atomic_dec becomes visible,
	 * or we have a theoretical race: someone hitting zero while the
	 * state is still D_FAILED would then see D_DISKLESS in the
	 * condition below and call into destroy, where it must not, yet. */
	int i = atomic_dec_return(&device->local_cnt);

	/* This may be called from some endio handler,
	 * so we must not sleep here. */

	__release(local);
	D_ASSERT(device, i >= 0);
	if (i == 0) {
		if (disk_state == D_DISKLESS)
			/* even internal references gone, safe to destroy */
			drbd_device_post_work(device, DESTROY_DISK);
		if (disk_state == D_FAILED) {
			/* all application IO references gone. */
			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
				drbd_device_post_work(device, GO_DISKLESS);
		}
		wake_up(&device->misc_wait);
	}
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never get a reference while D_DISKLESS */
	if (device->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed)
		put_ldev(device);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif

/* this throttles on-the-fly application requests
 * according to max_buffers settings;
 * maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
	rcu_read_unlock();

	return mxb;
}

static inline int drbd_state_is_stable(struct drbd_device *device)
{
	union drbd_dev_state s = device->state;

	/* DO NOT add a default clause, we want the compiler to warn us
	 * for any newly introduced state we may have forgotten to add here */

	switch ((enum drbd_conns)s.conn) {
	/* new io only accepted when there is no connection, ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or there is a well established connection. */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
	/* transitional states, IO allowed */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

	/* Allow IO in BM exchange states with new protocols */
	case C_WF_BITMAP_S:
		if (first_peer_device(device)->connection->agreed_pro_version < 96)
			return 0;
		break;

	/* no new io accepted in these states */
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not "stable" */
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		/* disk state is stable as well. */
		break;

	/* no new io accepted during transitional states */
	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not "stable" */
		return 0;
	}

	return 1;
}

static inline int drbd_suspended(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;

	return resource->susp || resource->susp_fen || resource->susp_nod;
}

static inline bool may_inc_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);

	if (drbd_suspended(device))
		return false;
	if (test_bit(SUSPEND_IO, &device->flags))
		return false;

	/* to avoid potential deadlock or bitmap corruption,
	 * in various places, we only allow new application io
	 * to start during "stable" states. */

	/* no new io accepted when attaching or detaching the disk */
	if (!drbd_state_is_stable(device))
		return false;

	/* since some older kernels don't have atomic_add_unless,
	 * and we are within the spinlock anyways, we have this workaround. */
	if (atomic_read(&device->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &device->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
	bool rv = false;

	spin_lock_irq(&device->resource->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&device->resource->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_device *device)
{
	/* we wait here
	 *    as long as the device is suspended,
	 *    as long as the bitmap is still on the fly during the connection
	 *    handshake, and as long as we would exceed the max_buffer limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */

	wait_event(device->misc_wait, inc_ap_bio_cond(device));
}
2279
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002280static inline void dec_ap_bio(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002281{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002282 int mxb = drbd_get_max_buffers(device);
2283 int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002284
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02002285 D_ASSERT(device, ap_bio >= 0);
Lars Ellenberg7ee1fb92012-06-19 10:27:58 +02002286
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002287 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2288 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
Andreas Gruenbacher84b8c062011-07-28 15:27:51 +02002289 drbd_queue_work(&first_peer_device(device)->
2290 connection->sender_work,
2291 &device->bm_io_work.w);
Lars Ellenberg7ee1fb92012-06-19 10:27:58 +02002292 }
2293
Philipp Reisnerb411b362009-09-25 16:07:19 -07002294 /* this currently does wake_up for every dec_ap_bio!
2295 * maybe rather introduce some type of hysteresis?
2296 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2297 if (ap_bio < mxb)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002298 wake_up(&device->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002299}
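
/* Usage sketch (illustrative): every application bio accepted by the
 * make_request path is bracketed by this pair.  inc_ap_bio() may sleep
 * until the state is stable and the count is below max_buffers again;
 * dec_ap_bio() kicks queued bitmap IO and wakes throttled submitters.
 *
 *	inc_ap_bio(device);	// before handing the request on
 *	...			// request is processed / completed
 *	dec_ap_bio(device);	// on completion of the application io
 */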

static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
		first_peer_device(device)->connection->agreed_pro_version != 100;
}

static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
	int changed = device->ed_uuid != val;
	device->ed_uuid = val;
	return changed;
}

static inline int drbd_queue_order_type(struct drbd_device *device)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
	return list_first_entry_or_null(&resource->connections,
					struct drbd_connection, connections);
}

#endif