/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbd_worker(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
int proc_details;	/* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
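/* Illustrative sketch only (not part of the driver): with the next pointer
 * kept in page->private as described above, pushing and popping under
 * drbd_pp_lock would look roughly like this, assuming pages come from
 * alloc_page():
 *
 *	set_page_private(page, (unsigned long)drbd_pp_pool);
 *	drbd_pp_pool = page;					(push)
 *	...
 *	page = drbd_pp_pool;
 *	drbd_pp_pool = (struct page *)page_private(page);	(pop)
 */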

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&connection->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		conn_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		conn_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		conn_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch. */
	/* this extra list walk restart is paranoia,
	 * to catch requests being barrier-acked "unexpectedly".
	 * It usually should find the same req again, or some READ preceding it. */
	list_for_each_entry(req, &connection->transfer_log, tl_requests)
		if (req->epoch == expect_epoch)
			break;
	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&connection->req_lock);

	return;

bail:
	spin_unlock_irq(&connection->req_lock);
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
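/* Worked example (illustrative only): if the oldest not yet barrier-acked
 * epoch in the transfer log is #7 and contains 42 network writes, a
 * tl_release(connection, 7, 42) triggered by the peer's barrier ack marks
 * those 42 requests BARRIER_ACKED; any mismatch in barrier number or write
 * count is treated as a protocol error and tears the connection down
 * (C_PROTOCOL_ERROR above).
 */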


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
		_req_mod(req, what);
}
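/* Usage note (informational): callers must already hold the req_lock, as
 * noted above; tl_restart() below is the wrapper that takes and releases the
 * lock around this walk, and tl_clear() uses it with
 * CONNECTION_LOST_WHILE_PENDING once the connection to the peer is lost.
 */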

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	spin_lock_irq(&connection->req_lock);
	_tl_restart(connection, what);
	spin_unlock_irq(&connection->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests in the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req, *r;

	spin_lock_irq(&connection->req_lock);
	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->w.device != device)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&connection->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_connection *connection = thi->connection;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0],
		 thi->connection->resource->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(connection, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(connection, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	kref_put(&connection->kref, drbd_destroy_connection);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_connection *connection, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->connection = connection;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_connection *connection = thi->connection;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(connection, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(connection, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&thi->connection->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->connection->resource->name);

		if (IS_ERR(nt)) {
			conn_err(connection, "Couldn't start thread\n");

			kref_put(&connection->kref, drbd_destroy_connection);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(connection, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_connection *connection, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == connection->receiver.task ? &connection->receiver :
		task == connection->asender.task  ? &connection->asender :
		task == connection->worker.task   ? &connection->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_connection *connection, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(connection, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_connection *connection)
{
	struct drbd_device *device;
	int vnr = 0, m;

	rcu_read_lock();
	device = idr_get_next(&connection->volumes, &vnr);
	m = device ? device_to_minor(device) : -1;
	rcu_read_unlock();

	return m;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @connection:	DRBD connection.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overridden by the user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_connection *connection)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(connection->cpu_mask))
		return;

	ord = conn_lowest_minor(connection) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, connection->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(connection->cpu_mask);
}
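/* Example (illustrative only): with four online CPUs, a connection whose
 * lowest device minor is 5 ends up pinned to CPU 1 (5 % 4), so all of that
 * connection's threads share one CPU; a cpu_mask configured explicitly by the
 * user bypasses this calculation entirely.
 */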

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi: drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->connection->cpu_mask);
}
#endif

/**
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
	if (connection->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}
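/* Note (informational): peers that agreed on protocol version >= 100 use
 * struct p_header100 on the wire, older peers use p_header80/p_header95,
 * which the BUILD_BUG_ONs above require to have identical size.  Both
 * variants are multiples of 8 bytes, which keeps the payload 8-byte aligned
 * as the comment above demands.
 */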

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (connection->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (connection->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
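/* Example (illustrative only): on a protocol >= 100 peer every packet gets a
 * p_header100 carrying the volume number; on protocol 95..99 the 32-bit
 * length p_header95 is only chosen when the payload exceeds
 * DRBD_MAX_SIZE_H80_PACKET, and everything else falls back to the original
 * p_header80 with its 16-bit length field.
 */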

static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(connection, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_device *device, struct drbd_socket *sock)
{
	return conn_prepare_command(first_peer_device(device)->connection, sock);
}

static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks. For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(connection, sock->socket, data, size, 0);
	return err;
}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_device *device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(first_peer_device(device)->connection, device->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_device *device)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = first_peer_device(device)->connection->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &first_peer_device(device)->connection->data;
	p = drbd_prepare_command(device, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(device)) {
		dc = rcu_dereference(device->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(device);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(device, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &connection->data;
	p = __conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	if (nc->tentative && connection->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		conn_err(connection, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (connection->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (connection->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_connection *connection)
{
	int err;

	mutex_lock(&connection->data.mutex);
	err = __drbd_send_protocol(connection, P_PROTOCOL);
	mutex_unlock(&connection->data.mutex);

	return err;
}

static int _drbd_send_uuids(struct drbd_device *device, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;

	sock = &first_peer_device(device)->connection->data;
	p = drbd_prepare_command(device, sock);
	if (!p) {
		put_ldev(device);
		return -EIO;
	}
	spin_lock_irq(&device->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	device->comm_bm_set = drbd_bm_total_weight(device);
	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(first_peer_device(device)->connection->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(device);
	return drbd_send_command(device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_device *device)
{
	return _drbd_send_uuids(device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_device *device)
{
	return _drbd_send_uuids(device, 8);
}
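/* Note (informational): the uuid_flags word sent in UI_FLAGS encodes, as seen
 * above, bit 0 for discard_my_data, bit 1 for CRASHED_PRIMARY and bit 2 for a
 * D_INCONSISTENT disk; drbd_send_uuids_skip_initial_sync() additionally passes
 * 8 (bit 3), which its name ties to skipping the initial sync.
 */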

void drbd_print_uuids(struct drbd_device *device, const char *text)
{
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		u64 *uuid = device->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(device);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)device->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_device *device)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(device, UI_BITMAP, uuid);
	drbd_print_uuids(device, "updated sync UUID");
	drbd_md_sync(device);

	sock = &first_peer_device(device)->connection->data;
	p = drbd_prepare_command(device, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;

	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		D_ASSERT(device->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(device);
		max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(device);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &first_peer_device(device)->connection->data;
	p = drbd_prepare_command(device, sock);
	if (!p)
		return -EIO;

	if (first_peer_device(device)->connection->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (first_peer_device(device)->connection->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(device, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @device:	DRBD device.
 */
int drbd_send_current_state(struct drbd_device *device)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &first_peer_device(device)->connection->data;
	p = drbd_prepare_command(device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(device->state.i); /* Within the send mutex */
	return drbd_send_command(device, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @device:	DRBD device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_device *device, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &first_peer_device(device)->connection->data;
	p = drbd_prepare_command(device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(device, sock, P_STATE, sizeof(*p), NULL, 0);
}
Lars Ellenbergf479ea02011-10-27 16:52:30 +0200995
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200996int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700997{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +0200998 struct drbd_socket *sock;
999 struct p_req_state *p;
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001000
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001001 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001002 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001003 if (!p)
1004 return -EIO;
1005 p->mask = cpu_to_be32(mask.i);
1006 p->val = cpu_to_be32(val.i);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001007 return drbd_send_command(device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001008}
1009
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001010int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001011{
1012 enum drbd_packet cmd;
1013 struct drbd_socket *sock;
1014 struct p_req_state *p;
1015
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001016 cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1017 sock = &connection->data;
1018 p = conn_prepare_command(connection, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001019 if (!p)
1020 return -EIO;
1021 p->mask = cpu_to_be32(mask.i);
1022 p->val = cpu_to_be32(val.i);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001023 return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001024}
1025
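/* Illustrative sketch, not part of the driver: a state change request is a
 * mask/val pair; only the state bits set in mask are considered, and val
 * carries the requested values for those bits.  The NS() macro from the state
 * headers (also used below for conn_request_state()) builds such a pair for a
 * single field.  Asking for the connection to be torn down could look like: */
static int __maybe_unused example_send_disconnect_request(struct drbd_connection *connection)
{
	/* NS(conn, C_DISCONNECTING): mask selects only the connection state
	 * bits, val requests C_DISCONNECTING for them. */
	return conn_send_state_req(connection, NS(conn, C_DISCONNECTING));
}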
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001026void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001027{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001028 struct drbd_socket *sock;
1029 struct p_req_state_reply *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001030
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001031 sock = &first_peer_device(device)->connection->meta;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001032 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001033 if (p) {
1034 p->retcode = cpu_to_be32(retcode);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001035 drbd_send_command(device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001036 }
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001037}
1038
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001039void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001040{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001041 struct drbd_socket *sock;
1042 struct p_req_state_reply *p;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001043 enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001044
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001045 sock = &connection->meta;
1046 p = conn_prepare_command(connection, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001047 if (p) {
1048 p->retcode = cpu_to_be32(retcode);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001049 conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001050 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001051}
1052
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001053static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001054{
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001055 BUG_ON(code & ~0xf);
1056 p->encoding = (p->encoding & ~0xf) | code;
1057}
Philipp Reisnerb411b362009-09-25 16:07:19 -07001058
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001059static void dcbp_set_start(struct p_compressed_bm *p, int set)
1060{
1061 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1062}
Philipp Reisnerb411b362009-09-25 16:07:19 -07001063
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001064static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1065{
1066 BUG_ON(n & ~0x7);
1067 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001068}
1069
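/* Illustrative sketch, not part of the driver: the helpers above pack the
 * single "encoding" byte of a compressed bitmap packet as
 *   bits 0-3  bitmap encoding id (enum drbd_bitmap_code, e.g. RLE_VLI_Bits)
 *   bits 4-6  number of unused pad bits at the end of the code string
 *   bit  7    whether the first run describes set bits (the offset is implicit)
 * Filled in the same order the driver uses below (start and pad bits first,
 * the code nibble last), a header for a byte-aligned RLE/VLI stream that
 * starts with set bits would be prepared like this: */
static void __maybe_unused example_fill_dcbp_header(struct p_compressed_bm *p)
{
	dcbp_set_start(p, 1);		/* first run covers set bits */
	dcbp_set_pad_bits(p, 0);	/* stream ends on a byte boundary */
	dcbp_set_code(p, RLE_VLI_Bits);	/* RLE + VLI encoding */
}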
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001070static int fill_bitmap_rle_bits(struct drbd_device *device,
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001071 struct p_compressed_bm *p,
1072 unsigned int size,
1073 struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001074{
1075 struct bitstream bs;
1076 unsigned long plain_bits;
1077 unsigned long tmp;
1078 unsigned long rl;
1079 unsigned len;
1080 unsigned toggle;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001081 int bits, use_rle;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001082
1083 /* may we use this feature? */
Philipp Reisner44ed1672011-04-19 17:10:19 +02001084 rcu_read_lock();
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001085 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001086 rcu_read_unlock();
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001087 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
Philipp Reisner44ed1672011-04-19 17:10:19 +02001088 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001089
1090 if (c->bit_offset >= c->bm_bits)
1091 return 0; /* nothing to do. */
1092
1093 /* use at most this many bytes */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001094 bitstream_init(&bs, p->code, size, 0);
1095 memset(p->code, 0, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001096 /* plain bits covered in this code string */
1097 plain_bits = 0;
1098
1099 /* p->encoding & 0x80 stores whether the first run describes set bits.
1100 * The bit offset is implicit.
1101 * Start with toggle == 2 so the first iteration can be told apart. */
1102 toggle = 2;
1103
1104 /* see how many plain bits we can stuff into one packet
1105 * using RLE and VLI. */
1106 do {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001107 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1108 : _drbd_bm_find_next(device, c->bit_offset);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001109 if (tmp == -1UL)
1110 tmp = c->bm_bits;
1111 rl = tmp - c->bit_offset;
1112
1113 if (toggle == 2) { /* first iteration */
1114 if (rl == 0) {
1115 /* the first checked bit was set,
1116 * store start value, */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001117 dcbp_set_start(p, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001118 /* but skip encoding of zero run length */
1119 toggle = !toggle;
1120 continue;
1121 }
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001122 dcbp_set_start(p, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001123 }
1124
1125 /* paranoia: catch zero runlength.
1126 * can only happen if bitmap is modified while we scan it. */
1127 if (rl == 0) {
1128 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
1129 "t:%u bo:%lu\n", toggle, c->bit_offset);
1130 return -1;
1131 }
1132
1133 bits = vli_encode_bits(&bs, rl);
1134 if (bits == -ENOBUFS) /* buffer full */
1135 break;
1136 if (bits <= 0) {
1137 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
1138 return 0;
1139 }
1140
1141 toggle = !toggle;
1142 plain_bits += rl;
1143 c->bit_offset = tmp;
1144 } while (c->bit_offset < c->bm_bits);
1145
1146 len = bs.cur.b - p->code + !!bs.cur.bit;
1147
1148 if (plain_bits < (len << 3)) {
1149 /* incompressible with this method.
1150 * we need to rewind both word and bit position. */
1151 c->bit_offset -= plain_bits;
1152 bm_xfer_ctx_bit_to_word_offset(c);
1153 c->bit_offset = c->word_offset * BITS_PER_LONG;
1154 return 0;
1155 }
1156
1157 /* RLE + VLI was able to compress it just fine.
1158 * update c->word_offset. */
1159 bm_xfer_ctx_bit_to_word_offset(c);
1160
1161 /* store pad_bits */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001162 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001163
1164 return len;
1165}
1166
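/* Worked example for the compressibility check above (a note, not part of the
 * driver): if the RLE/VLI stream came out at len = 100 bytes, it only pays off
 * when it covers more than 100 * 8 = 800 plain bitmap bits.  Covering, say,
 * only 600 plain bits counts as "incompressible with this method", so the
 * transfer context is rewound, 0 is returned, and the caller falls back to
 * sending plain bitmap words. */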
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001167/**
1168 * send_bitmap_rle_or_plain() - send the next chunk of the bitmap, compressed if possible
1169 *
1170 * Return 0 when done, 1 when another iteration is needed, and a negative error
1171 * code upon failure.
1172 */
1173static int
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001174send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001175{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001176 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1177 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001178 struct p_compressed_bm *p = sock->sbuf + header_size;
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001179 int len, err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001180
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001181 len = fill_bitmap_rle_bits(device, p,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001182 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001183 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001184 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001185
1186 if (len) {
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001187 dcbp_set_code(p, RLE_VLI_Bits);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001188 err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001189 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1190 NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001191 c->packets[0]++;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001192 c->bytes[0] += header_size + sizeof(*p) + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001193
1194 if (c->bit_offset >= c->bm_bits)
1195 len = 0; /* DONE */
1196 } else {
1197 /* was not compressible.
1198 * send a buffer full of plain text bits instead. */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001199 unsigned int data_size;
1200 unsigned long num_words;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001201 unsigned long *p = sock->sbuf + header_size;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001202
1203 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001204 num_words = min_t(size_t, data_size / sizeof(*p),
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001205 c->bm_words - c->word_offset);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001206 len = num_words * sizeof(*p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001207 if (len)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001208 drbd_bm_get_lel(device, c->word_offset, num_words, p);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001209 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001210 c->word_offset += num_words;
1211 c->bit_offset = c->word_offset * BITS_PER_LONG;
1212
1213 c->packets[1]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001214 c->bytes[1] += header_size + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001215
1216 if (c->bit_offset > c->bm_bits)
1217 c->bit_offset = c->bm_bits;
1218 }
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001219 if (!err) {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001220 if (len == 0) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001221 INFO_bm_xfer_stats(device, "send", c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001222 return 0;
1223 } else
1224 return 1;
1225 }
1226 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001227}
1228
1229/* See the comment at receive_bitmap() */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001230static int _drbd_send_bitmap(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001231{
1232 struct bm_xfer_ctx c;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001233 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001234
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001235 if (!expect(device->bitmap))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001236 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001237
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001238 if (get_ldev(device)) {
1239 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001240 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001241 drbd_bm_set_all(device);
1242 if (drbd_bm_write(device)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001243 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
1244 * but otherwise process as per normal - need to tell other
1245 * side that a full resync is required! */
1246 dev_err(DEV, "Failed to write bitmap to disk!\n");
1247 } else {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001248 drbd_md_clear_flag(device, MDF_FULL_SYNC);
1249 drbd_md_sync(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001250 }
1251 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001252 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001253 }
1254
1255 c = (struct bm_xfer_ctx) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001256 .bm_bits = drbd_bm_bits(device),
1257 .bm_words = drbd_bm_words(device),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001258 };
1259
1260 do {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001261 err = send_bitmap_rle_or_plain(device, &c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001262 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001263
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001264 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001265}
1266
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001267int drbd_send_bitmap(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001268{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001269 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001270 int err = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001271
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001272 mutex_lock(&sock->mutex);
1273 if (sock->socket)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001274 err = !_drbd_send_bitmap(device);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001275 mutex_unlock(&sock->mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001276 return err;
1277}
1278
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001279void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001280{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001281 struct drbd_socket *sock;
1282 struct p_barrier_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001283
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001284 if (connection->cstate < C_WF_REPORT_PARAMS)
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001285 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001286
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001287 sock = &connection->meta;
1288 p = conn_prepare_command(connection, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001289 if (!p)
1290 return;
1291 p->barrier = barrier_nr;
1292 p->set_size = cpu_to_be32(set_size);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001293 conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001294}
1295
1296/**
1297 * _drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001298 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001299 * @cmd: Packet command code.
1300 * @sector: sector, needs to be in big endian byte order
1301 * @blksize: size in bytes, needs to be in big endian byte order
1302 * @block_id: Id, big endian byte order
1303 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001304static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001305 u64 sector, u32 blksize, u64 block_id)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001306{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001307 struct drbd_socket *sock;
1308 struct p_block_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001309
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001310 if (device->state.conn < C_CONNECTED)
Andreas Gruenbachera8c32aa2011-03-16 01:27:22 +01001311 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001312
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001313 sock = &first_peer_device(device)->connection->meta;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001314 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001315 if (!p)
1316 return -EIO;
1317 p->sector = sector;
1318 p->block_id = block_id;
1319 p->blksize = blksize;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001320 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1321 return drbd_send_command(device, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001322}
1323
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001324/* dp->sector and dp->block_id already/still in network byte order,
1325 * data_size is payload size according to dp->head,
1326 * and may need to be corrected for digest size. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001327void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001328 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001329{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001330 if (first_peer_device(device)->connection->peer_integrity_tfm)
1331 data_size -= crypto_hash_digestsize(first_peer_device(device)->connection->peer_integrity_tfm);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001332 _drbd_send_ack(device, cmd, dp->sector, cpu_to_be32(data_size),
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001333 dp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001334}
1335
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001336void drbd_send_ack_rp(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001337 struct p_block_req *rp)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001338{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001339 _drbd_send_ack(device, cmd, rp->sector, rp->blksize, rp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001340}
1341
1342/**
1343 * drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001344 * @device: DRBD device
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001345 * @cmd: packet command code
1346 * @peer_req: peer request
Philipp Reisnerb411b362009-09-25 16:07:19 -07001347 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001348int drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001349 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001350{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001351 return _drbd_send_ack(device, cmd,
Andreas Gruenbacherdd516122011-03-16 15:39:08 +01001352 cpu_to_be64(peer_req->i.sector),
1353 cpu_to_be32(peer_req->i.size),
1354 peer_req->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001355}
1356
1357/* This function misuses the block_id field to signal if the blocks
1358 * are in sync or not. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001359int drbd_send_ack_ex(struct drbd_device *device, enum drbd_packet cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001360 sector_t sector, int blksize, u64 block_id)
1361{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001362 return _drbd_send_ack(device, cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001363 cpu_to_be64(sector),
1364 cpu_to_be32(blksize),
1365 cpu_to_be64(block_id));
1366}
1367
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001368int drbd_send_drequest(struct drbd_device *device, int cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001369 sector_t sector, int size, u64 block_id)
1370{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001371 struct drbd_socket *sock;
1372 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001373
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001374 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001375 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001376 if (!p)
1377 return -EIO;
1378 p->sector = cpu_to_be64(sector);
1379 p->block_id = block_id;
1380 p->blksize = cpu_to_be32(size);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001381 return drbd_send_command(device, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001382}
1383
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001384int drbd_send_drequest_csum(struct drbd_device *device, sector_t sector, int size,
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001385 void *digest, int digest_size, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001386{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001387 struct drbd_socket *sock;
1388 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001389
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001390 /* FIXME: Put the digest into the preallocated socket buffer. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001391
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001392 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001393 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001394 if (!p)
1395 return -EIO;
1396 p->sector = cpu_to_be64(sector);
1397 p->block_id = ID_SYNCER /* unused */;
1398 p->blksize = cpu_to_be32(size);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001399 return drbd_send_command(device, sock, cmd, sizeof(*p),
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001400 digest, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001401}
1402
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001403int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001404{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001405 struct drbd_socket *sock;
1406 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001407
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001408 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001409 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001410 if (!p)
1411 return -EIO;
1412 p->sector = cpu_to_be64(sector);
1413 p->block_id = ID_SYNCER /* unused */;
1414 p->blksize = cpu_to_be32(size);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001415 return drbd_send_command(device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001416}
1417
1418/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001419 * returns false if we should retry,
1420 * true if we think the connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07001421 */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001422static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001423{
1424 int drop_it;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001425 /* long elapsed = (long)(jiffies - device->last_received); */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001426
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001427 drop_it = connection->meta.socket == sock
1428 || !connection->asender.task
1429 || get_t_state(&connection->asender) != RUNNING
1430 || connection->cstate < C_WF_REPORT_PARAMS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001431
1432 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001433 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001434
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001435 drop_it = !--connection->ko_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001436 if (!drop_it) {
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001437 conn_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1438 current->comm, current->pid, connection->ko_count);
1439 request_ping(connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001440 }
1441
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001442 return drop_it; /* && (device->state == R_PRIMARY) */;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001443}
1444
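/* Note (not part of the driver): ko_count acts as a "knock out" budget.  It is
 * reloaded from net_conf->ko_count each time drbd_send() starts on the data
 * socket; every send timeout decrements it, a ping is requested while it is
 * still positive, and only when it reaches zero is the connection considered
 * dead and dropped. */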
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001445static void drbd_update_congested(struct drbd_connection *connection)
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001446{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001447 struct sock *sk = connection->data.socket->sk;
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001448 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001449 set_bit(NET_CONGESTED, &connection->flags);
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001450}
1451
Philipp Reisnerb411b362009-09-25 16:07:19 -07001452/* The idea of sendpage seems to be to put some kind of reference
1453 * to the page into the skb, and to hand it over to the NIC. In
1454 * this process get_page() gets called.
1455 *
1456 * As soon as the page was really sent over the network put_page()
1457 * gets called by some part of the network layer. [ NIC driver? ]
1458 *
1459 * [ get_page() / put_page() increment/decrement the count. If count
1460 * reaches 0 the page will be freed. ]
1461 *
1462 * This works nicely with pages from FSs.
1463 * But this means that in protocol A we might signal IO completion too early!
1464 *
1465 * In order not to corrupt data during a resync we must make sure
1466 * that we do not reuse our own buffer pages (EEs) too early, therefore
1467 * we have the net_ee list.
1468 *
1469 * XFS still seems to have problems: it submits pages with page_count == 0!
1470 * As a workaround, we disable sendpage on pages
1471 * with page_count == 0 or PageSlab.
1472 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001473static int _drbd_no_send_page(struct drbd_device *device, struct page *page,
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001474 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001475{
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001476 struct socket *socket;
1477 void *addr;
1478 int err;
1479
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001480 socket = first_peer_device(device)->connection->data.socket;
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001481 addr = kmap(page) + offset;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001482 err = drbd_send_all(first_peer_device(device)->connection, socket, addr, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001483 kunmap(page);
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001484 if (!err)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001485 device->send_cnt += size >> 9;
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001486 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001487}
1488
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001489static int _drbd_send_page(struct drbd_device *device, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001490 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001491{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001492 struct socket *socket = first_peer_device(device)->connection->data.socket;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001493 mm_segment_t oldfs = get_fs();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001494 int len = size;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001495 int err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001496
1497 /* e.g. XFS meta- & log-data is in slab pages, which have a
1498 * page_count of 0 and/or have PageSlab() set.
1499 * we cannot use send_page for those, as that does get_page();
1500 * put_page(); and would cause either a VM_BUG directly, or
1501 * __page_cache_release a page that would actually still be referenced
1502 * by someone, leading to some obscure delayed Oops somewhere else. */
1503 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001504 return _drbd_no_send_page(device, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001505
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001506 msg_flags |= MSG_NOSIGNAL;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001507 drbd_update_congested(first_peer_device(device)->connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001508 set_fs(KERNEL_DS);
1509 do {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001510 int sent;
1511
1512 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001513 if (sent <= 0) {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001514 if (sent == -EAGAIN) {
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001515 if (we_should_drop_the_connection(first_peer_device(device)->connection, socket))
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001516 break;
1517 continue;
1518 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001519 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1520 __func__, (int)size, len, sent);
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001521 if (sent < 0)
1522 err = sent;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001523 break;
1524 }
1525 len -= sent;
1526 offset += sent;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001527 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001528 set_fs(oldfs);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001529 clear_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001530
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001531 if (len == 0) {
1532 err = 0;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001533 device->send_cnt += size >> 9;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001534 }
1535 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001536}
1537
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001538static int _drbd_send_bio(struct drbd_device *device, struct bio *bio)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001539{
Kent Overstreet79886132013-11-23 17:19:00 -08001540 struct bio_vec bvec;
1541 struct bvec_iter iter;
1542
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001543 /* hint all but last page with MSG_MORE */
Kent Overstreet79886132013-11-23 17:19:00 -08001544 bio_for_each_segment(bvec, bio, iter) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001545 int err;
1546
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001547 err = _drbd_no_send_page(device, bvec.bv_page,
Kent Overstreet79886132013-11-23 17:19:00 -08001548 bvec.bv_offset, bvec.bv_len,
Kent Overstreet4550dd62013-08-07 14:26:21 -07001549 bio_iter_last(bvec, iter)
Kent Overstreet79886132013-11-23 17:19:00 -08001550 ? 0 : MSG_MORE);
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001551 if (err)
1552 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001553 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001554 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001555}
1556
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001557static int _drbd_send_zc_bio(struct drbd_device *device, struct bio *bio)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001558{
Kent Overstreet79886132013-11-23 17:19:00 -08001559 struct bio_vec bvec;
1560 struct bvec_iter iter;
1561
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001562 /* hint all but last page with MSG_MORE */
Kent Overstreet79886132013-11-23 17:19:00 -08001563 bio_for_each_segment(bvec, bio, iter) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001564 int err;
1565
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001566 err = _drbd_send_page(device, bvec.bv_page,
Kent Overstreet79886132013-11-23 17:19:00 -08001567 bvec.bv_offset, bvec.bv_len,
Kent Overstreet4550dd62013-08-07 14:26:21 -07001568 bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001569 if (err)
1570 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001571 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001572 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001573}
1574
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001575static int _drbd_send_zc_ee(struct drbd_device *device,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001576 struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001577{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001578 struct page *page = peer_req->pages;
1579 unsigned len = peer_req->i.size;
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001580 int err;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001581
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001582 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001583 page_chain_for_each(page) {
1584 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001585
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001586 err = _drbd_send_page(device, page, 0, l,
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001587 page_chain_next(page) ? MSG_MORE : 0);
1588 if (err)
1589 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001590 len -= l;
1591 }
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001592 return 0;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001593}
1594
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001595static u32 bio_flags_to_wire(struct drbd_device *device, unsigned long bi_rw)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001596{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001597 if (first_peer_device(device)->connection->agreed_pro_version >= 95)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001598 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001599 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1600 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1601 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1602 else
Jens Axboe721a9602011-03-09 11:56:30 +01001603 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001604}
1605
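/* Illustrative sketch, not part of the driver: how a typical synchronous FUA
 * write bio would be announced on the wire, depending on the protocol version
 * agreed with the peer. */
static u32 __maybe_unused example_wire_flags_for_fua_write(struct drbd_device *device)
{
	/* protocol >= 95: DP_RW_SYNC | DP_FUA; older peers: DP_RW_SYNC only */
	return bio_flags_to_wire(device, REQ_SYNC | REQ_FUA);
}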
Philipp Reisnerb411b362009-09-25 16:07:19 -07001606/* Used to send write requests
1607 * R_PRIMARY -> Peer (P_DATA)
1608 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001609int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001610{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001611 struct drbd_socket *sock;
1612 struct p_data *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001613 unsigned int dp_flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001614 int dgs;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001615 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001616
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001617 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001618 p = drbd_prepare_command(device, sock);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001619 dgs = first_peer_device(device)->connection->integrity_tfm ?
1620 crypto_hash_digestsize(first_peer_device(device)->connection->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001621
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001622 if (!p)
1623 return -EIO;
1624 p->sector = cpu_to_be64(req->i.sector);
1625 p->block_id = (unsigned long)req;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001626 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1627 dp_flags = bio_flags_to_wire(device, req->master_bio->bi_rw);
1628 if (device->state.conn >= C_SYNC_SOURCE &&
1629 device->state.conn <= C_PAUSED_SYNC_T)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001630 dp_flags |= DP_MAY_SET_IN_SYNC;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001631 if (first_peer_device(device)->connection->agreed_pro_version >= 100) {
Philipp Reisner303d1442011-04-13 16:24:47 -07001632 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1633 dp_flags |= DP_SEND_RECEIVE_ACK;
1634 if (req->rq_state & RQ_EXP_WRITE_ACK)
1635 dp_flags |= DP_SEND_WRITE_ACK;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001636 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001637 p->dp_flags = cpu_to_be32(dp_flags);
1638 if (dgs)
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001639 drbd_csum_bio(device, first_peer_device(device)->connection->integrity_tfm, req->master_bio, p + 1);
1640 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001641 if (!err) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001642 /* For protocol A, we have to memcpy the payload into
1643 * socket buffers, as we may complete right away
1644 * as soon as we handed it over to tcp, at which point the data
1645 * pages may become invalid.
1646 *
1647 * With data integrity enabled, we copy it as well, so we can be
1648 * sure that even if the bio pages are still being modified, this
1649 * won't change the data on the wire; thus if the digest checks
1650 * out ok after sending on this side but does not match on the
1651 * receiving side, we have certainly detected corruption elsewhere.
1652 */
Philipp Reisner303d1442011-04-13 16:24:47 -07001653 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001654 err = _drbd_send_bio(device, req->master_bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001655 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001656 err = _drbd_send_zc_bio(device, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01001657
1658 /* double check digest, sometimes buffers have been modified in flight. */
1659 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001660 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01001661 * currently supported in kernel crypto. */
1662 unsigned char digest[64];
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001663 drbd_csum_bio(device, first_peer_device(device)->connection->integrity_tfm, req->master_bio, digest);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001664 if (memcmp(p + 1, digest, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001665 dev_warn(DEV,
1666 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001667 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01001668 }
1669 } /* else if (dgs > 64) {
1670 ... Be noisy about digest too large ...
1671 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001672 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001673 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001674
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001675 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001676}
1677
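/* On-the-wire layout of a P_DATA packet as assembled above (a note, not part
 * of the driver): the protocol header is followed by struct p_data, then, if
 * an integrity_tfm is configured, dgs bytes of digest computed over the bio
 * payload (written at p + 1), and finally req->i.size bytes of write payload,
 * copied into the socket buffer for protocol A or data integrity and sent
 * zero-copy otherwise. */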
1678/* answer packet, used to send data back for read requests:
1679 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1680 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1681 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001682int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001683 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001684{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001685 struct drbd_socket *sock;
1686 struct p_data *p;
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001687 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001688 int dgs;
1689
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001690 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001691 p = drbd_prepare_command(device, sock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001692
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001693 dgs = first_peer_device(device)->connection->integrity_tfm ?
1694 crypto_hash_digestsize(first_peer_device(device)->connection->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001695
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001696 if (!p)
1697 return -EIO;
1698 p->sector = cpu_to_be64(peer_req->i.sector);
1699 p->block_id = peer_req->block_id;
1700 p->seq_num = 0; /* unused */
Lars Ellenbergb17f33c2012-02-08 15:32:51 +01001701 p->dp_flags = 0;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001702 if (dgs)
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001703 drbd_csum_ee(device, first_peer_device(device)->connection->integrity_tfm, peer_req, p + 1);
1704 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001705 if (!err)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001706 err = _drbd_send_zc_ee(device, peer_req);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001707 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001708
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001709 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001710}
1711
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001712int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req)
Philipp Reisner73a01a12010-10-27 14:33:00 +02001713{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001714 struct drbd_socket *sock;
1715 struct p_block_desc *p;
Philipp Reisner73a01a12010-10-27 14:33:00 +02001716
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001717 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001718 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001719 if (!p)
1720 return -EIO;
1721 p->sector = cpu_to_be64(req->i.sector);
1722 p->blksize = cpu_to_be32(req->i.size);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001723 return drbd_send_command(device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
Philipp Reisner73a01a12010-10-27 14:33:00 +02001724}
1725
Philipp Reisnerb411b362009-09-25 16:07:19 -07001726/*
1727 drbd_send distinguishes two cases:
1728
1729 Packets sent via the data socket "sock"
1730 and packets sent via the meta data socket "msock"
1731
1732                      sock                      msock
1733   -----------------+-------------------------+------------------------------
1734   timeout           conf.timeout / 2          conf.timeout / 2
1735   timeout action    send a ping via msock     Abort communication
1736                                               and close all sockets
1737*/
1738
1739/*
1740 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1741 */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001742int drbd_send(struct drbd_connection *connection, struct socket *sock,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001743 void *buf, size_t size, unsigned msg_flags)
1744{
1745 struct kvec iov;
1746 struct msghdr msg;
1747 int rv, sent = 0;
1748
1749 if (!sock)
Andreas Gruenbacherc0d42c82010-12-09 23:52:22 +01001750 return -EBADR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001751
1752 /* THINK if (signal_pending) return ... ? */
1753
1754 iov.iov_base = buf;
1755 iov.iov_len = size;
1756
1757 msg.msg_name = NULL;
1758 msg.msg_namelen = 0;
1759 msg.msg_control = NULL;
1760 msg.msg_controllen = 0;
1761 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1762
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001763 if (sock == connection->data.socket) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001764 rcu_read_lock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001765 connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001766 rcu_read_unlock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001767 drbd_update_congested(connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001768 }
1769 do {
1770 /* STRANGE
1771 * tcp_sendmsg does _not_ use its size parameter at all ?
1772 *
1773 * -EAGAIN on timeout, -EINTR on signal.
1774 */
1775/* THINK
1776 * do we need to block DRBD_SIG if sock == &meta.socket ??
1777 * otherwise wake_asender() might interrupt some send_*Ack !
1778 */
1779 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1780 if (rv == -EAGAIN) {
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001781 if (we_should_drop_the_connection(connection, sock))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001782 break;
1783 else
1784 continue;
1785 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001786 if (rv == -EINTR) {
1787 flush_signals(current);
1788 rv = 0;
1789 }
1790 if (rv < 0)
1791 break;
1792 sent += rv;
1793 iov.iov_base += rv;
1794 iov.iov_len -= rv;
1795 } while (sent < size);
1796
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001797 if (sock == connection->data.socket)
1798 clear_bit(NET_CONGESTED, &connection->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001799
1800 if (rv <= 0) {
1801 if (rv != -EAGAIN) {
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001802 conn_err(connection, "%s_sendmsg returned %d\n",
1803 sock == connection->meta.socket ? "msock" : "sock",
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001804 rv);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001805 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001806 } else
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001807 conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001808 }
1809
1810 return sent;
1811}
1812
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001813/**
1814 * drbd_send_all - Send an entire buffer
1815 *
1816 * Returns 0 upon success and a negative error value otherwise.
1817 */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001818int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001819 size_t size, unsigned msg_flags)
1820{
1821 int err;
1822
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001823 err = drbd_send(connection, sock, buffer, size, msg_flags);
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001824 if (err < 0)
1825 return err;
1826 if (err != size)
1827 return -EIO;
1828 return 0;
1829}
1830
Philipp Reisnerb411b362009-09-25 16:07:19 -07001831static int drbd_open(struct block_device *bdev, fmode_t mode)
1832{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001833 struct drbd_device *device = bdev->bd_disk->private_data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001834 unsigned long flags;
1835 int rv = 0;
1836
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001837 mutex_lock(&drbd_main_mutex);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001838 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001839 /* to have a stable device->state.role
Philipp Reisnerb411b362009-09-25 16:07:19 -07001840 * and no race with updating open_cnt */
1841
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001842 if (device->state.role != R_PRIMARY) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001843 if (mode & FMODE_WRITE)
1844 rv = -EROFS;
1845 else if (!allow_oos)
1846 rv = -EMEDIUMTYPE;
1847 }
1848
1849 if (!rv)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001850 device->open_cnt++;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001851 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001852 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001853
1854 return rv;
1855}
1856
Al Virodb2a1442013-05-05 21:52:57 -04001857static void drbd_release(struct gendisk *gd, fmode_t mode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001858{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001859 struct drbd_device *device = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001860 mutex_lock(&drbd_main_mutex);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001861 device->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001862 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001863}
1864
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001865static void drbd_set_defaults(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001866{
Lars Ellenbergf3990022011-03-23 14:31:09 +01001867 /* Beware! The actual layout differs
1868 * between big endian and little endian */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001869 device->state = (union drbd_dev_state) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001870 { .role = R_SECONDARY,
1871 .peer = R_UNKNOWN,
1872 .conn = C_STANDALONE,
1873 .disk = D_DISKLESS,
1874 .pdsk = D_UNKNOWN,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001875 } };
1876}
1877
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001878void drbd_init_set_defaults(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001879{
1880 /* the memset(,0,) did most of this.
1881 * note: only assignments, no allocation in here */
1882
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001883 drbd_set_defaults(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001884
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001885 atomic_set(&device->ap_bio_cnt, 0);
1886 atomic_set(&device->ap_pending_cnt, 0);
1887 atomic_set(&device->rs_pending_cnt, 0);
1888 atomic_set(&device->unacked_cnt, 0);
1889 atomic_set(&device->local_cnt, 0);
1890 atomic_set(&device->pp_in_use_by_net, 0);
1891 atomic_set(&device->rs_sect_in, 0);
1892 atomic_set(&device->rs_sect_ev, 0);
1893 atomic_set(&device->ap_in_flight, 0);
1894 atomic_set(&device->md_io_in_use, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001895
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001896 mutex_init(&device->own_state_mutex);
1897 device->state_mutex = &device->own_state_mutex;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001898
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001899 spin_lock_init(&device->al_lock);
1900 spin_lock_init(&device->peer_seq_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001901
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001902 INIT_LIST_HEAD(&device->active_ee);
1903 INIT_LIST_HEAD(&device->sync_ee);
1904 INIT_LIST_HEAD(&device->done_ee);
1905 INIT_LIST_HEAD(&device->read_ee);
1906 INIT_LIST_HEAD(&device->net_ee);
1907 INIT_LIST_HEAD(&device->resync_reads);
1908 INIT_LIST_HEAD(&device->resync_work.list);
1909 INIT_LIST_HEAD(&device->unplug_work.list);
1910 INIT_LIST_HEAD(&device->go_diskless.list);
1911 INIT_LIST_HEAD(&device->md_sync_work.list);
1912 INIT_LIST_HEAD(&device->start_resync_work.list);
1913 INIT_LIST_HEAD(&device->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02001914
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001915 device->resync_work.cb = w_resync_timer;
1916 device->unplug_work.cb = w_send_write_hint;
1917 device->go_diskless.cb = w_go_diskless;
1918 device->md_sync_work.cb = w_md_sync;
1919 device->bm_io_work.w.cb = w_bitmap_io;
1920 device->start_resync_work.cb = w_start_resync;
Philipp Reisnera21e9292011-02-08 15:08:49 +01001921
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001922 device->resync_work.device = device;
1923 device->unplug_work.device = device;
1924 device->go_diskless.device = device;
1925 device->md_sync_work.device = device;
1926 device->bm_io_work.w.device = device;
1927 device->start_resync_work.device = device;
Philipp Reisnera21e9292011-02-08 15:08:49 +01001928
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001929 init_timer(&device->resync_timer);
1930 init_timer(&device->md_sync_timer);
1931 init_timer(&device->start_resync_timer);
1932 init_timer(&device->request_timer);
1933 device->resync_timer.function = resync_timer_fn;
1934 device->resync_timer.data = (unsigned long) device;
1935 device->md_sync_timer.function = md_sync_timer_fn;
1936 device->md_sync_timer.data = (unsigned long) device;
1937 device->start_resync_timer.function = start_resync_timer_fn;
1938 device->start_resync_timer.data = (unsigned long) device;
1939 device->request_timer.function = request_timer_fn;
1940 device->request_timer.data = (unsigned long) device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001941
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001942 init_waitqueue_head(&device->misc_wait);
1943 init_waitqueue_head(&device->state_wait);
1944 init_waitqueue_head(&device->ee_wait);
1945 init_waitqueue_head(&device->al_wait);
1946 init_waitqueue_head(&device->seq_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001947
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001948 device->resync_wenr = LC_FREE;
1949 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1950 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001951}
1952
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001953void drbd_device_cleanup(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001954{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001955 int i;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001956 if (first_peer_device(device)->connection->receiver.t_state != NONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001957 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001958 first_peer_device(device)->connection->receiver.t_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001959
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001960 device->al_writ_cnt =
1961 device->bm_writ_cnt =
1962 device->read_cnt =
1963 device->recv_cnt =
1964 device->send_cnt =
1965 device->writ_cnt =
1966 device->p_size =
1967 device->rs_start =
1968 device->rs_total =
1969 device->rs_failed = 0;
1970 device->rs_last_events = 0;
1971 device->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001972 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001973 device->rs_mark_left[i] = 0;
1974 device->rs_mark_time[i] = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001975 }
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001976 D_ASSERT(first_peer_device(device)->connection->net_conf == NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001977
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001978 drbd_set_my_capacity(device, 0);
1979 if (device->bitmap) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001980 /* maybe never allocated. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001981 drbd_bm_resize(device, 0, 1);
1982 drbd_bm_cleanup(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001983 }
1984
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001985 drbd_free_bc(device->ldev);
1986 device->ldev = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001987
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001988 clear_bit(AL_SUSPENDED, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001989
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001990 D_ASSERT(list_empty(&device->active_ee));
1991 D_ASSERT(list_empty(&device->sync_ee));
1992 D_ASSERT(list_empty(&device->done_ee));
1993 D_ASSERT(list_empty(&device->read_ee));
1994 D_ASSERT(list_empty(&device->net_ee));
1995 D_ASSERT(list_empty(&device->resync_reads));
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001996 D_ASSERT(list_empty(&first_peer_device(device)->connection->sender_work.q));
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001997 D_ASSERT(list_empty(&device->resync_work.list));
1998 D_ASSERT(list_empty(&device->unplug_work.list));
1999 D_ASSERT(list_empty(&device->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01002000
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002001 drbd_set_defaults(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002002}
2003
2004
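/*
 * Tear down the global caches and pools in reverse order of creation.
 * The private page pool is a singly linked list threaded through
 * page_private(), so it is drained page by page before the mempools and
 * kmem caches are destroyed.
 */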
2005static void drbd_destroy_mempools(void)
2006{
2007 struct page *page;
2008
2009 while (drbd_pp_pool) {
2010 page = drbd_pp_pool;
2011 drbd_pp_pool = (struct page *)page_private(page);
2012 __free_page(page);
2013 drbd_pp_vacant--;
2014 }
2015
2016 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2017
Lars Ellenberg9476f392011-02-23 17:02:01 +01002018 if (drbd_md_io_bio_set)
2019 bioset_free(drbd_md_io_bio_set);
Lars Ellenberg42818082011-02-23 12:39:46 +01002020 if (drbd_md_io_page_pool)
2021 mempool_destroy(drbd_md_io_page_pool);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002022 if (drbd_ee_mempool)
2023 mempool_destroy(drbd_ee_mempool);
2024 if (drbd_request_mempool)
2025 mempool_destroy(drbd_request_mempool);
2026 if (drbd_ee_cache)
2027 kmem_cache_destroy(drbd_ee_cache);
2028 if (drbd_request_cache)
2029 kmem_cache_destroy(drbd_request_cache);
2030 if (drbd_bm_ext_cache)
2031 kmem_cache_destroy(drbd_bm_ext_cache);
2032 if (drbd_al_ext_cache)
2033 kmem_cache_destroy(drbd_al_ext_cache);
2034
Lars Ellenberg9476f392011-02-23 17:02:01 +01002035 drbd_md_io_bio_set = NULL;
Lars Ellenberg42818082011-02-23 12:39:46 +01002036 drbd_md_io_page_pool = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002037 drbd_ee_mempool = NULL;
2038 drbd_request_mempool = NULL;
2039 drbd_ee_cache = NULL;
2040 drbd_request_cache = NULL;
2041 drbd_bm_ext_cache = NULL;
2042 drbd_al_ext_cache = NULL;
2043
2044 return;
2045}
2046
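/*
 * Allocate the global kmem caches, mempools and the private page pool.
 * The request, peer-request and page pools are sized relative to
 * minor_count; on any allocation failure we clean up via
 * drbd_destroy_mempools() and return -ENOMEM.
 */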
2047static int drbd_create_mempools(void)
2048{
2049 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002050 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002051 int i;
2052
2053 /* prepare our caches and mempools */
2054 drbd_request_mempool = NULL;
2055 drbd_ee_cache = NULL;
2056 drbd_request_cache = NULL;
2057 drbd_bm_ext_cache = NULL;
2058 drbd_al_ext_cache = NULL;
2059 drbd_pp_pool = NULL;
Lars Ellenberg42818082011-02-23 12:39:46 +01002060 drbd_md_io_page_pool = NULL;
Lars Ellenberg9476f392011-02-23 17:02:01 +01002061 drbd_md_io_bio_set = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002062
2063 /* caches */
2064 drbd_request_cache = kmem_cache_create(
2065 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2066 if (drbd_request_cache == NULL)
2067 goto Enomem;
2068
2069 drbd_ee_cache = kmem_cache_create(
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01002070 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002071 if (drbd_ee_cache == NULL)
2072 goto Enomem;
2073
2074 drbd_bm_ext_cache = kmem_cache_create(
2075 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2076 if (drbd_bm_ext_cache == NULL)
2077 goto Enomem;
2078
2079 drbd_al_ext_cache = kmem_cache_create(
2080 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2081 if (drbd_al_ext_cache == NULL)
2082 goto Enomem;
2083
2084 /* mempools */
Lars Ellenberg9476f392011-02-23 17:02:01 +01002085 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2086 if (drbd_md_io_bio_set == NULL)
2087 goto Enomem;
Lars Ellenberg9476f392011-02-23 17:02:01 +01002088
Lars Ellenberg42818082011-02-23 12:39:46 +01002089 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2090 if (drbd_md_io_page_pool == NULL)
2091 goto Enomem;
2092
Philipp Reisnerb411b362009-09-25 16:07:19 -07002093 drbd_request_mempool = mempool_create(number,
2094 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2095 if (drbd_request_mempool == NULL)
2096 goto Enomem;
2097
2098 drbd_ee_mempool = mempool_create(number,
2099 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06002100 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002101 goto Enomem;
2102
2103 /* drbd's page pool */
2104 spin_lock_init(&drbd_pp_lock);
2105
2106 for (i = 0; i < number; i++) {
2107 page = alloc_page(GFP_HIGHUSER);
2108 if (!page)
2109 goto Enomem;
2110 set_page_private(page, (unsigned long)drbd_pp_pool);
2111 drbd_pp_pool = page;
2112 }
2113 drbd_pp_vacant = number;
2114
2115 return 0;
2116
2117Enomem:
2118 drbd_destroy_mempools(); /* in case we allocated some */
2119 return -ENOMEM;
2120}
2121
2122static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2123 void *unused)
2124{
2125 /* just so we have it. you never know what interesting things we
2126 * might want to do here some day...
2127 */
2128
2129 return NOTIFY_DONE;
2130}
2131
2132static struct notifier_block drbd_notifier = {
2133 .notifier_call = drbd_notify_sys,
2134};
2135
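/*
 * Free any peer requests still sitting on the per-device lists.  After a
 * clean disconnect all of these lists are empty; a non-zero count is
 * logged as an error, but the entries are reclaimed anyway.
 */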
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002136static void drbd_release_all_peer_reqs(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002137{
2138 int rr;
2139
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002140 rr = drbd_free_peer_reqs(device, &device->active_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002141 if (rr)
2142 dev_err(DEV, "%d EEs in active list found!\n", rr);
2143
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002144 rr = drbd_free_peer_reqs(device, &device->sync_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002145 if (rr)
2146 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2147
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002148 rr = drbd_free_peer_reqs(device, &device->read_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002149 if (rr)
2150 dev_err(DEV, "%d EEs in read list found!\n", rr);
2151
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002152 rr = drbd_free_peer_reqs(device, &device->done_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002153 if (rr)
2154 dev_err(DEV, "%d EEs in done list found!\n", rr);
2155
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002156 rr = drbd_free_peer_reqs(device, &device->net_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002157 if (rr)
2158 dev_err(DEV, "%d EEs in net list found!\n", rr);
2159}
2160
Philipp Reisner774b3052011-02-22 02:07:03 -05002161/* caution. no locking. */
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002162void drbd_destroy_device(struct kref *kref)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002163{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002164 struct drbd_device *device = container_of(kref, struct drbd_device, kref);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002165 struct drbd_resource *resource = device->resource;
2166 struct drbd_connection *connection;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002167
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002168 del_timer_sync(&device->request_timer);
Philipp Reisnerdfa8bed2011-06-29 14:06:08 +02002169
Philipp Reisnerb411b362009-09-25 16:07:19 -07002170 /* paranoia asserts */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002171 D_ASSERT(device->open_cnt == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002172 /* end paranoia asserts */
2173
Philipp Reisnerb411b362009-09-25 16:07:19 -07002174 /* cleanup stuff that may have been allocated during
2175 * device (re-)configuration or state changes */
2176
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002177 if (device->this_bdev)
2178 bdput(device->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002179
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002180 drbd_free_bc(device->ldev);
2181 device->ldev = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002182
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002183 drbd_release_all_peer_reqs(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002184
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002185 lc_destroy(device->act_log);
2186 lc_destroy(device->resync);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002187
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002188 kfree(device->p_uuid);
2189 /* device->p_uuid = NULL; */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002190
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002191 if (device->bitmap) /* should no longer be there. */
2192 drbd_bm_cleanup(device);
2193 __free_page(device->md_io_page);
2194 put_disk(device->vdisk);
2195 blk_cleanup_queue(device->rq_queue);
2196 kfree(device->rs_plan_s);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002197 kfree(first_peer_device(device));
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002198 kfree(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002199
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002200 for_each_connection(connection, resource)
2201 kref_put(&connection->kref, drbd_destroy_connection);
2202 kref_put(&resource->kref, drbd_destroy_resource);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002203}
2204
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002205/* One global retry thread, if we need to push back some bio and have it
2206 * reinserted through our make request function.
2207 */
2208static struct retry_worker {
2209 struct workqueue_struct *wq;
2210 struct work_struct worker;
2211
2212 spinlock_t lock;
2213 struct list_head writes;
2214} retry;
2215
2216static void do_retry(struct work_struct *ws)
2217{
2218 struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2219 LIST_HEAD(writes);
2220 struct drbd_request *req, *tmp;
2221
2222 spin_lock_irq(&retry->lock);
2223 list_splice_init(&retry->writes, &writes);
2224 spin_unlock_irq(&retry->lock);
2225
2226 list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002227 struct drbd_device *device = req->w.device;
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002228 struct bio *bio = req->master_bio;
2229 unsigned long start_time = req->start_time;
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002230 bool expected;
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002231
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002232 expected =
2233 expect(atomic_read(&req->completion_ref) == 0) &&
2234 expect(req->rq_state & RQ_POSTPONED) &&
2235 expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2236 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2237
2238 if (!expected)
2239 dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
2240 req, atomic_read(&req->completion_ref),
2241 req->rq_state);
2242
2243 /* We still need to put one kref associated with the
2244 * "completion_ref" going zero in the code path that queued it
2245 * here. The request object may still be referenced by a
2246 * frozen local req->private_bio, in case we force-detached.
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002247 */
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002248 kref_put(&req->kref, drbd_req_destroy);
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002249
2250 /* A single suspended or otherwise blocking device may stall
2251 * all others as well. Fortunately, this code path is to
2252 * recover from a situation that "should not happen":
2253 * concurrent writes in multi-primary setup.
2254 * In a "normal" lifecycle, this workqueue is supposed to be
2255 * destroyed without ever doing anything.
2256	 * If it turns out to be an issue anyway, we can do per
2257 * resource (replication group) or per device (minor) retry
2258 * workqueues instead.
2259 */
2260
2261 /* We are not just doing generic_make_request(),
2262 * as we want to keep the start_time information. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002263 inc_ap_bio(device);
2264 __drbd_make_request(device, bio, start_time);
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002265 }
2266}
2267
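/*
 * Queue a postponed request for resubmission by the retry worker above.
 * Rough usage sketch from the request code: once a write has ended up
 * RQ_POSTPONED (e.g. a conflicting write in a multi-primary setup) and
 * its completion_ref has dropped to zero, call drbd_restart_request()
 * instead of completing the master bio; do_retry() then feeds it back
 * through __drbd_make_request().
 */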
Lars Ellenberg9d05e7c2012-07-17 10:05:04 +02002268void drbd_restart_request(struct drbd_request *req)
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002269{
2270 unsigned long flags;
2271 spin_lock_irqsave(&retry.lock, flags);
2272 list_move_tail(&req->tl_requests, &retry.writes);
2273 spin_unlock_irqrestore(&retry.lock, flags);
2274
2275 /* Drop the extra reference that would otherwise
2276 * have been dropped by complete_master_bio.
2277 * do_retry() needs to grab a new one. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002278 dec_ap_bio(req->w.device);
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002279
2280 queue_work(retry.wq, &retry.worker);
2281}
2282
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002283void drbd_destroy_resource(struct kref *kref)
2284{
2285 struct drbd_resource *resource =
2286 container_of(kref, struct drbd_resource, kref);
2287
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002288 idr_destroy(&resource->devices);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002289 kfree(resource->name);
2290 kfree(resource);
2291}
2292
2293void drbd_free_resource(struct drbd_resource *resource)
2294{
2295 struct drbd_connection *connection, *tmp;
2296
2297 for_each_connection_safe(connection, tmp, resource) {
2298 list_del(&connection->connections);
2299 kref_put(&connection->kref, drbd_destroy_connection);
2300 }
2301 kref_put(&resource->kref, drbd_destroy_resource);
2302}
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002303
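/*
 * Module unload path: remove /proc/drbd first (drbdsetup probes for its
 * presence), then unregister netlink, delete all minors and resources,
 * and finally release the mempools and the block major.
 */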
Philipp Reisnerb411b362009-09-25 16:07:19 -07002304static void drbd_cleanup(void)
2305{
2306 unsigned int i;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002307 struct drbd_device *device;
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002308 struct drbd_resource *resource, *tmp;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002309
2310 unregister_reboot_notifier(&drbd_notifier);
2311
Lars Ellenberg17a93f32010-11-24 10:37:35 +01002312 /* first remove proc,
2313	 * drbdsetup uses its presence to detect
2314 * whether DRBD is loaded.
2315	 * If we get stuck in proc removal
2316	 * after netlink has already been deregistered,
2317 * some drbdsetup commands may wait forever
2318 * for an answer.
2319 */
2320 if (drbd_proc)
2321 remove_proc_entry("drbd", NULL);
2322
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002323 if (retry.wq)
2324 destroy_workqueue(retry.wq);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002325
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002326 drbd_genl_unregister();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002327
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002328 idr_for_each_entry(&drbd_devices, device, i)
2329 drbd_delete_minor(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002330
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002331 /* not _rcu since, no other updater anymore. Genl already unregistered */
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002332 for_each_resource_safe(resource, tmp, &drbd_resources) {
2333 list_del(&resource->resources);
2334 drbd_free_resource(resource);
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002335 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002336
Philipp Reisner81a5d602011-02-22 19:53:16 -05002337 drbd_destroy_mempools();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002338 unregister_blkdev(DRBD_MAJOR, "drbd");
2339
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002340 idr_destroy(&drbd_devices);
Philipp Reisner81a5d602011-02-22 19:53:16 -05002341
Philipp Reisnerb411b362009-09-25 16:07:19 -07002342 printk(KERN_INFO "drbd: module cleanup done.\n");
2343}
2344
2345/**
Artem Bityutskiyd97482e2012-07-25 18:12:12 +03002346 * drbd_congested() - Callback for the flusher thread
Philipp Reisnerb411b362009-09-25 16:07:19 -07002347 * @congested_data: User data
Artem Bityutskiyd97482e2012-07-25 18:12:12 +03002348 * @bdi_bits: Bits the BDI flusher thread is currently interested in
Philipp Reisnerb411b362009-09-25 16:07:19 -07002349 *
2350 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2351 */
2352static int drbd_congested(void *congested_data, int bdi_bits)
2353{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002354 struct drbd_device *device = congested_data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002355 struct request_queue *q;
2356 char reason = '-';
2357 int r = 0;
2358
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002359 if (!may_inc_ap_bio(device)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002360 /* DRBD has frozen IO */
2361 r = bdi_bits;
2362 reason = 'd';
2363 goto out;
2364 }
2365
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002366 if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
Lars Ellenbergc2ba6862012-06-14 15:14:06 +02002367 r |= (1 << BDI_async_congested);
2368 /* Without good local data, we would need to read from remote,
2369 * and that would need the worker thread as well, which is
2370 * currently blocked waiting for that usermode helper to
2371 * finish.
2372 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002373 if (!get_ldev_if_state(device, D_UP_TO_DATE))
Lars Ellenbergc2ba6862012-06-14 15:14:06 +02002374 r |= (1 << BDI_sync_congested);
2375 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002376 put_ldev(device);
Lars Ellenbergc2ba6862012-06-14 15:14:06 +02002377 r &= bdi_bits;
2378 reason = 'c';
2379 goto out;
2380 }
2381
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002382 if (get_ldev(device)) {
2383 q = bdev_get_queue(device->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002384 r = bdi_congested(&q->backing_dev_info, bdi_bits);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002385 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002386 if (r)
2387 reason = 'b';
2388 }
2389
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002390 if (bdi_bits & (1 << BDI_async_congested) &&
2391 test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002392 r |= (1 << BDI_async_congested);
2393 reason = reason == 'b' ? 'a' : 'n';
2394 }
2395
2396out:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002397 device->congestion_reason = reason;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002398 return r;
2399}
2400
Philipp Reisner6699b652011-02-09 11:10:24 +01002401static void drbd_init_workqueue(struct drbd_work_queue* wq)
2402{
Philipp Reisner6699b652011-02-09 11:10:24 +01002403 spin_lock_init(&wq->q_lock);
2404 INIT_LIST_HEAD(&wq->q);
Lars Ellenberg8c0785a2011-10-19 11:50:57 +02002405 init_waitqueue_head(&wq->q_wait);
Philipp Reisner6699b652011-02-09 11:10:24 +01002406}
2407
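/*
 * Look up a connection by its resource name; on success a reference on
 * the connection is taken.  Minimal usage sketch (the resource name "r0"
 * is only an example); the caller must drop the reference again:
 *
 *	connection = conn_get_by_name("r0");
 *	if (connection) {
 *		...
 *		kref_put(&connection->kref, drbd_destroy_connection);
 *	}
 */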
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002408struct drbd_connection *conn_get_by_name(const char *name)
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002409{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002410 struct drbd_connection *connection;
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002411 struct drbd_resource *resource;
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002412
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002413 if (!name || !name[0])
2414 return NULL;
2415
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002416 rcu_read_lock();
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002417 for_each_resource_rcu(resource, &drbd_resources) {
2418 if (!strcmp(resource->name, name)) {
2419 connection = first_connection(resource);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002420 kref_get(&connection->kref);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002421 goto found;
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002422 }
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002423 }
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002424 connection = NULL;
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002425found:
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002426 rcu_read_unlock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002427 return connection;
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002428}
2429
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002430struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002431 void *peer_addr, int peer_addr_len)
2432{
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002433 struct drbd_resource *resource;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002434 struct drbd_connection *connection;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002435
2436 rcu_read_lock();
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002437 for_each_resource_rcu(resource, &drbd_resources) {
2438 for_each_connection_rcu(connection, resource) {
2439 if (connection->my_addr_len == my_addr_len &&
2440 connection->peer_addr_len == peer_addr_len &&
2441 !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2442 !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2443 kref_get(&connection->kref);
2444 goto found;
2445 }
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002446 }
2447 }
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002448 connection = NULL;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002449found:
2450 rcu_read_unlock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002451 return connection;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002452}
2453
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002454static int drbd_alloc_socket(struct drbd_socket *socket)
2455{
2456 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2457 if (!socket->rbuf)
2458 return -ENOMEM;
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002459 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2460 if (!socket->sbuf)
2461 return -ENOMEM;
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002462 return 0;
2463}
2464
2465static void drbd_free_socket(struct drbd_socket *socket)
2466{
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002467 free_page((unsigned long) socket->sbuf);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002468 free_page((unsigned long) socket->rbuf);
2469}
2470
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002471void conn_free_crypto(struct drbd_connection *connection)
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002472{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002473 drbd_free_sock(connection);
Philipp Reisner1d041222011-04-22 15:20:23 +02002474
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002475 crypto_free_hash(connection->csums_tfm);
2476 crypto_free_hash(connection->verify_tfm);
2477 crypto_free_hash(connection->cram_hmac_tfm);
2478 crypto_free_hash(connection->integrity_tfm);
2479 crypto_free_hash(connection->peer_integrity_tfm);
2480 kfree(connection->int_dig_in);
2481 kfree(connection->int_dig_vv);
Philipp Reisner1d041222011-04-22 15:20:23 +02002482
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002483 connection->csums_tfm = NULL;
2484 connection->verify_tfm = NULL;
2485 connection->cram_hmac_tfm = NULL;
2486 connection->integrity_tfm = NULL;
2487 connection->peer_integrity_tfm = NULL;
2488 connection->int_dig_in = NULL;
2489 connection->int_dig_vv = NULL;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002490}
2491
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002492int set_resource_options(struct drbd_connection *connection, struct res_opts *res_opts)
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002493{
2494 cpumask_var_t new_cpu_mask;
2495 int err;
2496
2497 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2498 return -ENOMEM;
2499 /*
2500 retcode = ERR_NOMEM;
2501 drbd_msg_put_info("unable to allocate cpumask");
2502 */
2503
2504 /* silently ignore cpu mask on UP kernel */
2505 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2506 /* FIXME: Get rid of constant 32 here */
Philipp Reisnerc5b005a2012-04-30 12:53:52 +02002507 err = bitmap_parse(res_opts->cpu_mask, 32,
2508 cpumask_bits(new_cpu_mask), nr_cpu_ids);
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002509 if (err) {
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002510 conn_warn(connection, "bitmap_parse() failed with %d\n", err);
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002511 /* retcode = ERR_CPU_MASK_PARSE; */
2512 goto fail;
2513 }
2514 }
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002515 connection->res_opts = *res_opts;
2516 if (!cpumask_equal(connection->cpu_mask, new_cpu_mask)) {
2517 cpumask_copy(connection->cpu_mask, new_cpu_mask);
2518 drbd_calc_cpu_mask(connection);
2519 connection->receiver.reset_cpu_mask = 1;
2520 connection->asender.reset_cpu_mask = 1;
2521 connection->worker.reset_cpu_mask = 1;
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002522 }
2523 err = 0;
2524
2525fail:
2526 free_cpumask_var(new_cpu_mask);
2527 return err;
2528
2529}
2530
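/*
 * Allocate a named resource object, take its initial kref and link it
 * into the global drbd_resources list.  Called from conn_create(), which
 * must run under genl_lock() (see below).
 */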
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002531struct drbd_resource *drbd_create_resource(const char *name)
2532{
2533 struct drbd_resource *resource;
2534
2535 resource = kmalloc(sizeof(struct drbd_resource), GFP_KERNEL);
2536 if (!resource)
2537 return NULL;
2538 resource->name = kstrdup(name, GFP_KERNEL);
2539 if (!resource->name) {
2540 kfree(resource);
2541 return NULL;
2542 }
2543 kref_init(&resource->kref);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002544 idr_init(&resource->devices);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002545 INIT_LIST_HEAD(&resource->connections);
2546 list_add_tail_rcu(&resource->resources, &drbd_resources);
2547 return resource;
2548}
2549
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002550/* caller must be under genl_lock() */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002551struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
Philipp Reisner21114382011-01-19 12:26:59 +01002552{
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002553 struct drbd_resource *resource;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002554 struct drbd_connection *connection;
Philipp Reisner21114382011-01-19 12:26:59 +01002555
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002556 connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2557 if (!connection)
Philipp Reisner21114382011-01-19 12:26:59 +01002558 return NULL;
2559
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002560 if (drbd_alloc_socket(&connection->data))
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002561 goto fail;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002562 if (drbd_alloc_socket(&connection->meta))
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002563 goto fail;
2564
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002565 if (!zalloc_cpumask_var(&connection->cpu_mask, GFP_KERNEL))
Philipp Reisner774b3052011-02-22 02:07:03 -05002566 goto fail;
2567
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002568 if (set_resource_options(connection, res_opts))
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002569 goto fail;
2570
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002571 connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2572 if (!connection->current_epoch)
Philipp Reisner12038a32011-11-09 19:18:00 +01002573 goto fail;
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01002574
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002575 INIT_LIST_HEAD(&connection->transfer_log);
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01002576
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002577 INIT_LIST_HEAD(&connection->current_epoch->list);
2578 connection->epochs = 1;
2579 spin_lock_init(&connection->epoch_lock);
2580 connection->write_ordering = WO_bdev_flush;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01002581
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002582 connection->send.seen_any_write_yet = false;
2583 connection->send.current_epoch_nr = 0;
2584 connection->send.current_epoch_writes = 0;
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01002585
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002586 resource = drbd_create_resource(name);
2587 if (!resource)
2588 goto fail;
2589
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002590 connection->cstate = C_STANDALONE;
2591 mutex_init(&connection->cstate_mutex);
2592 spin_lock_init(&connection->req_lock);
2593 mutex_init(&connection->conf_update);
2594 init_waitqueue_head(&connection->ping_wait);
2595 idr_init(&connection->volumes);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01002596
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002597 drbd_init_workqueue(&connection->sender_work);
2598 mutex_init(&connection->data.mutex);
2599 mutex_init(&connection->meta.mutex);
Philipp Reisner6699b652011-02-09 11:10:24 +01002600
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002601 drbd_thread_init(connection, &connection->receiver, drbdd_init, "receiver");
2602 drbd_thread_init(connection, &connection->worker, drbd_worker, "worker");
2603 drbd_thread_init(connection, &connection->asender, drbd_asender, "asender");
Philipp Reisner392c8802011-02-09 10:33:31 +01002604
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002605 kref_init(&connection->kref);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002606
2607 kref_get(&resource->kref);
2608 connection->resource = resource;
2609 list_add_tail_rcu(&connection->connections, &resource->connections);
Philipp Reisner21114382011-01-19 12:26:59 +01002610
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002611 return connection;
Philipp Reisner21114382011-01-19 12:26:59 +01002612
2613fail:
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002614 kfree(connection->current_epoch);
2615 free_cpumask_var(connection->cpu_mask);
2616 drbd_free_socket(&connection->meta);
2617 drbd_free_socket(&connection->data);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002618 kfree(connection);
Philipp Reisner21114382011-01-19 12:26:59 +01002619
2620 return NULL;
2621}
2622
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002623void drbd_destroy_connection(struct kref *kref)
Philipp Reisner21114382011-01-19 12:26:59 +01002624{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002625 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002626 struct drbd_resource *resource = connection->resource;
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002627
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002628 if (atomic_read(&connection->current_epoch->epoch_size) != 0)
2629 conn_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
2630 kfree(connection->current_epoch);
Philipp Reisner12038a32011-11-09 19:18:00 +01002631
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002632 idr_destroy(&connection->volumes);
Philipp Reisner21114382011-01-19 12:26:59 +01002633
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002634 free_cpumask_var(connection->cpu_mask);
2635 drbd_free_socket(&connection->meta);
2636 drbd_free_socket(&connection->data);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002637 kfree(connection->int_dig_in);
2638 kfree(connection->int_dig_vv);
2639 kfree(connection);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002640 kref_put(&resource->kref, drbd_destroy_resource);
Philipp Reisner21114382011-01-19 12:26:59 +01002641}
2642
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002643static int init_submitter(struct drbd_device *device)
Lars Ellenberg113fef92013-03-22 18:14:40 -06002644{
2645 /* opencoded create_singlethread_workqueue(),
2646 * to be able to say "drbd%d", ..., minor */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002647 device->submit.wq = alloc_workqueue("drbd%u_submit",
2648 WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor);
2649 if (!device->submit.wq)
Lars Ellenberg113fef92013-03-22 18:14:40 -06002650 return -ENOMEM;
2651
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002652 INIT_WORK(&device->submit.worker, do_submit);
2653 spin_lock_init(&device->submit.lock);
2654 INIT_LIST_HEAD(&device->submit.writes);
Lars Ellenberg113fef92013-03-22 18:14:40 -06002655 return 0;
2656}
2657
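/*
 * Create one replicated block device (one minor): allocate the
 * drbd_device and its single peer_device, set up the request queue,
 * gendisk and bitmap, and register the device in the minor, resource and
 * connection IDRs.  Each setup step has a matching unwind label below.
 */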
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002658enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigned int minor, int vnr)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002659{
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002660 struct drbd_resource *resource = connection->resource;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002661 struct drbd_device *device;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002662 struct drbd_peer_device *peer_device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002663 struct gendisk *disk;
2664 struct request_queue *q;
Andreas Gruenbacher93e4bf72013-05-23 14:57:17 +02002665 int id;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002666 enum drbd_ret_code err = ERR_NOMEM;
Philipp Reisner774b3052011-02-22 02:07:03 -05002667
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002668 device = minor_to_device(minor);
2669 if (device)
Philipp Reisner774b3052011-02-22 02:07:03 -05002670 return ERR_MINOR_EXISTS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002671
2672 /* GFP_KERNEL, we are outside of all write-out paths */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002673 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2674 if (!device)
Philipp Reisner774b3052011-02-22 02:07:03 -05002675 return ERR_NOMEM;
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002676 kref_init(&device->kref);
2677
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002678 peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
2679 if (!peer_device)
2680 goto out_no_peer_device;
Philipp Reisner774b3052011-02-22 02:07:03 -05002681
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002682 INIT_LIST_HEAD(&device->peer_devices);
2683 list_add(&peer_device->peer_devices, &device->peer_devices);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002684 kref_get(&resource->kref);
2685 device->resource = resource;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002686 kref_get(&connection->kref);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002687 peer_device->connection = connection;
2688 peer_device->device = device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002689
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002690 device->minor = minor;
2691 device->vnr = vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002692
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002693 drbd_init_set_defaults(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002694
2695 q = blk_alloc_queue(GFP_KERNEL);
2696 if (!q)
2697 goto out_no_q;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002698 device->rq_queue = q;
2699 q->queuedata = device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002700
2701 disk = alloc_disk(1);
2702 if (!disk)
2703 goto out_no_disk;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002704 device->vdisk = disk;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002705
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002706 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002707
2708 disk->queue = q;
2709 disk->major = DRBD_MAJOR;
2710 disk->first_minor = minor;
2711 disk->fops = &drbd_ops;
2712 sprintf(disk->disk_name, "drbd%d", minor);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002713 disk->private_data = device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002714
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002715 device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002716 /* we have no partitions. we contain only ourselves. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002717 device->this_bdev->bd_contains = device->this_bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002718
2719 q->backing_dev_info.congested_fn = drbd_congested;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002720 q->backing_dev_info.congested_data = device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002721
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01002722 blk_queue_make_request(q, drbd_make_request);
Lars Ellenberga73ff322012-06-25 19:15:38 +02002723 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002724	 /* Setting the max_hw_sectors to an odd value of 8 KiB here
2725	 triggers a max_bio_size message upon first attach or connect. */
2726 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002727 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2728 blk_queue_merge_bvec(q, drbd_merge_bvec);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002729 q->queue_lock = &connection->req_lock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002730
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002731 device->md_io_page = alloc_page(GFP_KERNEL);
2732 if (!device->md_io_page)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002733 goto out_no_io_page;
2734
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002735 if (drbd_bm_init(device))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002736 goto out_no_bitmap;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002737 device->read_requests = RB_ROOT;
2738 device->write_requests = RB_ROOT;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002739
Andreas Gruenbacher93e4bf72013-05-23 14:57:17 +02002740 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2741 if (id < 0) {
2742 if (id == -ENOSPC) {
Tejun Heo56de2102013-02-27 17:04:01 -08002743 err = ERR_MINOR_EXISTS;
2744 drbd_msg_put_info("requested minor exists already");
2745 }
Lars Ellenberg8432b312011-03-08 16:11:16 +01002746 goto out_no_minor_idr;
Tejun Heo56de2102013-02-27 17:04:01 -08002747 }
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002748 kref_get(&device->kref);
2749
2750 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2751 if (id < 0) {
2752 if (id == -ENOSPC) {
2753 err = ERR_MINOR_EXISTS;
2754 drbd_msg_put_info("requested minor exists already");
2755 }
2756 goto out_idr_remove_minor;
2757 }
2758 kref_get(&device->kref);
Tejun Heo56de2102013-02-27 17:04:01 -08002759
Andreas Gruenbacher93e4bf72013-05-23 14:57:17 +02002760 id = idr_alloc(&connection->volumes, device, vnr, vnr + 1, GFP_KERNEL);
2761 if (id < 0) {
2762 if (id == -ENOSPC) {
Tejun Heo56de2102013-02-27 17:04:01 -08002763 err = ERR_INVALID_REQUEST;
2764 drbd_msg_put_info("requested volume exists already");
2765 }
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002766 goto out_idr_remove_from_resource;
Lars Ellenberg569083c2011-03-07 09:49:02 +01002767 }
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002768 kref_get(&device->kref);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002769
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002770 if (init_submitter(device)) {
Lars Ellenberg113fef92013-03-22 18:14:40 -06002771 err = ERR_NOMEM;
2772 drbd_msg_put_info("unable to create submit workqueue");
2773 goto out_idr_remove_vol;
2774 }
2775
Philipp Reisner774b3052011-02-22 02:07:03 -05002776 add_disk(disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002777
Philipp Reisner2325eb62011-03-15 16:56:18 +01002778 /* inherit the connection state */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002779 device->state.conn = connection->cstate;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002780 if (device->state.conn == C_WF_REPORT_PARAMS)
2781 drbd_connected(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002782
Philipp Reisner774b3052011-02-22 02:07:03 -05002783 return NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002784
Lars Ellenberg113fef92013-03-22 18:14:40 -06002785out_idr_remove_vol:
Andreas Gruenbacher93e4bf72013-05-23 14:57:17 +02002786 idr_remove(&connection->volumes, vnr);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002787out_idr_remove_from_resource:
2788 idr_remove(&resource->devices, vnr);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002789out_idr_remove_minor:
Andreas Gruenbacher93e4bf72013-05-23 14:57:17 +02002790 idr_remove(&drbd_devices, minor);
Lars Ellenberg569083c2011-03-07 09:49:02 +01002791 synchronize_rcu();
Lars Ellenberg8432b312011-03-08 16:11:16 +01002792out_no_minor_idr:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002793 drbd_bm_cleanup(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002794out_no_bitmap:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002795 __free_page(device->md_io_page);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002796out_no_io_page:
2797 put_disk(disk);
2798out_no_disk:
2799 blk_cleanup_queue(q);
2800out_no_q:
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002801 kref_put(&connection->kref, drbd_destroy_connection);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002802 kref_put(&resource->kref, drbd_destroy_resource);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002803out_no_peer_device:
2804 kfree(device);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002805 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002806}
2807
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002808void drbd_delete_minor(struct drbd_device *device)
2809{
2810 struct drbd_resource *resource = device->resource;
2811 struct drbd_connection *connection;
2812 int refs = 3;
2813
2814 for_each_connection(connection, resource) {
2815 idr_remove(&connection->volumes, device->vnr);
2816 refs++;
2817 }
2818 idr_remove(&resource->devices, device->vnr);
2819 idr_remove(&drbd_devices, device_to_minor(device));
2820 del_gendisk(device->vdisk);
2821 synchronize_rcu();
2822 kref_sub(&device->kref, refs, drbd_destroy_device);
2823}
2824
Philipp Reisnerb411b362009-09-25 16:07:19 -07002825int __init drbd_init(void)
2826{
2827 int err;
2828
Philipp Reisner2b8a90b2011-01-10 11:15:17 +01002829 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002830 printk(KERN_ERR
Philipp Reisner81a5d602011-02-22 19:53:16 -05002831 "drbd: invalid minor_count (%d)\n", minor_count);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002832#ifdef MODULE
2833 return -EINVAL;
2834#else
Andreas Gruenbacher46530e82011-05-31 13:08:53 +02002835 minor_count = DRBD_MINOR_COUNT_DEF;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002836#endif
2837 }
2838
Philipp Reisnerb411b362009-09-25 16:07:19 -07002839 err = register_blkdev(DRBD_MAJOR, "drbd");
2840 if (err) {
2841 printk(KERN_ERR
2842 "drbd: unable to register block device major %d\n",
2843 DRBD_MAJOR);
2844 return err;
2845 }
2846
2847 register_reboot_notifier(&drbd_notifier);
2848
2849 /*
2850 * allocate all necessary structs
2851 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002852 init_waitqueue_head(&drbd_pp_wait);
2853
2854 drbd_proc = NULL; /* play safe for drbd_cleanup */
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002855 idr_init(&drbd_devices);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002856
Lars Ellenberg69babf02013-10-23 10:59:15 +02002857 rwlock_init(&global_state_lock);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002858 INIT_LIST_HEAD(&drbd_resources);
Lars Ellenberg69babf02013-10-23 10:59:15 +02002859
2860 err = drbd_genl_register();
2861 if (err) {
2862 printk(KERN_ERR "drbd: unable to register generic netlink family\n");
2863 goto fail;
2864 }
2865
Philipp Reisnerb411b362009-09-25 16:07:19 -07002866 err = drbd_create_mempools();
2867 if (err)
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002868 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002869
Wei Yongjun6110d702013-06-25 16:50:04 +02002870 err = -ENOMEM;
Lars Ellenberg8c484ee2010-03-11 16:47:58 +01002871 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002872 if (!drbd_proc) {
2873 printk(KERN_ERR "drbd: unable to register proc file\n");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002874 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002875 }
2876
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002877 retry.wq = create_singlethread_workqueue("drbd-reissue");
2878 if (!retry.wq) {
2879 printk(KERN_ERR "drbd: unable to create retry workqueue\n");
2880 goto fail;
2881 }
2882 INIT_WORK(&retry.worker, do_retry);
2883 spin_lock_init(&retry.lock);
2884 INIT_LIST_HEAD(&retry.writes);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002885
2886 printk(KERN_INFO "drbd: initialized. "
2887 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
2888 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
2889 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
2890 printk(KERN_INFO "drbd: registered as block device major %d\n",
2891 DRBD_MAJOR);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002892
2893 return 0; /* Success! */
2894
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002895fail:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002896 drbd_cleanup();
2897 if (err == -ENOMEM)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002898 printk(KERN_ERR "drbd: ran out of memory\n");
2899 else
2900 printk(KERN_ERR "drbd: initialization failure\n");
2901 return err;
2902}
2903
2904void drbd_free_bc(struct drbd_backing_dev *ldev)
2905{
2906 if (ldev == NULL)
2907 return;
2908
Tejun Heoe525fd82010-11-13 11:55:17 +01002909 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2910 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002911
Lars Ellenberg94ad0a12013-03-27 14:08:42 +01002912 kfree(ldev->disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002913 kfree(ldev);
2914}
2915
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002916void drbd_free_sock(struct drbd_connection *connection)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002917{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002918 if (connection->data.socket) {
2919 mutex_lock(&connection->data.mutex);
2920 kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
2921 sock_release(connection->data.socket);
2922 connection->data.socket = NULL;
2923 mutex_unlock(&connection->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002924 }
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002925 if (connection->meta.socket) {
2926 mutex_lock(&connection->meta.mutex);
2927 kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
2928 sock_release(connection->meta.socket);
2929 connection->meta.socket = NULL;
2930 mutex_unlock(&connection->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002931 }
2932}
2933
Philipp Reisnerb411b362009-09-25 16:07:19 -07002934/* meta data management */
2935
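/*
 * Write out dirty meta data for every volume of a connection.  The RCU
 * read lock is dropped around drbd_md_sync(), so each device is pinned
 * with a kref while it is being written out.
 */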
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002936void conn_md_sync(struct drbd_connection *connection)
Philipp Reisner19fffd72012-08-28 16:48:03 +02002937{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002938 struct drbd_device *device;
Philipp Reisner19fffd72012-08-28 16:48:03 +02002939 int vnr;
2940
2941 rcu_read_lock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002942 idr_for_each_entry(&connection->volumes, device, vnr) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002943 kref_get(&device->kref);
Philipp Reisner19fffd72012-08-28 16:48:03 +02002944 rcu_read_unlock();
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002945 drbd_md_sync(device);
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002946 kref_put(&device->kref, drbd_destroy_device);
Philipp Reisner19fffd72012-08-28 16:48:03 +02002947 rcu_read_lock();
2948 }
2949 rcu_read_unlock();
2950}
2951
Lars Ellenbergae8bf312013-03-19 18:16:43 +01002952/* aligned 4kByte */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002953struct meta_data_on_disk {
Lars Ellenbergcccac982013-03-19 18:16:46 +01002954 u64 la_size_sect; /* last agreed size. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002955 u64 uuid[UI_SIZE]; /* UUIDs. */
2956 u64 device_uuid;
2957 u64 reserved_u64_1;
2958 u32 flags; /* MDF */
2959 u32 magic;
2960 u32 md_size_sect;
2961 u32 al_offset; /* offset to this block */
Lars Ellenbergae8bf312013-03-19 18:16:43 +01002962 u32 al_nr_extents; /* important for restoring the AL (userspace) */
Lars Ellenbergf3990022011-03-23 14:31:09 +01002963 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002964 u32 bm_offset; /* offset to the bitmap, from here */
2965 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
Philipp Reisner99432fc2011-05-20 16:39:13 +02002966 u32 la_peer_max_bio_size; /* last peer max_bio_size */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002967
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01002968 /* see al_tr_number_to_on_disk_sector() */
2969 u32 al_stripes;
2970 u32 al_stripe_size_4k;
2971
2972 u8 reserved_u8[4096 - (7*8 + 10*4)];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002973} __packed;
2974
Philipp Reisnerd752b262013-06-25 16:50:08 +02002975
2976
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002977void drbd_md_write(struct drbd_device *device, void *b)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002978{
Philipp Reisnerd752b262013-06-25 16:50:08 +02002979 struct meta_data_on_disk *buffer = b;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002980 sector_t sector;
2981 int i;
2982
Lars Ellenbergae8bf312013-03-19 18:16:43 +01002983 memset(buffer, 0, sizeof(*buffer));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002984
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002985 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002986 for (i = UI_CURRENT; i < UI_SIZE; i++)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002987 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
2988 buffer->flags = cpu_to_be32(device->ldev->md.flags);
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02002989 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002990
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002991 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
2992 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
2993 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002994 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002995 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002996
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002997 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
2998 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002999
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003000 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3001 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003002
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003003 D_ASSERT(drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3004 sector = device->ldev->md.md_offset;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003005
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003006 if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003007 /* this was a try anyways ... */
3008 dev_err(DEV, "meta data update failed!\n");
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003009 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003010 }
Philipp Reisnerd752b262013-06-25 16:50:08 +02003011}
3012
3013/**
3014 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003015 * @device: DRBD device.
Philipp Reisnerd752b262013-06-25 16:50:08 +02003016 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003017void drbd_md_sync(struct drbd_device *device)
Philipp Reisnerd752b262013-06-25 16:50:08 +02003018{
3019 struct meta_data_on_disk *buffer;
3020
3021 /* Don't accidentally change the DRBD meta data layout. */
3022 BUILD_BUG_ON(UI_SIZE != 4);
3023 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
3024
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003025 del_timer(&device->md_sync_timer);
Philipp Reisnerd752b262013-06-25 16:50:08 +02003026 /* timer may be rearmed by drbd_md_mark_dirty() now. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003027 if (!test_and_clear_bit(MD_DIRTY, &device->flags))
Philipp Reisnerd752b262013-06-25 16:50:08 +02003028 return;
3029
3030 /* We use here D_FAILED and not D_ATTACHING because we try to write
3031 * metadata even if we detach due to a disk failure! */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003032 if (!get_ldev_if_state(device, D_FAILED))
Philipp Reisnerd752b262013-06-25 16:50:08 +02003033 return;
3034
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003035 buffer = drbd_md_get_buffer(device);
Philipp Reisnerd752b262013-06-25 16:50:08 +02003036 if (!buffer)
3037 goto out;
3038
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003039 drbd_md_write(device, buffer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003040
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003041 /* Update device->ldev->md.la_size_sect,
Philipp Reisnerb411b362009-09-25 16:07:19 -07003042 * since we updated it on metadata. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003043 device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003044
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003045 drbd_md_put_buffer(device);
Philipp Reisnere1711732011-06-27 11:51:46 +02003046out:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003047 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003048}
3049
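/*
 * Validate the striped activity log geometry read from disk and fill in
 * the in-core values.  Both fields zero means the old fixed-size layout;
 * a total size outside the 32 kB .. 16 GB range is rejected.
 */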
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003050static int check_activity_log_stripe_size(struct drbd_device *device,
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003051 struct meta_data_on_disk *on_disk,
3052 struct drbd_md *in_core)
3053{
3054 u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
3055 u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
3056 u64 al_size_4k;
3057
3058 /* both not set: default to old fixed size activity log */
3059 if (al_stripes == 0 && al_stripe_size_4k == 0) {
3060 al_stripes = 1;
3061 al_stripe_size_4k = MD_32kB_SECT/8;
3062 }
3063
3064 /* some paranoia plausibility checks */
3065
3066 /* we need both values to be set */
3067 if (al_stripes == 0 || al_stripe_size_4k == 0)
3068 goto err;
3069
3070 al_size_4k = (u64)al_stripes * al_stripe_size_4k;
3071
3072 /* Upper limit of activity log area, to avoid potential overflow
3073	 * problems in al_tr_number_to_on_disk_sector(). Right now, more
3074	 * than 72 * 4k blocks total only increase the amount of history, so
3075	 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
3076 if (al_size_4k > (16 * 1024 * 1024/4))
3077 goto err;
3078
3079 /* Lower limit: we need at least 8 transaction slots (32kB)
3080 * to not break existing setups */
3081 if (al_size_4k < MD_32kB_SECT/8)
3082 goto err;
3083
3084 in_core->al_stripe_size_4k = al_stripe_size_4k;
3085 in_core->al_stripes = al_stripes;
3086 in_core->al_size_4k = al_size_4k;
3087
3088 return 0;
3089err:
3090 dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3091 al_stripes, al_stripe_size_4k);
3092 return -EINVAL;
3093}
3094
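/*
 * Cross-check the meta data offsets against the activity log geometry
 * computed above and against the capacity of the meta data device, for
 * both internal (negative al_offset) and external/fixed-size layouts.
 */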
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003095static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003096{
3097 sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3098 struct drbd_md *in_core = &bdev->md;
3099 s32 on_disk_al_sect;
3100 s32 on_disk_bm_sect;
3101
3102 /* The on-disk size of the activity log, calculated from offsets, and
3103 * the size of the activity log calculated from the stripe settings,
3104 * should match.
3105	 * Though we could relax this a bit: it is ok if the striped activity log
3106 * fits in the available on-disk activity log size.
3107 * Right now, that would break how resize is implemented.
3108 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3109 * of possible unused padding space in the on disk layout. */
3110 if (in_core->al_offset < 0) {
3111 if (in_core->bm_offset > in_core->al_offset)
3112 goto err;
3113 on_disk_al_sect = -in_core->al_offset;
3114 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3115 } else {
3116 if (in_core->al_offset != MD_4kB_SECT)
3117 goto err;
3118 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3119 goto err;
3120
3121 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3122 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3123 }
3124
3125 /* old fixed size meta data is exactly that: fixed. */
3126 if (in_core->meta_dev_idx >= 0) {
3127 if (in_core->md_size_sect != MD_128MB_SECT
3128 || in_core->al_offset != MD_4kB_SECT
3129 || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3130 || in_core->al_stripes != 1
3131 || in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3132 goto err;
3133 }
3134
3135 if (capacity < in_core->md_size_sect)
3136 goto err;
3137 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3138 goto err;
3139
3140 /* should be aligned, and at least 32k */
3141 if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3142 goto err;
3143
3144 /* should fit (for now: exactly) into the available on-disk space;
3145 * overflow prevention is in check_activity_log_stripe_size() above. */
3146 if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3147 goto err;
3148
3149 /* again, should be aligned */
3150 if (in_core->bm_offset & 7)
3151 goto err;
3152
3153 /* FIXME check for device grow with flex external meta data? */
3154
3155 /* can the available bitmap space cover the last agreed device size? */
3156 if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3157 goto err;
3158
3159 return 0;
3160
3161err:
3162 dev_err(DEV, "meta data offsets don't make sense: idx=%d "
3163 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3164 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3165 in_core->meta_dev_idx,
3166 in_core->al_stripes, in_core->al_stripe_size_4k,
3167 in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3168 (unsigned long long)in_core->la_size_sect,
3169 (unsigned long long)capacity);
3170
3171 return -EINVAL;
3172}
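/* The two layouts distinguished above, with offsets relative to the
 * superblock at md_offset (units: 512-byte sectors):
 *
 *   al_offset < 0 (flexible internal meta data):
 *	... | bitmap | activity log | superblock
 *
 *   al_offset == MD_4kB_SECT (external meta data):
 *	superblock | activity log | bitmap
 *
 * For indexed external meta data (meta_dev_idx >= 0) the historic fixed
 * 128MB layout is enforced on top of that. */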
3173
3174
Philipp Reisnerb411b362009-09-25 16:07:19 -07003175/**
3176 * drbd_md_read() - Reads in the meta data super block
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003177 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003178 * @bdev: Device from which the meta data should be read in.
3179 *
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003180 * Return NO_ERROR on success, or an enum drbd_ret_code in case
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003181 * something goes wrong.
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003182 *
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003183 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003184 * even before @bdev is assigned to @device->ldev.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003185 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003186int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003187{
3188 struct meta_data_on_disk *buffer;
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003189 u32 magic, flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003190 int i, rv = NO_ERROR;
3191
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003192 if (device->state.disk != D_DISKLESS)
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003193 return ERR_DISK_CONFIGURED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003194
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003195 buffer = drbd_md_get_buffer(device);
Philipp Reisnere1711732011-06-27 11:51:46 +02003196 if (!buffer)
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003197 return ERR_NOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003198
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003199 /* First, figure out where our meta data superblock is located,
3200 * and read it. */
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003201 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3202 bdev->md.md_offset = drbd_md_ss(bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003203
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003204 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003205 /* NOTE: can't do normal error processing here as this is
Philipp Reisnerb411b362009-09-25 16:07:19 -07003206 called BEFORE disk is attached */
3207 dev_err(DEV, "Error while reading metadata.\n");
3208 rv = ERR_IO_MD_DISK;
3209 goto err;
3210 }
3211
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003212 magic = be32_to_cpu(buffer->magic);
3213 flags = be32_to_cpu(buffer->flags);
3214 if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3215 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3216 /* btw: that's Activity Log clean, not "all" clean. */
3217 dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3218 rv = ERR_MD_UNCLEAN;
3219 goto err;
3220 }
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003221
3222 rv = ERR_MD_INVALID;
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003223 if (magic != DRBD_MD_MAGIC_08) {
Philipp Reisner43de7c82011-11-10 13:16:13 +01003224 if (magic == DRBD_MD_MAGIC_07)
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003225 dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3226 else
3227 dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003228 goto err;
3229 }
3230
3231 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3232 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3233 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003234 goto err;
3235 }
3236
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003237
3238 /* convert to in_core endian */
3239 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003240 for (i = UI_CURRENT; i < UI_SIZE; i++)
3241 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3242 bdev->md.flags = be32_to_cpu(buffer->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003243 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3244
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003245 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3246 bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3247 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3248
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003249 if (check_activity_log_stripe_size(device, buffer, &bdev->md))
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003250 goto err;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003251 if (check_offsets_and_sizes(device, bdev))
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003252 goto err;
3253
Philipp Reisnerb411b362009-09-25 16:07:19 -07003254 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3255 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3256 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003257 goto err;
3258 }
3259 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3260 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3261 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003262 goto err;
3263 }
3264
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003265 rv = NO_ERROR;
3266
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003267 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003268 if (device->state.conn < C_CONNECTED) {
Lars Ellenbergdb141b22012-06-25 19:15:58 +02003269 unsigned int peer;
Philipp Reisner99432fc2011-05-20 16:39:13 +02003270 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
Lars Ellenbergdb141b22012-06-25 19:15:58 +02003271 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003272 device->peer_max_bio_size = peer;
Philipp Reisner99432fc2011-05-20 16:39:13 +02003273 }
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003274 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003275
3276 err:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003277 drbd_md_put_buffer(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003278
3279 return rv;
3280}
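/* Note on the order of checks in drbd_md_read() above: read the
 * superblock, verify magic and the MDF_AL_CLEAN flag, convert the fields
 * to in-core (cpu) endianness, then let the two helpers above validate
 * the activity log striping and the offsets/sizes.  The final two
 * comparisons against buffer->bm_offset and buffer->md_size_sect re-check
 * values converted only a few lines earlier and act as belt-and-braces
 * sanity checks. */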
3281
3282/**
3283 * drbd_md_mark_dirty() - Mark meta data super block as dirty
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003284 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003285 *
3286 * Call this function if you change anything that should be written to
3287 * the meta-data super block. This function sets MD_DIRTY, and starts a
3288 * timer that ensures drbd_md_sync() is called within five seconds (via
 * w_md_sync() from the worker, with a warning, if nobody did it earlier).
3289 */
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003290#ifdef DEBUG
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003291void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
Lars Ellenbergee15b032010-09-03 10:00:09 +02003292{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003293 if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
3294 mod_timer(&device->md_sync_timer, jiffies + HZ);
3295 device->last_md_mark_dirty.line = line;
3296 device->last_md_mark_dirty.func = func;
Lars Ellenbergee15b032010-09-03 10:00:09 +02003297 }
3298}
3299#else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003300void drbd_md_mark_dirty(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003301{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003302 if (!test_and_set_bit(MD_DIRTY, &device->flags))
3303 mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003304}
Lars Ellenbergee15b032010-09-03 10:00:09 +02003305#endif
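/* Callers in this file follow the pattern "change the in-core meta data,
 * then drbd_md_mark_dirty()"; see __drbd_uuid_set(), drbd_uuid_set_bm()
 * and drbd_md_set_flag()/drbd_md_clear_flag() below.  If nothing calls
 * drbd_md_sync() before the timer fires, md_sync_timer_fn() queues
 * w_md_sync(), which writes the meta data from the worker (and warns). */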
Philipp Reisnerb411b362009-09-25 16:07:19 -07003306
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003307void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003308{
3309 int i;
3310
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003311 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003312 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003313}
3314
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003315void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003316{
3317 if (idx == UI_CURRENT) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003318 if (device->state.role == R_PRIMARY)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003319 val |= 1;
3320 else
3321 val &= ~((u64)1);
3322
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003323 drbd_set_ed_uuid(device, val);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003324 }
3325
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003326 device->ldev->md.uuid[idx] = val;
3327 drbd_md_mark_dirty(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003328}
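/* Locking convention for the uuid setters: __drbd_uuid_set() expects the
 * caller to hold device->ldev->md.uuid_lock; _drbd_uuid_set() and
 * drbd_uuid_set() below are wrappers that take that lock themselves, and
 * drbd_uuid_set() additionally rotates a previously set slot into
 * UI_HISTORY_START first.  Note that for UI_CURRENT the lowest bit
 * encodes the role (set while R_PRIMARY). */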
3329
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003330void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003331{
3332 unsigned long flags;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003333 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3334 __drbd_uuid_set(device, idx, val);
3335 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003336}
Philipp Reisnerb411b362009-09-25 16:07:19 -07003337
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003338void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003339{
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003340 unsigned long flags;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003341 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3342 if (device->ldev->md.uuid[idx]) {
3343 drbd_uuid_move_history(device);
3344 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003345 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003346 __drbd_uuid_set(device, idx, val);
3347 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003348}
3349
3350/**
3351 * drbd_uuid_new_current() - Creates a new current UUID
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003352 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003353 *
3354 * Creates a new current UUID, and rotates the old current UUID into
3355 * the bitmap slot. Causes an incremental resync upon next connect.
3356 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003357void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003358{
3359 u64 val;
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003360 unsigned long long bm_uuid;
3361
3362 get_random_bytes(&val, sizeof(u64));
3363
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003364 spin_lock_irq(&device->ldev->md.uuid_lock);
3365 bm_uuid = device->ldev->md.uuid[UI_BITMAP];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003366
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003367 if (bm_uuid)
3368 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3369
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003370 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3371 __drbd_uuid_set(device, UI_CURRENT, val);
3372 spin_unlock_irq(&device->ldev->md.uuid_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003373
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003374 drbd_print_uuids(device, "new current UUID");
Lars Ellenbergaaa8e2b2010-10-15 13:16:53 +02003375 /* get it to stable storage _now_ */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003376 drbd_md_sync(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003377}
3378
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003379void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003380{
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003381 unsigned long flags;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003382 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003383 return;
3384
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003385 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003386 if (val == 0) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003387 drbd_uuid_move_history(device);
3388 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3389 device->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003390 } else {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003391 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003392 if (bm_uuid)
3393 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003394
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003395 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003396 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003397 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003398
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003399 drbd_md_mark_dirty(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003400}
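/* drbd_uuid_set_bm(device, 0) retires the bitmap UUID into the history
 * slots; a non-zero val installs it as the new bitmap UUID with bit 0
 * cleared (the same bit that __drbd_uuid_set() uses for the role
 * encoding of UI_CURRENT). */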
3401
3402/**
3403 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003404 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003405 *
3406 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3407 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003408int drbd_bmio_set_n_write(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003409{
3410 int rv = -EIO;
3411
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003412 if (get_ldev_if_state(device, D_ATTACHING)) {
3413 drbd_md_set_flag(device, MDF_FULL_SYNC);
3414 drbd_md_sync(device);
3415 drbd_bm_set_all(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003416
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003417 rv = drbd_bm_write(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003418
3419 if (!rv) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003420 drbd_md_clear_flag(device, MDF_FULL_SYNC);
3421 drbd_md_sync(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003422 }
3423
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003424 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003425 }
3426
3427 return rv;
3428}
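/* Note the MDF_FULL_SYNC handshake above: the flag is set and written to
 * disk *before* the bitmap itself, and only cleared (and synced again)
 * after drbd_bm_write() succeeded.  A crash in between therefore still
 * forces a full sync after the next attach. */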
3429
3430/**
3431 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003432 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003433 *
3434 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3435 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003436int drbd_bmio_clear_n_write(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003437{
3438 int rv = -EIO;
3439
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003440 drbd_resume_al(device);
3441 if (get_ldev_if_state(device, D_ATTACHING)) {
3442 drbd_bm_clear_all(device);
3443 rv = drbd_bm_write(device);
3444 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003445 }
3446
3447 return rv;
3448}
3449
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003450static int w_bitmap_io(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003451{
3452 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003453 struct drbd_device *device = w->device;
Lars Ellenberg02851e92010-12-16 14:47:39 +01003454 int rv = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003455
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003456 D_ASSERT(atomic_read(&device->ap_bio_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003457
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003458 if (get_ldev(device)) {
3459 drbd_bm_lock(device, work->why, work->flags);
3460 rv = work->io_fn(device);
3461 drbd_bm_unlock(device);
3462 put_ldev(device);
Lars Ellenberg02851e92010-12-16 14:47:39 +01003463 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003464
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003465 clear_bit_unlock(BITMAP_IO, &device->flags);
3466 wake_up(&device->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003467
3468 if (work->done)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003469 work->done(device, rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003470
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003471 clear_bit(BITMAP_IO_QUEUED, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003472 work->why = NULL;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003473 work->flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003474
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003475 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003476}
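/* Ordering in w_bitmap_io() above: BITMAP_IO is cleared and misc_wait is
 * woken before the done() callback runs; BITMAP_IO_QUEUED is cleared only
 * after done() returned, and why/flags are reset last.  This mirrors the
 * queueing side in drbd_queue_bitmap_io() below. */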
3477
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003478void drbd_ldev_destroy(struct drbd_device *device)
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003479{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003480 lc_destroy(device->resync);
3481 device->resync = NULL;
3482 lc_destroy(device->act_log);
3483 device->act_log = NULL;
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003484 __no_warn(local,
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003485 drbd_free_bc(device->ldev);
3486 device->ldev = NULL;);
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003487
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003488 clear_bit(GO_DISKLESS, &device->flags);
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003489}
3490
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003491static int w_go_diskless(struct drbd_work *w, int unused)
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003492{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003493 struct drbd_device *device = w->device;
Philipp Reisner00d56942011-02-09 18:09:48 +01003494
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003495 D_ASSERT(device->state.disk == D_FAILED);
Lars Ellenberg9d282872010-10-14 13:57:07 +02003496 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3497 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003498 * the protected members anymore, though, so once put_ldev reaches zero
3499 * again, it will be safe to free them. */
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02003500
3501 /* Try to write changed bitmap pages, read errors may have just
3502 * set some bits outside the area covered by the activity log.
3503 *
3504 * If we have an IO error during the bitmap writeout,
3505 * we will want a full sync next time, just in case.
3506 * (Do we want a specific meta data flag for this?)
3507 *
3508 * If that does not make it to stable storage either,
Philipp Reisnerfd0017c2012-10-19 14:19:23 +02003509 * we cannot do anything about that anymore.
3510 *
3511 * We still need to check if both bitmap and ldev are present, we may
3512 * end up here after a failed attach, before ldev was even assigned.
3513 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003514 if (device->bitmap && device->ldev) {
Philipp Reisnerbb451852013-03-27 14:08:39 +01003515	/* An interrupted resync or similar is allowed to recount bits
3516 * while we detach.
3517 * Any modifications would not be expected anymore, though.
3518 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003519 if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
Philipp Reisnerbb451852013-03-27 14:08:39 +01003520 "detach", BM_LOCKED_TEST_ALLOWED)) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003521 if (test_bit(WAS_READ_ERROR, &device->flags)) {
3522 drbd_md_set_flag(device, MDF_FULL_SYNC);
3523 drbd_md_sync(device);
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02003524 }
3525 }
3526 }
3527
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003528 drbd_force_state(device, NS(disk, D_DISKLESS));
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003529 return 0;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003530}
3531
Philipp Reisnerb411b362009-09-25 16:07:19 -07003532/**
3533 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003534 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003535 * @io_fn: IO callback to be called when bitmap IO is possible
3536 * @done: callback to be called after the bitmap IO was performed
3537 * @why: Descriptive text of the reason for doing the IO
3538 *
3539 * While IO on the bitmap happens we freeze application IO, thus ensuring
3540 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3541 * called from worker context. It MUST NOT be used while a previous such
3542 * work is still pending!
3543 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003544void drbd_queue_bitmap_io(struct drbd_device *device,
Andreas Gruenbacher54761692011-05-30 16:15:21 +02003545 int (*io_fn)(struct drbd_device *),
3546 void (*done)(struct drbd_device *, int),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003547 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003548{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003549 D_ASSERT(current == first_peer_device(device)->connection->worker.task);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003550
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003551 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &device->flags));
3552 D_ASSERT(!test_bit(BITMAP_IO, &device->flags));
3553 D_ASSERT(list_empty(&device->bm_io_work.w.list));
3554 if (device->bm_io_work.why)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003555 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003556 why, device->bm_io_work.why);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003557
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003558 device->bm_io_work.io_fn = io_fn;
3559 device->bm_io_work.done = done;
3560 device->bm_io_work.why = why;
3561 device->bm_io_work.flags = flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003562
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003563 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003564 set_bit(BITMAP_IO, &device->flags);
3565 if (atomic_read(&device->ap_bio_cnt) == 0) {
3566 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003567 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003568 }
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003569 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003570}
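/* Illustrative only (not an actual call site in this file): queueing a
 * "set all bits and write out" pass from worker context might look like
 *
 *	drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write, done_fn,
 *			     "set_n_write", BM_LOCKED_SET_ALLOWED);
 *
 * where done_fn stands for a hypothetical
 * void (*)(struct drbd_device *, int) completion callback, and the why
 * string and flags are merely examples. */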
3571
3572/**
3573 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003574 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003575 * @io_fn: IO callback to be called when bitmap IO is possible
3576 * @why: Descriptive text of the reason for doing the IO
3577 *
3578 * Freezes application IO while the actual IO operation runs. This
3579 * function MUST NOT be called from worker context.
3580 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003581int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003582 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003583{
3584 int rv;
3585
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003586 D_ASSERT(current != first_peer_device(device)->connection->worker.task);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003587
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003588 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003589 drbd_suspend_io(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003590
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003591 drbd_bm_lock(device, why, flags);
3592 rv = io_fn(device);
3593 drbd_bm_unlock(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003594
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003595 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003596 drbd_resume_io(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003597
3598 return rv;
3599}
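/* In contrast to drbd_queue_bitmap_io() above, drbd_bitmap_io() runs
 * io_fn synchronously in the caller's context (asserted to not be the
 * worker) and suspends/resumes application IO around it unless
 * BM_LOCKED_SET_ALLOWED is set in flags. */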
3600
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003601void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003602{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003603 if ((device->ldev->md.flags & flag) != flag) {
3604 drbd_md_mark_dirty(device);
3605 device->ldev->md.flags |= flag;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003606 }
3607}
3608
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003609void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003610{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003611 if ((device->ldev->md.flags & flag) != 0) {
3612 drbd_md_mark_dirty(device);
3613 device->ldev->md.flags &= ~flag;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003614 }
3615}
3616int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3617{
3618 return (bdev->md.flags & flag) != 0;
3619}
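/* drbd_md_set_flag()/drbd_md_clear_flag() only mark the meta data dirty
 * when the flag value actually changes, so redundant calls are cheap.
 * The typical pairing in this file (see drbd_bmio_set_n_write() above) is
 * to set or clear the flag and then call drbd_md_sync() to push it out. */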
3620
3621static void md_sync_timer_fn(unsigned long data)
3622{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003623 struct drbd_device *device = (struct drbd_device *) data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003624
Lars Ellenbergb792b652012-08-22 14:59:06 +02003625 /* must not double-queue! */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003626 if (list_empty(&device->md_sync_work.list))
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003627 drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &device->md_sync_work);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003628}
3629
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003630static int w_md_sync(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003631{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003632 struct drbd_device *device = w->device;
Philipp Reisner00d56942011-02-09 18:09:48 +01003633
Philipp Reisnerb411b362009-09-25 16:07:19 -07003634 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
Lars Ellenbergee15b032010-09-03 10:00:09 +02003635#ifdef DEBUG
3636 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003637 device->last_md_mark_dirty.func, device->last_md_mark_dirty.line);
Lars Ellenbergee15b032010-09-03 10:00:09 +02003638#endif
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003639 drbd_md_sync(device);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003640 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003641}
3642
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003643const char *cmdname(enum drbd_packet cmd)
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003644{
3645 /* THINK may need to become several global tables
3646 * when we want to support more than
3647 * one PRO_VERSION */
3648 static const char *cmdnames[] = {
3649 [P_DATA] = "Data",
3650 [P_DATA_REPLY] = "DataReply",
3651 [P_RS_DATA_REPLY] = "RSDataReply",
3652 [P_BARRIER] = "Barrier",
3653 [P_BITMAP] = "ReportBitMap",
3654 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
3655 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
3656 [P_UNPLUG_REMOTE] = "UnplugRemote",
3657 [P_DATA_REQUEST] = "DataRequest",
3658 [P_RS_DATA_REQUEST] = "RSDataRequest",
3659 [P_SYNC_PARAM] = "SyncParam",
3660 [P_SYNC_PARAM89] = "SyncParam89",
3661 [P_PROTOCOL] = "ReportProtocol",
3662 [P_UUIDS] = "ReportUUIDs",
3663 [P_SIZES] = "ReportSizes",
3664 [P_STATE] = "ReportState",
3665 [P_SYNC_UUID] = "ReportSyncUUID",
3666 [P_AUTH_CHALLENGE] = "AuthChallenge",
3667 [P_AUTH_RESPONSE] = "AuthResponse",
3668 [P_PING] = "Ping",
3669 [P_PING_ACK] = "PingAck",
3670 [P_RECV_ACK] = "RecvAck",
3671 [P_WRITE_ACK] = "WriteAck",
3672 [P_RS_WRITE_ACK] = "RSWriteAck",
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02003673 [P_SUPERSEDED] = "Superseded",
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003674 [P_NEG_ACK] = "NegAck",
3675 [P_NEG_DREPLY] = "NegDReply",
3676 [P_NEG_RS_DREPLY] = "NegRSDReply",
3677 [P_BARRIER_ACK] = "BarrierAck",
3678 [P_STATE_CHG_REQ] = "StateChgRequest",
3679 [P_STATE_CHG_REPLY] = "StateChgReply",
3680 [P_OV_REQUEST] = "OVRequest",
3681 [P_OV_REPLY] = "OVReply",
3682 [P_OV_RESULT] = "OVResult",
3683 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
3684 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
3685 [P_COMPRESSED_BITMAP] = "CBitmap",
3686 [P_DELAY_PROBE] = "DelayProbe",
3687 [P_OUT_OF_SYNC] = "OutOfSync",
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003688 [P_RETRY_WRITE] = "RetryWrite",
Lars Ellenbergae25b332011-04-24 00:01:16 +02003689 [P_RS_CANCEL] = "RSCancel",
3690 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
3691 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
Philipp Reisner036b17e2011-05-16 17:38:11 +02003692 [P_RETRY_WRITE] = "retry_write",
3693 [P_PROTOCOL_UPDATE] = "protocol_update",
Lars Ellenbergae25b332011-04-24 00:01:16 +02003694
3695 /* enum drbd_packet, but not commands - obsoleted flags:
3696 * P_MAY_IGNORE
3697 * P_MAX_OPT_CMD
3698 */
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003699 };
3700
Lars Ellenbergae25b332011-04-24 00:01:16 +02003701 /* too big for the array: 0xfffX */
Andreas Gruenbachere5d6f332011-03-28 16:44:40 +02003702 if (cmd == P_INITIAL_META)
3703 return "InitialMeta";
3704 if (cmd == P_INITIAL_DATA)
3705 return "InitialData";
Andreas Gruenbacher60381782011-03-28 17:05:50 +02003706 if (cmd == P_CONNECTION_FEATURES)
3707 return "ConnectionFeatures";
Andreas Gruenbacher6e849ce2011-03-14 17:27:45 +01003708 if (cmd >= ARRAY_SIZE(cmdnames))
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003709 return "Unknown";
3710 return cmdnames[cmd];
3711}
3712
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003713/**
3714 * drbd_wait_misc - wait for a request to make progress
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003715 * @device: device associated with the request
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003716 * @i: the struct drbd_interval embedded in struct drbd_request or
3717 * struct drbd_peer_request
3718 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003719int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003720{
Philipp Reisner44ed1672011-04-19 17:10:19 +02003721 struct net_conf *nc;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003722 DEFINE_WAIT(wait);
3723 long timeout;
3724
Philipp Reisner44ed1672011-04-19 17:10:19 +02003725 rcu_read_lock();
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003726 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02003727 if (!nc) {
3728 rcu_read_unlock();
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003729 return -ETIMEDOUT;
Philipp Reisner44ed1672011-04-19 17:10:19 +02003730 }
3731 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3732 rcu_read_unlock();
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003733
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003734 /* Indicate to wake up device->misc_wait on progress. */
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003735 i->waiting = true;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003736 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003737 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003738 timeout = schedule_timeout(timeout);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003739 finish_wait(&device->misc_wait, &wait);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003740 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003741 if (!timeout || device->state.conn < C_CONNECTED)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003742 return -ETIMEDOUT;
3743 if (signal_pending(current))
3744 return -ERESTARTSYS;
3745 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003746}
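/* Note for callers of drbd_wait_misc(): the connection's req_lock must be
 * held on entry; it is dropped across schedule_timeout() and re-acquired
 * before returning.  The effective timeout is ko_count * timeout (with
 * timeout configured in tenths of a second, hence the HZ / 10), or
 * unlimited when ko_count is 0. */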
3747
3748#ifdef CONFIG_DRBD_FAULT_INJECTION
3749/* Fault insertion support including random number generator shamelessly
3750 * stolen from kernel/rcutorture.c */
3751struct fault_random_state {
3752 unsigned long state;
3753 unsigned long count;
3754};
3755
3756#define FAULT_RANDOM_MULT 39916801 /* prime */
3757#define FAULT_RANDOM_ADD 479001701 /* prime */
3758#define FAULT_RANDOM_REFRESH 10000
3759
3760/*
3761 * Crude but fast random-number generator. Uses a linear congruential
3762 * generator, with occasional help from get_random_bytes().
3763 */
3764static unsigned long
3765_drbd_fault_random(struct fault_random_state *rsp)
3766{
3767 long refresh;
3768
Roel Kluin49829ea2009-12-15 22:55:44 +01003769 if (!rsp->count--) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003770 get_random_bytes(&refresh, sizeof(refresh));
3771 rsp->state += refresh;
3772 rsp->count = FAULT_RANDOM_REFRESH;
3773 }
3774 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3775 return swahw32(rsp->state);
3776}
3777
3778static char *
3779_drbd_fault_str(unsigned int type) {
3780 static char *_faults[] = {
3781 [DRBD_FAULT_MD_WR] = "Meta-data write",
3782 [DRBD_FAULT_MD_RD] = "Meta-data read",
3783 [DRBD_FAULT_RS_WR] = "Resync write",
3784 [DRBD_FAULT_RS_RD] = "Resync read",
3785 [DRBD_FAULT_DT_WR] = "Data write",
3786 [DRBD_FAULT_DT_RD] = "Data read",
3787 [DRBD_FAULT_DT_RA] = "Data read ahead",
3788 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
Philipp Reisner6b4388a2010-04-26 14:11:45 +02003789 [DRBD_FAULT_AL_EE] = "EE allocation",
3790 [DRBD_FAULT_RECEIVE] = "receive data corruption",
Philipp Reisnerb411b362009-09-25 16:07:19 -07003791 };
3792
3793 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3794}
3795
3796unsigned int
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003797_drbd_insert_fault(struct drbd_device *device, unsigned int type)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003798{
3799 static struct fault_random_state rrs = {0, 0};
3800
3801 unsigned int ret = (
3802 (fault_devs == 0 ||
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003803 ((1 << device_to_minor(device)) & fault_devs) != 0) &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003804 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3805
3806 if (ret) {
3807 fault_count++;
3808
Lars Ellenberg73835062010-05-27 11:51:56 +02003809 if (__ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003810 dev_warn(DEV, "***Simulating %s failure\n",
3811 _drbd_fault_str(type));
3812 }
3813
3814 return ret;
3815}
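/* _drbd_insert_fault() returns non-zero with roughly fault_rate percent
 * probability, but only for devices whose minor is selected in the
 * fault_devs bitmask (fault_devs == 0 means all devices).  fault_rate,
 * fault_devs and fault_count are defined elsewhere in the driver. */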
3816#endif
3817
3818const char *drbd_buildtag(void)
3819{
3820	/* When DRBD is built from external sources, this holds a reference to
3821	   the git hash of the source code. */
3822
3823 static char buildtag[38] = "\0uilt-in";
3824
3825 if (buildtag[0] == 0) {
Cong Wangbc4854b2012-04-03 14:13:36 +08003826#ifdef MODULE
3827 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3828#else
3829 buildtag[0] = 'b';
Philipp Reisnerb411b362009-09-25 16:07:19 -07003830#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07003831 }
3832
3833 return buildtag;
3834}
3835
3836module_init(drbd_init)
3837module_exit(drbd_cleanup)
3838
Philipp Reisnerb411b362009-09-25 16:07:19 -07003839EXPORT_SYMBOL(drbd_conn_str);
3840EXPORT_SYMBOL(drbd_role_str);
3841EXPORT_SYMBOL(drbd_disk_str);
3842EXPORT_SYMBOL(drbd_set_st_err_str);