/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);
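
/* Note, added for illustration: with the module_param() declarations above,
 * a built-in DRBD takes these settings on the kernel command line, e.g.
 * "drbd.minor_count=8"; built as a module, parameters declared with a
 * non-zero permission also show up under /sys/module/drbd/parameters/. */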

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details;	/* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;	/* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
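
/* Illustrative sketch, not part of the original file: pushing a page onto
 * this pool by (ab)using the page's private member would look like
 *
 *	set_page_private(page, (unsigned long)drbd_pp_pool);
 *	drbd_pp_pool = page;
 *	drbd_pp_vacant++;
 *
 * executed under drbd_pp_lock; popping reads page_private() to advance the
 * list head. */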

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&tconn->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
		const unsigned long s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&tconn->req_lock);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
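
/* Worked example, added for illustration: if the oldest not yet
 * barrier-acked epoch is #7 and holds three write requests, a BarrierAck
 * carrying barrier_nr == 7 and set_size == 3 moves exactly those three
 * requests to BARRIER_ACKED above; any other combination trips one of the
 * paranoia checks and the connection drops to C_PROTOCOL_ERROR. */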


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
 * @mdev:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_request *req, *r;

	spin_lock_irq(&tconn->req_lock);
	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->w.mdev != mdev)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */
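
	/* Added for illustration - the resulting thread state transitions,
	 * as implemented here and in drbd_thread_start()/_drbd_thread_stop():
	 *   NONE       -> RUNNING     (drbd_thread_start)
	 *   RUNNING    -> EXITING or RESTARTING (_drbd_thread_stop, depending
	 *                                        on its "restart" argument)
	 *   EXITING    -> RESTARTING  (drbd_thread_start while still EXITING)
	 *   EXITING    -> NONE        (this function, normal thread exit)
	 *   RESTARTING -> RUNNING     (this function, via the restart: label)
	 */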

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	kref_put(&tconn->kref, &conn_destroy);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&thi->tconn->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			kref_put(&tconn->kref, &conn_destroy);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr = 0, m;

	rcu_read_lock();
	mdev = idr_get_next(&tconn->volumes, &vnr);
	m = mdev ? mdev_to_minor(mdev) : -1;
	rcu_read_unlock();

	return m;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}
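
/* Example, added for illustration: with four CPUs online and
 * conn_lowest_minor() returning 5, ord starts out as 5 % 4 == 1, so the
 * loop above selects the second online CPU for all of this connection's
 * threads. */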

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	if (tconn->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}
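
/* Illustration (sizes assumed from the field layout used by the
 * prepare_header*() helpers below, not stated in this file): p_header80 and
 * p_header95 come out at 8 bytes, p_header100 at 16 - both multiples of 8,
 * which is what keeps the payload word aligned as described above. */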

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (tconn->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}

static void *__conn_prepare_command(struct drbd_tconn *tconn,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(tconn);
}

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}

static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks. For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}

static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		dc = rcu_dereference(mdev->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(mdev);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &tconn->data;
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	if (nc->tentative && tconn->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		conn_err(tconn, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	int err;

	mutex_lock(&tconn->data.mutex);
	err = __drbd_send_protocol(tconn, P_PROTOCOL);
	mutex_unlock(&tconn->data.mutex);

	return err;
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);
	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	if (mdev->tconn->agreed_pro_version <= 94)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (mdev->tconn->agreed_pro_version < 100)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_current_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @mdev:	DRBD device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
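
/* Summary of the "encoding" byte as manipulated by the three helpers above
 * (added for illustration):
 *	bit  7    - whether the first run length counts set bits
 *	bits 6..4 - number of trailing pad bits in the last code byte
 *	bits 3..0 - the bitmap code, e.g. RLE_VLI_Bits
 */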

int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 unsigned int size,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || mdev->tconn->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
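
/* Worked example, added for illustration: for a bitmap whose first bits are
 * 1,1,1,0,0,0,0,0,1,1,1,1,... the loop above stores start flag 1 (the first
 * run describes set bits) and encodes the alternating run lengths 3, 5, ...
 * into p->code via vli_encode_bits(); the pad bits of the last partial byte
 * are recorded with dcbp_set_pad_bits(). */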

/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	unsigned int header_size = drbd_header_size(mdev->tconn);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(mdev, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(mdev->tconn, mdev->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(mdev->bitmap))
		return false;

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		err = send_bitmap_rle_or_plain(mdev, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(mdev);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (tconn->cstate < C_WF_REPORT_PARAMS)
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001282 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001283
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001284 sock = &tconn->meta;
1285 p = conn_prepare_command(tconn, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001286 if (!p)
1287 return;
1288 p->barrier = barrier_nr;
1289 p->set_size = cpu_to_be32(set_size);
Lars Ellenberg9ed57dc2012-03-26 20:55:17 +02001290 conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001291}
1292
1293/**
1294 * _drbd_send_ack() - Sends an ack packet
1295 * @mdev: DRBD device.
1296 * @cmd: Packet command code.
1297 * @sector: sector, needs to be in big endian byte order
 1298 * @blksize:	size in bytes, needs to be in big endian byte order
1299 * @block_id: Id, big endian byte order
1300 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001301static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1302 u64 sector, u32 blksize, u64 block_id)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001303{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001304 struct drbd_socket *sock;
1305 struct p_block_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001306
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001307 if (mdev->state.conn < C_CONNECTED)
Andreas Gruenbachera8c32aa2011-03-16 01:27:22 +01001308 return -EIO;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001309
1310 sock = &mdev->tconn->meta;
1311 p = drbd_prepare_command(mdev, sock);
1312 if (!p)
1313 return -EIO;
1314 p->sector = sector;
1315 p->block_id = block_id;
1316 p->blksize = blksize;
1317 p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
1318 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001319}
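/* Note that _drbd_send_ack() expects its arguments already in big endian:
 * callers either convert explicitly, e.g.
 *	_drbd_send_ack(mdev, cmd, cpu_to_be64(sector),
 *		       cpu_to_be32(blksize), cpu_to_be64(block_id));
 * as drbd_send_ack_ex() below does, or pass through values that came in
 * from the wire and therefore are still in network byte order. */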
1320
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001321/* dp->sector and dp->block_id already/still in network byte order,
1322 * data_size is payload size according to dp->head,
1323 * and may need to be corrected for digest size. */
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001324void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
1325 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001326{
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001327 if (mdev->tconn->peer_integrity_tfm)
1328 data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001329 _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
1330 dp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001331}
1332
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001333void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1334 struct p_block_req *rp)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001335{
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001336 _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001337}
1338
1339/**
1340 * drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001341 * @mdev: DRBD device
1342 * @cmd: packet command code
1343 * @peer_req: peer request
Philipp Reisnerb411b362009-09-25 16:07:19 -07001344 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001345int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001346 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001347{
Andreas Gruenbacherdd516122011-03-16 15:39:08 +01001348 return _drbd_send_ack(mdev, cmd,
1349 cpu_to_be64(peer_req->i.sector),
1350 cpu_to_be32(peer_req->i.size),
1351 peer_req->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001352}
1353
1354/* This function misuses the block_id field to signal if the blocks
 1355 * are in sync or not. */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001356int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001357 sector_t sector, int blksize, u64 block_id)
1358{
Andreas Gruenbacherfa79abd2011-03-16 01:31:39 +01001359 return _drbd_send_ack(mdev, cmd,
1360 cpu_to_be64(sector),
1361 cpu_to_be32(blksize),
1362 cpu_to_be64(block_id));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001363}
1364
1365int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1366 sector_t sector, int size, u64 block_id)
1367{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001368 struct drbd_socket *sock;
1369 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001370
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001371 sock = &mdev->tconn->data;
1372 p = drbd_prepare_command(mdev, sock);
1373 if (!p)
1374 return -EIO;
1375 p->sector = cpu_to_be64(sector);
1376 p->block_id = block_id;
1377 p->blksize = cpu_to_be32(size);
1378 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001379}
1380
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001381int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
1382 void *digest, int digest_size, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001383{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001384 struct drbd_socket *sock;
1385 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001386
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001387 /* FIXME: Put the digest into the preallocated socket buffer. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001388
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001389 sock = &mdev->tconn->data;
1390 p = drbd_prepare_command(mdev, sock);
1391 if (!p)
1392 return -EIO;
1393 p->sector = cpu_to_be64(sector);
1394 p->block_id = ID_SYNCER /* unused */;
1395 p->blksize = cpu_to_be32(size);
1396 return drbd_send_command(mdev, sock, cmd, sizeof(*p),
1397 digest, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001398}
1399
1400int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
1401{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001402 struct drbd_socket *sock;
1403 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001404
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001405 sock = &mdev->tconn->data;
1406 p = drbd_prepare_command(mdev, sock);
1407 if (!p)
1408 return -EIO;
1409 p->sector = cpu_to_be64(sector);
1410 p->block_id = ID_SYNCER /* unused */;
1411 p->blksize = cpu_to_be32(size);
1412 return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001413}
1414
1415/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001416 * returns false if we should retry,
 1417 * true if we think the connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07001418 */
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001419static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001420{
1421 int drop_it;
1422 /* long elapsed = (long)(jiffies - mdev->last_received); */
1423
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001424 drop_it = tconn->meta.socket == sock
1425 || !tconn->asender.task
1426 || get_t_state(&tconn->asender) != RUNNING
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001427 || tconn->cstate < C_WF_REPORT_PARAMS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001428
1429 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001430 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001431
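	/* we get here once per expired send timeout: count down ko_count,
	 * pinging the peer on each intermediate expiry, and give up once
	 * it reaches zero */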
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001432 drop_it = !--tconn->ko_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001433 if (!drop_it) {
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001434 conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1435 current->comm, current->pid, tconn->ko_count);
1436 request_ping(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001437 }
1438
 1439	return drop_it; /* && (mdev->state == R_PRIMARY) */
1440}
1441
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001442static void drbd_update_congested(struct drbd_tconn *tconn)
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001443{
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001444 struct sock *sk = tconn->data.socket->sk;
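	/* consider the connection congested once more than 4/5 (80%) of the
	 * socket send buffer is queued but not yet sent out */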
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001445 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001446 set_bit(NET_CONGESTED, &tconn->flags);
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001447}
1448
Philipp Reisnerb411b362009-09-25 16:07:19 -07001449/* The idea of sendpage seems to be to put some kind of reference
1450 * to the page into the skb, and to hand it over to the NIC. In
1451 * this process get_page() gets called.
1452 *
1453 * As soon as the page was really sent over the network put_page()
1454 * gets called by some part of the network layer. [ NIC driver? ]
1455 *
1456 * [ get_page() / put_page() increment/decrement the count. If count
1457 * reaches 0 the page will be freed. ]
1458 *
1459 * This works nicely with pages from FSs.
1460 * But this means that in protocol A we might signal IO completion too early!
1461 *
1462 * In order not to corrupt data during a resync we must make sure
 1463 * that we do not reuse our own buffer pages (EEs) too early, therefore
1464 * we have the net_ee list.
1465 *
 1466 * XFS still seems to have problems: it submits pages with page_count == 0!
1467 * As a workaround, we disable sendpage on pages
1468 * with page_count == 0 or PageSlab.
1469 */
1470static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001471 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001472{
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001473 struct socket *socket;
1474 void *addr;
1475 int err;
1476
1477 socket = mdev->tconn->data.socket;
1478 addr = kmap(page) + offset;
1479 err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001480 kunmap(page);
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001481 if (!err)
1482 mdev->send_cnt += size >> 9;
1483 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001484}
1485
1486static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001487 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001488{
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001489 struct socket *socket = mdev->tconn->data.socket;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001490 mm_segment_t oldfs = get_fs();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001491 int len = size;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001492 int err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001493
1494 /* e.g. XFS meta- & log-data is in slab pages, which have a
1495 * page_count of 0 and/or have PageSlab() set.
1496 * we cannot use send_page for those, as that does get_page();
1497 * put_page(); and would cause either a VM_BUG directly, or
1498 * __page_cache_release a page that would actually still be referenced
1499 * by someone, leading to some obscure delayed Oops somewhere else. */
1500 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001501 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001502
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001503 msg_flags |= MSG_NOSIGNAL;
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001504 drbd_update_congested(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001505 set_fs(KERNEL_DS);
1506 do {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001507 int sent;
1508
1509 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001510 if (sent <= 0) {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001511 if (sent == -EAGAIN) {
1512 if (we_should_drop_the_connection(mdev->tconn, socket))
1513 break;
1514 continue;
1515 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001516 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1517 __func__, (int)size, len, sent);
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001518 if (sent < 0)
1519 err = sent;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001520 break;
1521 }
1522 len -= sent;
1523 offset += sent;
 1524	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED */);
1525 set_fs(oldfs);
Philipp Reisner01a311a2011-02-07 14:30:33 +01001526 clear_bit(NET_CONGESTED, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001527
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001528 if (len == 0) {
1529 err = 0;
1530 mdev->send_cnt += size >> 9;
1531 }
1532 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001533}
1534
1535static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1536{
1537 struct bio_vec *bvec;
1538 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001539 /* hint all but last page with MSG_MORE */
Lars Ellenberg4b8514e2012-03-26 16:12:49 +02001540 bio_for_each_segment(bvec, bio, i) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001541 int err;
1542
1543 err = _drbd_no_send_page(mdev, bvec->bv_page,
1544 bvec->bv_offset, bvec->bv_len,
1545 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1546 if (err)
1547 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001548 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001549 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001550}
1551
1552static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1553{
1554 struct bio_vec *bvec;
1555 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001556 /* hint all but last page with MSG_MORE */
Lars Ellenberg4b8514e2012-03-26 16:12:49 +02001557 bio_for_each_segment(bvec, bio, i) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001558 int err;
1559
1560 err = _drbd_send_page(mdev, bvec->bv_page,
1561 bvec->bv_offset, bvec->bv_len,
1562 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1563 if (err)
1564 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001565 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001566 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001567}
1568
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001569static int _drbd_send_zc_ee(struct drbd_conf *mdev,
1570 struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001571{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001572 struct page *page = peer_req->pages;
1573 unsigned len = peer_req->i.size;
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001574 int err;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001575
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001576 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001577 page_chain_for_each(page) {
1578 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001579
1580 err = _drbd_send_page(mdev, page, 0, l,
1581 page_chain_next(page) ? MSG_MORE : 0);
1582 if (err)
1583 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001584 len -= l;
1585 }
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001586 return 0;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001587}
1588
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001589static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
1590{
Philipp Reisner31890f42011-01-19 14:12:51 +01001591 if (mdev->tconn->agreed_pro_version >= 95)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001592 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001593 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1594 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1595 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1596 else
Jens Axboe721a9602011-03-09 11:56:30 +01001597 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001598}
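/* e.g. a bio with REQ_FLUSH | REQ_FUA set maps to DP_FLUSH | DP_FUA for
 * peers speaking protocol 95 or newer; older peers only ever receive the
 * DP_RW_SYNC hint, all other flags are dropped on the wire */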
1599
Philipp Reisnerb411b362009-09-25 16:07:19 -07001600/* Used to send write requests
1601 * R_PRIMARY -> Peer (P_DATA)
1602 */
1603int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
1604{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001605 struct drbd_socket *sock;
1606 struct p_data *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001607 unsigned int dp_flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001608 int dgs;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001609 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001610
Philipp Reisner46e1ce42011-05-16 12:57:15 +02001611 sock = &mdev->tconn->data;
1612 p = drbd_prepare_command(mdev, sock);
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02001613 dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001614
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001615 if (!p)
1616 return -EIO;
1617 p->sector = cpu_to_be64(req->i.sector);
1618 p->block_id = (unsigned long)req;
Lars Ellenberg5cdb0bf32012-03-26 16:21:25 +02001619 p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001620 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001621 if (mdev->state.conn >= C_SYNC_SOURCE &&
1622 mdev->state.conn <= C_PAUSED_SYNC_T)
1623 dp_flags |= DP_MAY_SET_IN_SYNC;
Philipp Reisner303d1442011-04-13 16:24:47 -07001624 if (mdev->tconn->agreed_pro_version >= 100) {
1625 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1626 dp_flags |= DP_SEND_RECEIVE_ACK;
1627 if (req->rq_state & RQ_EXP_WRITE_ACK)
1628 dp_flags |= DP_SEND_WRITE_ACK;
1629 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001630 p->dp_flags = cpu_to_be32(dp_flags);
1631 if (dgs)
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001632 drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001633 err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001634 if (!err) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001635 /* For protocol A, we have to memcpy the payload into
1636 * socket buffers, as we may complete right away
1637 * as soon as we handed it over to tcp, at which point the data
1638 * pages may become invalid.
1639 *
1640 * For data-integrity enabled, we copy it as well, so we can be
1641 * sure that even if the bio pages may still be modified, it
1642 * won't change the data on the wire, thus if the digest checks
1643 * out ok after sending on this side, but does not fit on the
1644 * receiving side, we sure have detected corruption elsewhere.
1645 */
Philipp Reisner303d1442011-04-13 16:24:47 -07001646 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001647 err = _drbd_send_bio(mdev, req->master_bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001648 else
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001649 err = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01001650
1651 /* double check digest, sometimes buffers have been modified in flight. */
1652 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001653 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01001654 * currently supported in kernel crypto. */
1655 unsigned char digest[64];
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001656 drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001657 if (memcmp(p + 1, digest, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001658 dev_warn(DEV,
1659 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001660 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01001661 }
1662 } /* else if (dgs > 64) {
1663 ... Be noisy about digest too large ...
1664 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001665 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001666 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc2010-05-04 12:33:58 +02001667
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001668 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001669}
1670
1671/* answer packet, used to send data back for read requests:
1672 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1673 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1674 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001675int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001676 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001677{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001678 struct drbd_socket *sock;
1679 struct p_data *p;
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001680 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001681 int dgs;
1682
Philipp Reisner46e1ce42011-05-16 12:57:15 +02001683 sock = &mdev->tconn->data;
1684 p = drbd_prepare_command(mdev, sock);
1685
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02001686 dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001687
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001688 if (!p)
1689 return -EIO;
1690 p->sector = cpu_to_be64(peer_req->i.sector);
1691 p->block_id = peer_req->block_id;
1692 p->seq_num = 0; /* unused */
Lars Ellenbergb17f33c2012-02-08 15:32:51 +01001693 p->dp_flags = 0;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001694 if (dgs)
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001695 drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001696 err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001697 if (!err)
1698 err = _drbd_send_zc_ee(mdev, peer_req);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001699 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc2010-05-04 12:33:58 +02001700
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001701 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001702}
1703
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01001704int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
Philipp Reisner73a01a12010-10-27 14:33:00 +02001705{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001706 struct drbd_socket *sock;
1707 struct p_block_desc *p;
Philipp Reisner73a01a12010-10-27 14:33:00 +02001708
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001709 sock = &mdev->tconn->data;
1710 p = drbd_prepare_command(mdev, sock);
1711 if (!p)
1712 return -EIO;
1713 p->sector = cpu_to_be64(req->i.sector);
1714 p->blksize = cpu_to_be32(req->i.size);
1715 return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
Philipp Reisner73a01a12010-10-27 14:33:00 +02001716}
1717
Philipp Reisnerb411b362009-09-25 16:07:19 -07001718/*
1719 drbd_send distinguishes two cases:
1720
1721 Packets sent via the data socket "sock"
1722 and packets sent via the meta data socket "msock"
1723
1724 sock msock
1725 -----------------+-------------------------+------------------------------
1726 timeout conf.timeout / 2 conf.timeout / 2
1727 timeout action send a ping via msock Abort communication
1728 and close all sockets
1729*/
1730
1731/*
1732 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1733 */
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001734int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001735 void *buf, size_t size, unsigned msg_flags)
1736{
1737 struct kvec iov;
1738 struct msghdr msg;
1739 int rv, sent = 0;
1740
1741 if (!sock)
Andreas Gruenbacherc0d42c82010-12-09 23:52:22 +01001742 return -EBADR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001743
1744 /* THINK if (signal_pending) return ... ? */
1745
1746 iov.iov_base = buf;
1747 iov.iov_len = size;
1748
1749 msg.msg_name = NULL;
1750 msg.msg_namelen = 0;
1751 msg.msg_control = NULL;
1752 msg.msg_controllen = 0;
1753 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1754
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001755 if (sock == tconn->data.socket) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001756 rcu_read_lock();
1757 tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
1758 rcu_read_unlock();
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001759 drbd_update_congested(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001760 }
1761 do {
1762 /* STRANGE
 1763	 * tcp_sendmsg does _not_ use its size parameter at all?
1764 *
1765 * -EAGAIN on timeout, -EINTR on signal.
1766 */
1767/* THINK
1768 * do we need to block DRBD_SIG if sock == &meta.socket ??
1769 * otherwise wake_asender() might interrupt some send_*Ack !
1770 */
1771 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1772 if (rv == -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001773 if (we_should_drop_the_connection(tconn, sock))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001774 break;
1775 else
1776 continue;
1777 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001778 if (rv == -EINTR) {
1779 flush_signals(current);
1780 rv = 0;
1781 }
1782 if (rv < 0)
1783 break;
1784 sent += rv;
1785 iov.iov_base += rv;
1786 iov.iov_len -= rv;
1787 } while (sent < size);
1788
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001789 if (sock == tconn->data.socket)
1790 clear_bit(NET_CONGESTED, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001791
1792 if (rv <= 0) {
1793 if (rv != -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001794 conn_err(tconn, "%s_sendmsg returned %d\n",
1795 sock == tconn->meta.socket ? "msock" : "sock",
1796 rv);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001797 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001798 } else
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001799 conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001800 }
1801
1802 return sent;
1803}
1804
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001805/**
1806 * drbd_send_all - Send an entire buffer
1807 *
1808 * Returns 0 upon success and a negative error value otherwise.
1809 */
1810int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1811 size_t size, unsigned msg_flags)
1812{
1813 int err;
1814
1815 err = drbd_send(tconn, sock, buffer, size, msg_flags);
1816 if (err < 0)
1817 return err;
1818 if (err != size)
1819 return -EIO;
1820 return 0;
1821}
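/* A minimal use (illustrative sketch only, not in-tree code): push a
 * fixed-size buffer over the data socket, treating a short write as
 * failure; per the comment above drbd_send(), the caller must already
 * hold the corresponding socket mutex:
 *
 *	static int send_blob(struct drbd_tconn *tconn, void *buf, size_t len)
 *	{
 *		return drbd_send_all(tconn, tconn->data.socket, buf, len, 0);
 *	}
 */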
1822
Philipp Reisnerb411b362009-09-25 16:07:19 -07001823static int drbd_open(struct block_device *bdev, fmode_t mode)
1824{
1825 struct drbd_conf *mdev = bdev->bd_disk->private_data;
1826 unsigned long flags;
1827 int rv = 0;
1828
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001829 mutex_lock(&drbd_main_mutex);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001830 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001831 /* to have a stable mdev->state.role
1832 * and no race with updating open_cnt */
1833
1834 if (mdev->state.role != R_PRIMARY) {
1835 if (mode & FMODE_WRITE)
1836 rv = -EROFS;
1837 else if (!allow_oos)
1838 rv = -EMEDIUMTYPE;
1839 }
1840
1841 if (!rv)
1842 mdev->open_cnt++;
Philipp Reisner87eeee42011-01-19 14:16:30 +01001843 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001844 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001845
1846 return rv;
1847}
1848
1849static int drbd_release(struct gendisk *gd, fmode_t mode)
1850{
1851 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001852 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001853 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001854 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001855 return 0;
1856}
1857
Philipp Reisnerb411b362009-09-25 16:07:19 -07001858static void drbd_set_defaults(struct drbd_conf *mdev)
1859{
Lars Ellenbergf3990022011-03-23 14:31:09 +01001860 /* Beware! The actual layout differs
1861 * between big endian and little endian */
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02001862 mdev->state = (union drbd_dev_state) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001863 { .role = R_SECONDARY,
1864 .peer = R_UNKNOWN,
1865 .conn = C_STANDALONE,
1866 .disk = D_DISKLESS,
1867 .pdsk = D_UNKNOWN,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001868 } };
1869}
1870
1871void drbd_init_set_defaults(struct drbd_conf *mdev)
1872{
1873 /* the memset(,0,) did most of this.
1874 * note: only assignments, no allocation in here */
1875
1876 drbd_set_defaults(mdev);
1877
Philipp Reisnerb411b362009-09-25 16:07:19 -07001878 atomic_set(&mdev->ap_bio_cnt, 0);
1879 atomic_set(&mdev->ap_pending_cnt, 0);
1880 atomic_set(&mdev->rs_pending_cnt, 0);
1881 atomic_set(&mdev->unacked_cnt, 0);
1882 atomic_set(&mdev->local_cnt, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02001883 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02001884 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001885 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02001886 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnercdfda632011-07-05 15:38:59 +02001887 atomic_set(&mdev->md_io_in_use, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001888
Philipp Reisner8410da82011-02-11 20:11:10 +01001889 mutex_init(&mdev->own_state_mutex);
1890 mdev->state_mutex = &mdev->own_state_mutex;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001891
Philipp Reisnerb411b362009-09-25 16:07:19 -07001892 spin_lock_init(&mdev->al_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001893 spin_lock_init(&mdev->peer_seq_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001894
1895 INIT_LIST_HEAD(&mdev->active_ee);
1896 INIT_LIST_HEAD(&mdev->sync_ee);
1897 INIT_LIST_HEAD(&mdev->done_ee);
1898 INIT_LIST_HEAD(&mdev->read_ee);
1899 INIT_LIST_HEAD(&mdev->net_ee);
1900 INIT_LIST_HEAD(&mdev->resync_reads);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001901 INIT_LIST_HEAD(&mdev->resync_work.list);
1902 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001903 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001904 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02001905 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001906 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02001907
Philipp Reisner794abb72010-12-27 11:51:23 +01001908 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001909 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001910 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001911 mdev->md_sync_work.cb = w_md_sync;
1912 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01001913 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnera21e9292011-02-08 15:08:49 +01001914
1915 mdev->resync_work.mdev = mdev;
1916 mdev->unplug_work.mdev = mdev;
1917 mdev->go_diskless.mdev = mdev;
1918 mdev->md_sync_work.mdev = mdev;
1919 mdev->bm_io_work.w.mdev = mdev;
1920 mdev->start_resync_work.mdev = mdev;
1921
Philipp Reisnerb411b362009-09-25 16:07:19 -07001922 init_timer(&mdev->resync_timer);
1923 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01001924 init_timer(&mdev->start_resync_timer);
Philipp Reisner7fde2be2011-03-01 11:08:28 +01001925 init_timer(&mdev->request_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001926 mdev->resync_timer.function = resync_timer_fn;
1927 mdev->resync_timer.data = (unsigned long) mdev;
1928 mdev->md_sync_timer.function = md_sync_timer_fn;
1929 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01001930 mdev->start_resync_timer.function = start_resync_timer_fn;
1931 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisner7fde2be2011-03-01 11:08:28 +01001932 mdev->request_timer.function = request_timer_fn;
1933 mdev->request_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001934
1935 init_waitqueue_head(&mdev->misc_wait);
1936 init_waitqueue_head(&mdev->state_wait);
1937 init_waitqueue_head(&mdev->ee_wait);
1938 init_waitqueue_head(&mdev->al_wait);
1939 init_waitqueue_head(&mdev->seq_wait);
1940
Philipp Reisnerb411b362009-09-25 16:07:19 -07001941 mdev->resync_wenr = LC_FREE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001942 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1943 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001944}
1945
1946void drbd_mdev_cleanup(struct drbd_conf *mdev)
1947{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001948 int i;
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01001949 if (mdev->tconn->receiver.t_state != NONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001950 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01001951 mdev->tconn->receiver.t_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001952
Philipp Reisnerb411b362009-09-25 16:07:19 -07001953 mdev->al_writ_cnt =
1954 mdev->bm_writ_cnt =
1955 mdev->read_cnt =
1956 mdev->recv_cnt =
1957 mdev->send_cnt =
1958 mdev->writ_cnt =
1959 mdev->p_size =
1960 mdev->rs_start =
1961 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001962 mdev->rs_failed = 0;
1963 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001964 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001965 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1966 mdev->rs_mark_left[i] = 0;
1967 mdev->rs_mark_time[i] = 0;
1968 }
Philipp Reisner89e58e72011-01-19 13:12:45 +01001969 D_ASSERT(mdev->tconn->net_conf == NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001970
1971 drbd_set_my_capacity(mdev, 0);
1972 if (mdev->bitmap) {
1973 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01001974 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001975 drbd_bm_cleanup(mdev);
1976 }
1977
Philipp Reisner1d041222011-04-22 15:20:23 +02001978 drbd_free_bc(mdev->ldev);
1979 mdev->ldev = NULL;
1980
Philipp Reisner07782862010-08-31 12:00:50 +02001981 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001982
Philipp Reisnerb411b362009-09-25 16:07:19 -07001983 D_ASSERT(list_empty(&mdev->active_ee));
1984 D_ASSERT(list_empty(&mdev->sync_ee));
1985 D_ASSERT(list_empty(&mdev->done_ee));
1986 D_ASSERT(list_empty(&mdev->read_ee));
1987 D_ASSERT(list_empty(&mdev->net_ee));
1988 D_ASSERT(list_empty(&mdev->resync_reads));
Lars Ellenbergd5b27b02011-11-14 15:42:37 +01001989 D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001990 D_ASSERT(list_empty(&mdev->resync_work.list));
1991 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001992 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01001993
1994 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001995}
1996
1997
1998static void drbd_destroy_mempools(void)
1999{
2000 struct page *page;
2001
2002 while (drbd_pp_pool) {
2003 page = drbd_pp_pool;
2004 drbd_pp_pool = (struct page *)page_private(page);
2005 __free_page(page);
2006 drbd_pp_vacant--;
2007 }
2008
2009 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2010
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002011 if (drbd_md_io_bio_set)
2012 bioset_free(drbd_md_io_bio_set);
Lars Ellenberg35abf592011-02-23 12:39:46 +01002013 if (drbd_md_io_page_pool)
2014 mempool_destroy(drbd_md_io_page_pool);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002015 if (drbd_ee_mempool)
2016 mempool_destroy(drbd_ee_mempool);
2017 if (drbd_request_mempool)
2018 mempool_destroy(drbd_request_mempool);
2019 if (drbd_ee_cache)
2020 kmem_cache_destroy(drbd_ee_cache);
2021 if (drbd_request_cache)
2022 kmem_cache_destroy(drbd_request_cache);
2023 if (drbd_bm_ext_cache)
2024 kmem_cache_destroy(drbd_bm_ext_cache);
2025 if (drbd_al_ext_cache)
2026 kmem_cache_destroy(drbd_al_ext_cache);
2027
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002028 drbd_md_io_bio_set = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002029 drbd_md_io_page_pool = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002030 drbd_ee_mempool = NULL;
2031 drbd_request_mempool = NULL;
2032 drbd_ee_cache = NULL;
2033 drbd_request_cache = NULL;
2034 drbd_bm_ext_cache = NULL;
2035 drbd_al_ext_cache = NULL;
2036
2037 return;
2038}
2039
2040static int drbd_create_mempools(void)
2041{
2042 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002043 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
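	/* e.g. assuming a 1 MiB DRBD_MAX_BIO_SIZE and 4 KiB pages, that is
	 * 256 preallocated pages per configured minor (numbers illustrative) */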
Philipp Reisnerb411b362009-09-25 16:07:19 -07002044 int i;
2045
2046 /* prepare our caches and mempools */
2047 drbd_request_mempool = NULL;
2048 drbd_ee_cache = NULL;
2049 drbd_request_cache = NULL;
2050 drbd_bm_ext_cache = NULL;
2051 drbd_al_ext_cache = NULL;
2052 drbd_pp_pool = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002053 drbd_md_io_page_pool = NULL;
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002054 drbd_md_io_bio_set = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002055
2056 /* caches */
2057 drbd_request_cache = kmem_cache_create(
2058 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2059 if (drbd_request_cache == NULL)
2060 goto Enomem;
2061
2062 drbd_ee_cache = kmem_cache_create(
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01002063 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002064 if (drbd_ee_cache == NULL)
2065 goto Enomem;
2066
2067 drbd_bm_ext_cache = kmem_cache_create(
2068 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2069 if (drbd_bm_ext_cache == NULL)
2070 goto Enomem;
2071
2072 drbd_al_ext_cache = kmem_cache_create(
2073 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2074 if (drbd_al_ext_cache == NULL)
2075 goto Enomem;
2076
2077 /* mempools */
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002078 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2079 if (drbd_md_io_bio_set == NULL)
2080 goto Enomem;
2081
Lars Ellenberg35abf592011-02-23 12:39:46 +01002082 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2083 if (drbd_md_io_page_pool == NULL)
2084 goto Enomem;
2085
Philipp Reisnerb411b362009-09-25 16:07:19 -07002086 drbd_request_mempool = mempool_create(number,
2087 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2088 if (drbd_request_mempool == NULL)
2089 goto Enomem;
2090
2091 drbd_ee_mempool = mempool_create(number,
2092 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06002093 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002094 goto Enomem;
2095
2096 /* drbd's page pool */
2097 spin_lock_init(&drbd_pp_lock);
2098
2099 for (i = 0; i < number; i++) {
2100 page = alloc_page(GFP_HIGHUSER);
2101 if (!page)
2102 goto Enomem;
2103 set_page_private(page, (unsigned long)drbd_pp_pool);
2104 drbd_pp_pool = page;
2105 }
2106 drbd_pp_vacant = number;
2107
2108 return 0;
2109
2110Enomem:
2111 drbd_destroy_mempools(); /* in case we allocated some */
2112 return -ENOMEM;
2113}
2114
2115static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2116 void *unused)
2117{
2118 /* just so we have it. you never know what interesting things we
2119 * might want to do here some day...
2120 */
2121
2122 return NOTIFY_DONE;
2123}
2124
2125static struct notifier_block drbd_notifier = {
2126 .notifier_call = drbd_notify_sys,
2127};
2128
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002129static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002130{
2131 int rr;
2132
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002133 rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002134 if (rr)
2135 dev_err(DEV, "%d EEs in active list found!\n", rr);
2136
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002137 rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002138 if (rr)
2139 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2140
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002141 rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002142 if (rr)
2143 dev_err(DEV, "%d EEs in read list found!\n", rr);
2144
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002145 rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002146 if (rr)
2147 dev_err(DEV, "%d EEs in done list found!\n", rr);
2148
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002149 rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002150 if (rr)
2151 dev_err(DEV, "%d EEs in net list found!\n", rr);
2152}
2153
Philipp Reisner774b3052011-02-22 02:07:03 -05002154/* caution. no locking. */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002155void drbd_minor_destroy(struct kref *kref)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002156{
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002157 struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002158 struct drbd_tconn *tconn = mdev->tconn;
2159
Philipp Reisnercdfda632011-07-05 15:38:59 +02002160 del_timer_sync(&mdev->request_timer);
2161
Philipp Reisnerb411b362009-09-25 16:07:19 -07002162 /* paranoia asserts */
Andreas Gruenbacher70dc65e2010-12-21 14:46:57 +01002163 D_ASSERT(mdev->open_cnt == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002164 /* end paranoia asserts */
2165
Philipp Reisnerb411b362009-09-25 16:07:19 -07002166 /* cleanup stuff that may have been allocated during
2167 * device (re-)configuration or state changes */
2168
2169 if (mdev->this_bdev)
2170 bdput(mdev->this_bdev);
2171
Philipp Reisner1d041222011-04-22 15:20:23 +02002172 drbd_free_bc(mdev->ldev);
2173 mdev->ldev = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002174
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002175 drbd_release_all_peer_reqs(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002176
Philipp Reisnerb411b362009-09-25 16:07:19 -07002177 lc_destroy(mdev->act_log);
2178 lc_destroy(mdev->resync);
2179
2180 kfree(mdev->p_uuid);
2181 /* mdev->p_uuid = NULL; */
2182
Philipp Reisnercd1d9952011-04-11 21:24:24 -07002183 if (mdev->bitmap) /* should no longer be there. */
2184 drbd_bm_cleanup(mdev);
2185 __free_page(mdev->md_io_page);
2186 put_disk(mdev->vdisk);
2187 blk_cleanup_queue(mdev->rq_queue);
Philipp Reisner9958c852011-05-03 16:19:31 +02002188 kfree(mdev->rs_plan_s);
Philipp Reisnercd1d9952011-04-11 21:24:24 -07002189 kfree(mdev);
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002190
2191 kref_put(&tconn->kref, &conn_destroy);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002192}
2193
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002194/* One global retry thread, if we need to push back some bio and have it
2195 * reinserted through our make request function.
2196 */
2197static struct retry_worker {
2198 struct workqueue_struct *wq;
2199 struct work_struct worker;
2200
2201 spinlock_t lock;
2202 struct list_head writes;
2203} retry;
2204
2205static void do_retry(struct work_struct *ws)
2206{
2207 struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2208 LIST_HEAD(writes);
2209 struct drbd_request *req, *tmp;
2210
2211 spin_lock_irq(&retry->lock);
2212 list_splice_init(&retry->writes, &writes);
2213 spin_unlock_irq(&retry->lock);
2214
2215 list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2216 struct drbd_conf *mdev = req->w.mdev;
2217 struct bio *bio = req->master_bio;
2218 unsigned long start_time = req->start_time;
2219
2220 /* We have exclusive access to this request object.
2221 * If it had not been RQ_POSTPONED, the code path which queued
2222 * it here would have completed and freed it already.
2223 */
2224 mempool_free(req, drbd_request_mempool);
2225
2226 /* A single suspended or otherwise blocking device may stall
 2227		 * all others as well. Fortunately, this code path exists only to
2228 * recover from a situation that "should not happen":
2229 * concurrent writes in multi-primary setup.
2230 * In a "normal" lifecycle, this workqueue is supposed to be
2231 * destroyed without ever doing anything.
 2232		 * If it turns out to be an issue anyway, we can do per
2233 * resource (replication group) or per device (minor) retry
2234 * workqueues instead.
2235 */
2236
2237 /* We are not just doing generic_make_request(),
2238 * as we want to keep the start_time information. */
2239 do {
2240 inc_ap_bio(mdev);
 2241		} while (__drbd_make_request(mdev, bio, start_time));
2242 }
2243}
2244
Lars Ellenberg9d05e7c2012-07-17 10:05:04 +02002245void drbd_restart_request(struct drbd_request *req)
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002246{
2247 unsigned long flags;
2248 spin_lock_irqsave(&retry.lock, flags);
2249 list_move_tail(&req->tl_requests, &retry.writes);
2250 spin_unlock_irqrestore(&retry.lock, flags);
2251
2252 /* Drop the extra reference that would otherwise
2253 * have been dropped by complete_master_bio.
2254 * do_retry() needs to grab a new one. */
2255 dec_ap_bio(req->w.mdev);
2256
2257 queue_work(retry.wq, &retry.worker);
2258}
2259
2260
Philipp Reisnerb411b362009-09-25 16:07:19 -07002261static void drbd_cleanup(void)
2262{
2263 unsigned int i;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002264 struct drbd_conf *mdev;
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002265 struct drbd_tconn *tconn, *tmp;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002266
2267 unregister_reboot_notifier(&drbd_notifier);
2268
Lars Ellenberg17a93f302010-11-24 10:37:35 +01002269 /* first remove proc,
 2270	 * drbdsetup uses its presence to detect
 2271	 * whether DRBD is loaded.
 2272	 * If we get stuck in proc removal,
2273 * but have netlink already deregistered,
2274 * some drbdsetup commands may wait forever
2275 * for an answer.
2276 */
2277 if (drbd_proc)
2278 remove_proc_entry("drbd", NULL);
2279
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002280 if (retry.wq)
2281 destroy_workqueue(retry.wq);
2282
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002283 drbd_genl_unregister();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002284
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002285 idr_for_each_entry(&minors, mdev, i) {
2286 idr_remove(&minors, mdev_to_minor(mdev));
2287 idr_remove(&mdev->tconn->volumes, mdev->vnr);
2288 del_gendisk(mdev->vdisk);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002289 /* synchronize_rcu(); No other threads running at this point */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002290 kref_put(&mdev->kref, &drbd_minor_destroy);
2291 }
2292
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002293 /* not _rcu since, no other updater anymore. Genl already unregistered */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002294 list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002295 list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */
2296 /* synchronize_rcu(); */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002297 kref_put(&tconn->kref, &conn_destroy);
2298 }
Philipp Reisnerff370e52011-04-11 21:10:11 -07002299
Philipp Reisner81a5d602011-02-22 19:53:16 -05002300 drbd_destroy_mempools();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002301 unregister_blkdev(DRBD_MAJOR, "drbd");
2302
Philipp Reisner81a5d602011-02-22 19:53:16 -05002303 idr_destroy(&minors);
2304
Philipp Reisnerb411b362009-09-25 16:07:19 -07002305 printk(KERN_INFO "drbd: module cleanup done.\n");
2306}
2307
2308/**
2309 * drbd_congested() - Callback for pdflush
2310 * @congested_data: User data
2311 * @bdi_bits: Bits pdflush is currently interested in
2312 *
2313 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2314 */
2315static int drbd_congested(void *congested_data, int bdi_bits)
2316{
2317 struct drbd_conf *mdev = congested_data;
2318 struct request_queue *q;
2319 char reason = '-';
2320 int r = 0;
2321
Andreas Gruenbacher1b881ef2010-12-13 18:03:38 +01002322 if (!may_inc_ap_bio(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002323 /* DRBD has frozen IO */
2324 r = bdi_bits;
2325 reason = 'd';
2326 goto out;
2327 }
2328
2329 if (get_ldev(mdev)) {
2330 q = bdev_get_queue(mdev->ldev->backing_bdev);
2331 r = bdi_congested(&q->backing_dev_info, bdi_bits);
2332 put_ldev(mdev);
2333 if (r)
2334 reason = 'b';
2335 }
2336
Philipp Reisner01a311a2011-02-07 14:30:33 +01002337 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002338 r |= (1 << BDI_async_congested);
2339 reason = reason == 'b' ? 'a' : 'n';
2340 }
2341
2342out:
2343 mdev->congestion_reason = reason;
2344 return r;
2345}
2346
Philipp Reisner6699b652011-02-09 11:10:24 +01002347static void drbd_init_workqueue(struct drbd_work_queue* wq)
2348{
Philipp Reisner6699b652011-02-09 11:10:24 +01002349 spin_lock_init(&wq->q_lock);
2350 INIT_LIST_HEAD(&wq->q);
Lars Ellenberg8c0785a2011-10-19 11:50:57 +02002351 init_waitqueue_head(&wq->q_wait);
Philipp Reisner6699b652011-02-09 11:10:24 +01002352}
2353
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002354struct drbd_tconn *conn_get_by_name(const char *name)
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002355{
2356 struct drbd_tconn *tconn;
2357
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002358 if (!name || !name[0])
2359 return NULL;
2360
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002361 rcu_read_lock();
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002362 list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002363 if (!strcmp(tconn->name, name)) {
2364 kref_get(&tconn->kref);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002365 goto found;
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002366 }
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002367 }
2368 tconn = NULL;
2369found:
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002370 rcu_read_unlock();
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002371 return tconn;
2372}
2373
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002374struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
2375 void *peer_addr, int peer_addr_len)
2376{
2377 struct drbd_tconn *tconn;
2378
2379 rcu_read_lock();
2380 list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
2381 if (tconn->my_addr_len == my_addr_len &&
2382 tconn->peer_addr_len == peer_addr_len &&
2383 !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
2384 !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
2385 kref_get(&tconn->kref);
2386 goto found;
2387 }
2388 }
2389 tconn = NULL;
2390found:
2391 rcu_read_unlock();
2392 return tconn;
2393}
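/* Both lookup helpers above take a reference on the tconn they return;
 * callers are expected to drop it with kref_put(&tconn->kref, &conn_destroy). */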
2394
static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}

void conn_free_crypto(struct drbd_tconn *tconn)
{
	drbd_free_sock(tconn);

	crypto_free_hash(tconn->csums_tfm);
	crypto_free_hash(tconn->verify_tfm);
	crypto_free_hash(tconn->cram_hmac_tfm);
	crypto_free_hash(tconn->integrity_tfm);
	crypto_free_hash(tconn->peer_integrity_tfm);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);

	tconn->csums_tfm = NULL;
	tconn->verify_tfm = NULL;
	tconn->cram_hmac_tfm = NULL;
	tconn->integrity_tfm = NULL;
	tconn->peer_integrity_tfm = NULL;
	tconn->int_dig_in = NULL;
	tconn->int_dig_vv = NULL;
}

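/* Apply new resource options. The CPU mask string is parsed into a
 * cpumask; if it changed, the receiver, asender and worker threads are
 * flagged to re-apply their CPU affinity on their next iteration. */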
int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
{
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
	/*
	retcode = ERR_NOMEM;
	drbd_msg_put_info("unable to allocate cpumask");
	*/

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		/* FIXME: Get rid of constant 32 here */
		err = bitmap_parse(res_opts->cpu_mask, 32,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	tconn->res_opts = *res_opts;
	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}

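/* Allocate a new connection object. The returned tconn starts with one
 * reference held and is already published on the global drbd_tconns list;
 * it is freed through conn_destroy() once the last kref is dropped. */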
/* caller must be under genl_lock() */
struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_tconn *tconn;

	tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
	if (!tconn)
		return NULL;

	tconn->name = kstrdup(name, GFP_KERNEL);
	if (!tconn->name)
		goto fail;

	if (drbd_alloc_socket(&tconn->data))
		goto fail;
	if (drbd_alloc_socket(&tconn->meta))
		goto fail;

	if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
		goto fail;

	if (set_resource_options(tconn, res_opts))
		goto fail;

	tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!tconn->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&tconn->transfer_log);

	INIT_LIST_HEAD(&tconn->current_epoch->list);
	tconn->epochs = 1;
	spin_lock_init(&tconn->epoch_lock);
	tconn->write_ordering = WO_bdev_flush;

	tconn->send.seen_any_write_yet = false;
	tconn->send.current_epoch_nr = 0;
	tconn->send.current_epoch_writes = 0;

	tconn->cstate = C_STANDALONE;
	mutex_init(&tconn->cstate_mutex);
	spin_lock_init(&tconn->req_lock);
	mutex_init(&tconn->conf_update);
	init_waitqueue_head(&tconn->ping_wait);
	idr_init(&tconn->volumes);

	drbd_init_workqueue(&tconn->sender_work);
	mutex_init(&tconn->data.mutex);
	mutex_init(&tconn->meta.mutex);

	drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
	drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");

	kref_init(&tconn->kref);
	list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);

	return tconn;

fail:
	kfree(tconn->current_epoch);
	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn);

	return NULL;
}

void conn_destroy(struct kref *kref)
{
	struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);

	if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
		conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
	kfree(tconn->current_epoch);

	idr_destroy(&tconn->volumes);

	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	kfree(tconn);
}

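/* Create a new device (minor) as volume @vnr below connection @tconn:
 * allocate the gendisk and request queue, wire up drbd_make_request(),
 * and register the device in both the global minors idr and the
 * connection's volume idr. */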
enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
	struct drbd_conf *mdev;
	struct gendisk *disk;
	struct request_queue *q;
	int vnr_got = vnr;
	int minor_got = minor;
	enum drbd_ret_code err = ERR_NOMEM;

	mdev = minor_to_mdev(minor);
	if (mdev)
		return ERR_MINOR_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
	if (!mdev)
		return ERR_NOMEM;

	kref_get(&tconn->kref);
	mdev->tconn = tconn;

	mdev->minor = minor;
	mdev->vnr = vnr;

	drbd_init_set_defaults(mdev);

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out_no_q;
	mdev->rq_queue = q;
	q->queuedata = mdev;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	mdev->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = mdev;

	mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	mdev->this_bdev->bd_contains = mdev->this_bdev;

	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = mdev;

	blk_queue_make_request(q, drbd_make_request);
	/* Setting the max_hw_sectors to an odd value of 8 KiB here.
	   This triggers a max_bio_size message upon first attach or connect. */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */

	mdev->md_io_page = alloc_page(GFP_KERNEL);
	if (!mdev->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(mdev))
		goto out_no_bitmap;
	mdev->read_requests = RB_ROOT;
	mdev->write_requests = RB_ROOT;

	if (!idr_pre_get(&minors, GFP_KERNEL))
		goto out_no_minor_idr;
	if (idr_get_new_above(&minors, mdev, minor, &minor_got))
		goto out_no_minor_idr;
	if (minor_got != minor) {
		err = ERR_MINOR_EXISTS;
		drbd_msg_put_info("requested minor exists already");
		goto out_idr_remove_minor;
	}

	if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
		goto out_idr_remove_minor;
	if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
		goto out_idr_remove_minor;
	if (vnr_got != vnr) {
		err = ERR_INVALID_REQUEST;
		drbd_msg_put_info("requested volume exists already");
		goto out_idr_remove_vol;
	}
	add_disk(disk);
	kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */

	/* inherit the connection state */
	mdev->state.conn = tconn->cstate;
	if (mdev->state.conn == C_WF_REPORT_PARAMS)
		drbd_connected(mdev);

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
	idr_remove(&minors, minor_got);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(mdev);
out_no_bitmap:
	__free_page(mdev->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kfree(mdev);
	kref_put(&tconn->kref, &conn_destroy);
	return err;
}

int __init drbd_init(void)
{
	int err;

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	err = -ENOMEM;

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&minors);

	err = drbd_create_mempools();
	if (err)
		goto fail;

	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc) {
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_tconns);

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
	       DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		/* currently always the case */
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}

void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev);
}

void drbd_free_sock(struct drbd_tconn *tconn)
{
	if (tconn->data.socket) {
		mutex_lock(&tconn->data.mutex);
		kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
		mutex_unlock(&tconn->data.mutex);
	}
	if (tconn->meta.socket) {
		mutex_lock(&tconn->meta.mutex);
		kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
		mutex_unlock(&tconn->meta.mutex);
	}
}

/* meta data management */

struct meta_data_on_disk {
	u64 la_size;           /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL */
	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */
	u32 reserved_u32[3];

} __packed;
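
/* drbd_md_sync() below zeroes and fills exactly 512 bytes of this buffer,
 * i.e. the on-disk superblock occupies a single 512 byte sector; the
 * layout above must stay within that. */
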
/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:	DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
	struct meta_data_on_disk *buffer;
	sector_t sector;
	int i;

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	memset(buffer, 0, 512);

	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);

	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;

	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, true);
	}

	/* Update mdev->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

	drbd_md_put_buffer(mdev);
out:
	put_ldev(mdev);
}

/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
 * something goes wrong.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		dev_err(DEV, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
		/* btw: that's Activity Log clean, not "all" clean. */
		dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		rv = ERR_MD_INVALID;
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		rv = ERR_MD_INVALID;
		goto err;
	}

	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED) {
		int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
		mdev->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

 err:
	drbd_md_put_buffer(mdev);
 out:
	put_ldev(mdev);

	return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures drbd_md_sync() gets called within five seconds.
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif

static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (mdev->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(mdev, val);
	}

	mdev->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(mdev);
}

void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
	}
	_drbd_uuid_set(mdev, idx, val);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

	get_random_bytes(&val, sizeof(u64));
	_drbd_uuid_set(mdev, UI_CURRENT, val);
	drbd_print_uuids(mdev, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(mdev);
}

void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	if (val == 0) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
		mdev->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	drbd_md_mark_dirty(mdev);
}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		drbd_md_sync(mdev);
		drbd_bm_set_all(mdev);

		rv = drbd_bm_write(mdev);

		if (!rv) {
			drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
			drbd_md_sync(mdev);
		}

		put_ldev(mdev);
	}

	return rv;
}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	drbd_resume_al(mdev);
	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_bm_clear_all(mdev);
		rv = drbd_bm_write(mdev);
		put_ldev(mdev);
	}

	return rv;
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
	struct drbd_conf *mdev = w->mdev;
	int rv = -EIO;

	D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	if (get_ldev(mdev)) {
		drbd_bm_lock(mdev, work->why, work->flags);
		rv = work->io_fn(mdev);
		drbd_bm_unlock(mdev);
		put_ldev(mdev);
	}

	clear_bit_unlock(BITMAP_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);

	if (work->done)
		work->done(mdev, rv);

	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}

void drbd_ldev_destroy(struct drbd_conf *mdev)
{
	lc_destroy(mdev->resync);
	mdev->resync = NULL;
	lc_destroy(mdev->act_log);
	mdev->act_log = NULL;
	__no_warn(local,
		drbd_free_bc(mdev->ldev);
		mdev->ldev = NULL;);

	clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	D_ASSERT(mdev->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

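/* Ask the worker to make this device D_DISKLESS; the GO_DISKLESS bit
 * guards against queueing the go_diskless work more than once. */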
void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap locking flags (enum bm_flag)
 *
 * While IO on the bitmap happens, application IO is frozen; this ensures
 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
			  int (*io_fn)(struct drbd_conf *),
			  void (*done)(struct drbd_conf *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(current == mdev->tconn->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, mdev->bm_io_work.why);

	mdev->bm_io_work.io_fn = io_fn;
	mdev->bm_io_work.done = done;
	mdev->bm_io_work.why = why;
	mdev->bm_io_work.flags = flags;

	spin_lock_irq(&mdev->tconn->req_lock);
	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);
}
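
/* Typical use, sketched here only for illustration (it mirrors callers
 * elsewhere in drbd; a NULL @done is allowed, see w_bitmap_io() above):
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
 *			     "set_n_write from attaching", BM_LOCKED_MASK);
 *
 * The worker then runs drbd_bmio_set_n_write() with the bitmap locked.
 */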

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap locking flags (enum bm_flag)
 *
 * Freezes application IO while the actual IO operation runs. This function
 * MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current != mdev->tconn->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(mdev);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(mdev);

	return rv;
}
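
/* Sketch of a synchronous use: set all bits and flush the bitmap before
 * forcing a full resync, e.g.
 *
 *	drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *		       "set_n_write from invalidate", BM_LOCKED_MASK);
 */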

void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}

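/* Timer callback: runs in softirq context, so do no metadata IO here;
 * just hand the actual drbd_md_sync() over to the worker. */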
static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
		 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
	drbd_md_sync(mdev);
	return 0;
}

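/* Map a packet number to a human readable string, for logging. */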
const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]                = "Data",
		[P_DATA_REPLY]          = "DataReply",
		[P_RS_DATA_REPLY]       = "RSDataReply",
		[P_BARRIER]             = "Barrier",
		[P_BITMAP]              = "ReportBitMap",
		[P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
		[P_UNPLUG_REMOTE]       = "UnplugRemote",
		[P_DATA_REQUEST]        = "DataRequest",
		[P_RS_DATA_REQUEST]     = "RSDataRequest",
		[P_SYNC_PARAM]          = "SyncParam",
		[P_SYNC_PARAM89]        = "SyncParam89",
		[P_PROTOCOL]            = "ReportProtocol",
		[P_UUIDS]               = "ReportUUIDs",
		[P_SIZES]               = "ReportSizes",
		[P_STATE]               = "ReportState",
		[P_SYNC_UUID]           = "ReportSyncUUID",
		[P_AUTH_CHALLENGE]      = "AuthChallenge",
		[P_AUTH_RESPONSE]       = "AuthResponse",
		[P_PING]                = "Ping",
		[P_PING_ACK]            = "PingAck",
		[P_RECV_ACK]            = "RecvAck",
		[P_WRITE_ACK]           = "WriteAck",
		[P_RS_WRITE_ACK]        = "RSWriteAck",
		[P_DISCARD_WRITE]       = "DiscardWrite",
		[P_NEG_ACK]             = "NegAck",
		[P_NEG_DREPLY]          = "NegDReply",
		[P_NEG_RS_DREPLY]       = "NegRSDReply",
		[P_BARRIER_ACK]         = "BarrierAck",
		[P_STATE_CHG_REQ]       = "StateChgRequest",
		[P_STATE_CHG_REPLY]     = "StateChgReply",
		[P_OV_REQUEST]          = "OVRequest",
		[P_OV_REPLY]            = "OVReply",
		[P_OV_RESULT]           = "OVResult",
		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
		[P_RS_IS_IN_SYNC]       = "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]   = "CBitmap",
		[P_DELAY_PROBE]         = "DelayProbe",
		[P_OUT_OF_SYNC]         = "OutOfSync",
		[P_RETRY_WRITE]         = "RetryWrite",
		[P_RS_CANCEL]           = "RSCancel",
		[P_CONN_ST_CHG_REQ]     = "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY]   = "conn_st_chg_reply",
		[P_PROTOCOL_UPDATE]     = "protocol_update",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}

/**
 * drbd_wait_misc - wait for a request to make progress
 * @mdev:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
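 *
 * Called with mdev->tconn->req_lock held; the lock is released while
 * sleeping and re-acquired before this function returns.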
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up mdev->misc_wait on progress. */
	i->waiting = true;
	prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&mdev->tconn->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&mdev->misc_wait, &wait);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (!timeout || mdev->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701  /* prime */
#define FAULT_RANDOM_REFRESH 10000
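
/* Multiplier and increment are primes; swahw32() in _drbd_fault_random()
 * additionally swaps the 16-bit halfwords of the state, presumably so that
 * the LCG's less regular high bits end up in the low half of the result. */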

/*
 * Crude but fast random-number generator. Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type)
{
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

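/* Decide whether to inject a fault of @type on @mdev: the device must be
 * selected by the fault_devs bitmask (0 selects all devices), and a random
 * draw in 1..100 must fall within the configured fault_rate percentage. */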
unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "***Simulating %s failure\n",
				 _drbd_fault_str(type));
	}

	return ret;
}
#endif

const char *drbd_buildtag(void)
{
	/* When DRBD is built from external sources, this holds a reference
	   to the git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";
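	/* The leading NUL marks the tag as "not yet initialized"; for the
	 * built-in (non-module) case the first byte is patched to 'b' below,
	 * turning the literal into "built-in". */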

	if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
		if (THIS_MODULE != NULL)
			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
		else
#endif
			buildtag[0] = 'b';
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);