/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_req.h"

static int w_make_ov_request(struct drbd_work *w, int cancel);


/* endio handlers:
 *   drbd_md_io_complete (defined here)
 *   drbd_request_endio (defined here)
 *   drbd_peer_request_endio (defined here)
 *   bm_async_io_complete (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 *
 */


/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the resync after dependencies, we grab a write lock, because
   we need stable states on all devices for that. */
rwlock_t global_state_lock;

/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_io_complete(struct bio *bio, int error)
{
	struct drbd_md_io *md_io;
	struct drbd_conf *mdev;

	md_io = (struct drbd_md_io *)bio->bi_private;
	mdev = container_of(md_io, struct drbd_conf, md_io);

	md_io->error = error;

	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
	 * to timeout on the lower level device, and eventually detach from it.
	 * If this io completion runs after that timeout expired, this
	 * drbd_md_put_buffer() may allow us to finally try and re-attach.
	 * During normal operation, this only puts that extra reference
	 * down to 1 again.
	 * Make sure we first drop the reference, and only then signal
	 * completion, or we may (in drbd_al_read_log()) cycle so fast into the
	 * next drbd_md_sync_page_io(), that we trigger the
	 * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
	 */
	drbd_md_put_buffer(mdev);
	md_io->done = 1;
	wake_up(&mdev->misc_wait);
	bio_put(bio);
	put_ldev(mdev);
}

/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = peer_req->w.mdev;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	mdev->read_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list);
	if (list_empty(&mdev->read_ee))
		wake_up(&mdev->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(mdev, DRBD_IO_ERROR);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage.  */
static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = peer_req->w.mdev;
	struct drbd_interval i;
	int do_wake;
	u64 block_id;
	int do_al_complete_io;

	/* after we moved peer_req to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	i = peer_req->i;
	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
	block_id = peer_req->block_id;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	mdev->writ_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
	list_add_tail(&peer_req->w.list, &mdev->done_ee);

	/*
	 * Do not remove from the write_requests tree here: we did not send the
	 * Ack yet and did not wake possibly waiting conflicting requests.
	 * Removed from the tree from "drbd_process_done_ee" within the
	 * appropriate w.cb (e_end_block/e_end_resync_block) or from
	 * _drbd_clear_done_ee.
	 */

	do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);

	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(mdev, DRBD_IO_ERROR);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (block_id == ID_SYNCER)
		drbd_rs_complete_io(mdev, i.sector);

	if (do_wake)
		wake_up(&mdev->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(mdev, &i);

	wake_asender(mdev->tconn);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_peer_request_endio(struct bio *bio, int error)
{
	struct drbd_peer_request *peer_req = bio->bi_private;
	struct drbd_conf *mdev = peer_req->w.mdev;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error && __ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)peer_req->i.sector);
	if (!error && !uptodate) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
					is_write ? "write" : "read",
					(unsigned long long)peer_req->i.sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &peer_req->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&peer_req->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(peer_req);
		else
			drbd_endio_read_sec_final(peer_req);
	}
}

/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_request_endio(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->w.mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}


	/* If this request was aborted locally before,
	 * but now was completed "successfully",
	 * chances are that this caused arbitrary data corruption.
	 *
	 * "aborting" requests, or force-detaching the disk, is intended for
	 * completely blocked/hung local backing devices which no longer
	 * complete requests at all, not even do error completions.  In this
	 * situation, usually a hard-reset and failover is the only way out.
	 *
	 * By "aborting", basically faking a local error-completion,
	 * we allow for a more graceful switchover by cleanly migrating services.
	 * Still the affected node has to be rebooted "soon".
	 *
	 * By completing these requests, we allow the upper layers to re-use
	 * the associated data pages.
	 *
	 * If later the local backing device "recovers", and now DMAs some data
	 * from disk into the original request pages, in the best case it will
	 * just put random data into unused pages; but typically it will corrupt
	 * meanwhile completely unrelated data, causing all sorts of damage.
	 *
	 * Which means delayed successful completion,
	 * especially for READ requests,
	 * is a reason to panic().
	 *
	 * We assume that a delayed *error* completion is OK,
	 * though we still will complain noisily about it.
	 */
	if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_emerg(DEV, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");

		if (!error)
			panic("possible random memory corruption caused by delayed completion of aborted local request\n");
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? WRITE_COMPLETED_WITH_ERROR
			: (bio_rw(bio) == READ)
			  ? READ_COMPLETED_WITH_ERROR
			  : READ_AHEAD_COMPLETED_WITH_ERROR;
	} else
		what = COMPLETED_OK;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	put_ldev(mdev);

	if (m.bio)
		complete_master_bio(mdev, &m);
}

void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
		  struct drbd_peer_request *peer_req, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct page *page = peer_req->pages;
	struct page *tmp;
	unsigned len;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		sg_set_page(&sg, page, PAGE_SIZE, 0);
		crypto_hash_update(&desc, &sg, sg.length);
		page = tmp;
	}
	/* and now the last, possibly only partially used page */
	len = peer_req->i.size & (PAGE_SIZE - 1);
	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
}
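
/* A note on drbd_csum_ee() above (explanatory, grounded in the code itself):
 * peer_req->pages is a single-linked chain of pages walked via
 * page_chain_next(), and every page except the last is assumed fully used.
 * Only the final page may be partial, hence
 *
 *	len = peer_req->i.size & (PAGE_SIZE - 1);
 *	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
 *
 * where "len ?: PAGE_SIZE" (a GCC extension) picks PAGE_SIZE when i.size is
 * an exact multiple of the page size, i.e. when the last page is fully used. */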

void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct bio_vec *bvec;
	int i;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	bio_for_each_segment(bvec, bio, i) {
		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		crypto_hash_update(&desc, &sg, sg.length);
	}
	crypto_hash_final(&desc, digest);
}

/* MAYBE merge common code with w_e_end_ov_req */
static int w_e_send_csum(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	int digest_size;
	void *digest;
	int err = 0;

	if (unlikely(cancel))
		goto out;

	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		sector_t sector = peer_req->i.sector;
		unsigned int size = peer_req->i.size;
		drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
		/* Free peer_req and pages before send.
		 * In case we block on congestion, we could otherwise run into
		 * some distributed deadlock, if the other side blocks on
		 * congestion as well, because our receiver blocks in
		 * drbd_alloc_pages due to pp_in_use > max_buffers. */
		drbd_free_peer_req(mdev, peer_req);
		peer_req = NULL;
		inc_rs_pending(mdev);
		err = drbd_send_drequest_csum(mdev, sector, size,
					      digest, digest_size,
					      P_CSUM_RS_REQUEST);
		kfree(digest);
	} else {
		dev_err(DEV, "kmalloc() of digest failed.\n");
		err = -ENOMEM;
	}

out:
	if (peer_req)
		drbd_free_peer_req(mdev, peer_req);

	if (unlikely(err))
		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
	return err;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_peer_request *peer_req;

	if (!get_ldev(mdev))
		return -EIO;

	if (drbd_rs_should_slow_down(mdev, sector))
		goto defer;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	peer_req = drbd_alloc_peer_req(mdev, ID_SYNCER /* unused */, sector,
				       size, GFP_TRY);
	if (!peer_req)
		goto defer;

	peer_req->w.cb = w_e_send_csum;
	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
		return 0;

	/* If it failed because of ENOMEM, retry should help.  If it failed
	 * because bio_add_page failed (probably broken lower level driver),
	 * retry may or may not help.
	 * If it does not, you may need to force disconnect. */
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_peer_req(mdev, peer_req);
defer:
	put_ldev(mdev);
	return -EAGAIN;
}
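
/* Return-value contract of read_for_csum() above, as consumed by the switch
 * statement in w_make_resync_request() further down:
 *	 0	peer_req allocated, queued on read_ee, and submitted
 *	-EIO	no usable local disk (get_ldev() failed); abort the request
 *	-EAGAIN	temporary condition (throttled, allocation failed, or
 *		submission failed); the caller rolls back and requeues */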

int w_resync_timer(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	switch (mdev->state.conn) {
	case C_VERIFY_S:
		w_make_ov_request(w, cancel);
		break;
	case C_SYNC_TARGET:
		w_make_resync_request(w, cancel);
		break;
	}

	return 0;
}

void resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	if (list_empty(&mdev->resync_work.list))
		drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
}
static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}

static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size)
		fb->head_index = 0;

	return ov;
}

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}
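
/* The fifo_buffer above serves as the "plan" of the resync rate controller
 * below, one slot per controller tick.  fifo_push() overwrites the slot at
 * head_index and returns the value that falls out the other end, so pushing
 * 0 each tick both retires and retrieves the correction planned for the
 * current tick.  fifo_add_val() spreads an additional correction evenly
 * over all planned ticks. */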

struct fifo_buffer *fifo_alloc(int fifo_size)
{
	struct fifo_buffer *fb;

	fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_KERNEL);
	if (!fb)
		return NULL;

	fb->head_index = 0;
	fb->size = fifo_size;
	fb->total = 0;

	return fb;
}

static int drbd_rs_controller(struct drbd_conf *mdev)
{
	struct disk_conf *dc;
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	unsigned int want;     /* The number of sectors we want in the proxy */
	int req_sect; /* Number of sectors to request in this turn */
	int correction; /* Number of sectors more we need in the proxy */
	int cps; /* correction per invocation of drbd_rs_controller() */
	int steps; /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;
	struct fifo_buffer *plan;

	sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
	mdev->rs_in_flight -= sect_in;

	dc = rcu_dereference(mdev->ldev->disk_conf);
	plan = rcu_dereference(mdev->rs_plan_s);

	steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = dc->c_fill_target ? dc->c_fill_target :
			sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - mdev->rs_in_flight - plan->total;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(plan, cps);
	plan->total += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(plan, 0);
	plan->total -= curr_corr;

	req_sect = sect_in + curr_corr;
	if (req_sect < 0)
		req_sect = 0;

	max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	/*
	dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, mdev->rs_in_flight, want, correction,
		 steps, cps, mdev->rs_planed, curr_corr, req_sect);
	*/

	return req_sect;
}
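
/* A rough worked example of the controller arithmetic above, with
 * illustrative numbers (not taken from the source): suppose steps = 10,
 * want = 2000 sectors, rs_in_flight = 1200, plan->total = 500.  Then
 *
 *	correction = 2000 - 1200 - 500 = 300 sectors still missing,
 *	cps        = 300 / 10          = 30 sectors added to each planned step.
 *
 * fifo_push(plan, 0) then pops whatever was planned for the current step,
 * so req_sect follows the observed reply rate (sect_in) plus this step's
 * correction, clamped to [0, c_max_rate's worth of sectors per SLEEP_TIME]. */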

static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
	int number;

	rcu_read_lock();
	if (rcu_dereference(mdev->rs_plan_s)->size) {
		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		mdev->c_sync_rate = rcu_dereference(mdev->ldev->disk_conf)->resync_rate;
		number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
	}
	rcu_read_unlock();

	/* ignore the amount of pending requests, the resync controller should
	 * throttle down to incoming reply rate soon enough anyways. */
	return number;
}
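
/* Unit conversion in drbd_rs_number_requests() above: the controller works
 * in 512-byte sectors, while resync requests are issued per bitmap block,
 * so ">> (BM_BLOCK_SHIFT - 9)" turns sectors into request counts (with
 * 4 KiB bitmap blocks, BM_BLOCK_SHIFT == 12, that is a divide by 8).
 * c_sync_rate is kept up to date in KiB/s for status reporting. */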

int w_make_resync_request(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	int max_bio_size;
	int number, rollback_i, size;
	int align, queued, sndbuf;
	int i = 0;

	if (unlikely(cancel))
		return 0;

	if (mdev->rs_total == 0) {
		/* empty resync? */
		drbd_resync_finished(mdev);
		return 0;
	}

	if (!get_ldev(mdev)) {
		/* Since we only need to access mdev->rsync, a
		   get_ldev_if_state(mdev, D_FAILED) would be sufficient, but
		   continuing resync with a broken disk makes no sense at
		   all */
		dev_err(DEV, "Disk broke down during resync!\n");
		return 0;
	}

	max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
	number = drbd_rs_number_requests(mdev);
	if (number == 0)
		goto requeue;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests, when half of the send buffer is filled */
		mutex_lock(&mdev->tconn->data.mutex);
		if (mdev->tconn->data.socket) {
			queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
			sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
		} else {
			queued = 1;
			sndbuf = 0;
		}
		mutex_unlock(&mdev->tconn->data.mutex);
		if (queued > sndbuf / 2)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);

		if (bit == DRBD_END_OF_BITMAP) {
			mdev->bm_resync_fo = drbd_bm_bits(mdev);
			put_ldev(mdev);
			return 0;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->bm_resync_fo = bit;
			goto requeue;
		}
		mdev->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
			drbd_rs_complete_io(mdev, sector);
			goto next_sector;
		}

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we have already the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		for (;;) {
			if (size + BM_BLOCK_SIZE > max_bio_size)
				break;

			/* Be always aligned */
			if (sector & ((1<<(align+3))-1))
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(mdev, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			mdev->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;
		if (mdev->tconn->agreed_pro_version >= 89 && mdev->tconn->csums_tfm) {
			switch (read_for_csum(mdev, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(mdev);
				return -EIO;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(mdev, sector);
				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			int err;

			inc_rs_pending(mdev);
			err = drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
						 sector, size, ID_SYNCER);
			if (err) {
				dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(mdev);
				put_ldev(mdev);
				return err;
			}
		}
	}

	if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		put_ldev(mdev);
		return 0;
	}

requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(mdev);
	return 0;
}
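
/* Pacing of w_make_resync_request() above: "i" counts the bitmap blocks put
 * in flight this turn, so "i << (BM_BLOCK_SHIFT - 9)" converts that back
 * into sectors for rs_in_flight, the quantity drbd_rs_controller() balances
 * against.  Re-arming resync_timer at jiffies + SLEEP_TIME turns the whole
 * request generator into a fixed-interval loop. */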

static int w_make_ov_request(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	int number, i, size;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	bool stop_sector_reached = false;

	if (unlikely(cancel))
		return 1;

	number = drbd_rs_number_requests(mdev);

	sector = mdev->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity)
			return 1;

		/* We check for "finished" only in the reply path:
		 * w_e_end_ov_reply().
		 * We need to send at least one request out. */
		stop_sector_reached = i > 0
			&& verify_can_do_stop_sector(mdev)
			&& sector >= mdev->ov_stop_sector;
		if (stop_sector_reached)
			break;

		size = BM_BLOCK_SIZE;

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(mdev);
		if (drbd_send_ov_request(mdev, sector, size)) {
			dec_rs_pending(mdev);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	mdev->ov_position = sector;

requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	if (i == 0 || !stop_sector_reached)
		mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}

int w_ov_finished(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	kfree(w);
	ov_out_of_sync_print(mdev);
	drbd_resync_finished(mdev);

	return 0;
}

static int w_resync_finished(struct drbd_work *w, int cancel)
{
	struct drbd_conf *mdev = w->mdev;
	kfree(w);

	drbd_resync_finished(mdev);

	return 0;
}

static void ping_peer(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;

	clear_bit(GOT_PING_ACK, &tconn->flags);
	request_ping(tconn);
	wait_event(tconn->ping_wait,
		   test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
}
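
/* ping_peer() performs a full ping/ping-ack round trip on the meta-data
 * socket.  drbd_resync_finished() below appears to use it as a simple
 * barrier: by the time the ping-ack arrives, packets sent before the ping
 * on that socket have been processed by the peer, so both sides see the
 * final resync counters before the state change is committed (an
 * interpretation of the call site, not a statement from the source). */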
| 809 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 810 | int drbd_resync_finished(struct drbd_conf *mdev) |
| 811 | { |
| 812 | unsigned long db, dt, dbdt; |
| 813 | unsigned long n_oos; |
| 814 | union drbd_state os, ns; |
| 815 | struct drbd_work *w; |
| 816 | char *khelper_cmd = NULL; |
Lars Ellenberg | 2652561 | 2010-11-05 09:56:33 +0100 | [diff] [blame] | 817 | int verify_done = 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 818 | |
| 819 | /* Remove all elements from the resync LRU. Since future actions |
| 820 | * might set bits in the (main) bitmap, then the entries in the |
| 821 | * resync LRU would be wrong. */ |
| 822 | if (drbd_rs_del_all(mdev)) { |
| 823 | /* In case this is not possible now, most probably because |
| 824 | * there are P_RS_DATA_REPLY Packets lingering on the worker's |
| 825 | * queue (or even the read operations for those packets |
| 826 | * is not finished by now). Retry in 100ms. */ |
| 827 | |
Philipp Reisner | 20ee639 | 2011-01-18 15:28:59 +0100 | [diff] [blame] | 828 | schedule_timeout_interruptible(HZ / 10); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 829 | w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); |
| 830 | if (w) { |
| 831 | w->cb = w_resync_finished; |
Philipp Reisner | 9b743da | 2011-07-15 18:15:45 +0200 | [diff] [blame] | 832 | w->mdev = mdev; |
Lars Ellenberg | d5b27b0 | 2011-11-14 15:42:37 +0100 | [diff] [blame] | 833 | drbd_queue_work(&mdev->tconn->sender_work, w); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 834 | return 1; |
| 835 | } |
| 836 | dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); |
| 837 | } |
| 838 | |
| 839 | dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; |
| 840 | if (dt <= 0) |
| 841 | dt = 1; |
Lars Ellenberg | 58ffa58 | 2012-07-26 14:09:49 +0200 | [diff] [blame] | 842 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 843 | db = mdev->rs_total; |
Lars Ellenberg | 58ffa58 | 2012-07-26 14:09:49 +0200 | [diff] [blame] | 844 | /* adjust for verify start and stop sectors, respective reached position */ |
| 845 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) |
| 846 | db -= mdev->ov_left; |
| 847 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 848 | dbdt = Bit2KB(db/dt); |
| 849 | mdev->rs_paused /= HZ; |
| 850 | |
| 851 | if (!get_ldev(mdev)) |
| 852 | goto out; |
| 853 | |
Lars Ellenberg | af85e8e | 2010-10-07 16:07:55 +0200 | [diff] [blame] | 854 | ping_peer(mdev); |
| 855 | |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 856 | spin_lock_irq(&mdev->tconn->req_lock); |
Philipp Reisner | 78bae59 | 2011-03-28 15:40:12 +0200 | [diff] [blame] | 857 | os = drbd_read_state(mdev); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 858 | |
Lars Ellenberg | 2652561 | 2010-11-05 09:56:33 +0100 | [diff] [blame] | 859 | verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); |
| 860 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 861 | /* This protects us against multiple calls (that can happen in the presence |
| 862 | of application IO), and against connectivity loss just before we arrive here. */ |
| 863 | if (os.conn <= C_CONNECTED) |
| 864 | goto out_unlock; |
| 865 | |
| 866 | ns = os; |
| 867 | ns.conn = C_CONNECTED; |
| 868 | |
| 869 | dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", |
Lars Ellenberg | 58ffa58 | 2012-07-26 14:09:49 +0200 | [diff] [blame] | 870 | verify_done ? "Online verify" : "Resync", |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 871 | dt + mdev->rs_paused, mdev->rs_paused, dbdt); |
| 872 | |
| 873 | n_oos = drbd_bm_total_weight(mdev); |
| 874 | |
| 875 | if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) { |
| 876 | if (n_oos) { |
 | 877 | 			dev_alert(DEV, "Online verify found %lu %dk blocks out of sync!\n",
| 878 | n_oos, Bit2KB(1)); |
| 879 | khelper_cmd = "out-of-sync"; |
| 880 | } |
| 881 | } else { |
| 882 | D_ASSERT((n_oos - mdev->rs_failed) == 0); |
| 883 | |
| 884 | if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) |
| 885 | khelper_cmd = "after-resync-target"; |
| 886 | |
Lars Ellenberg | f399002 | 2011-03-23 14:31:09 +0100 | [diff] [blame] | 887 | if (mdev->tconn->csums_tfm && mdev->rs_total) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 888 | const unsigned long s = mdev->rs_same_csum; |
| 889 | const unsigned long t = mdev->rs_total; |
| 890 | const int ratio = |
| 891 | (t == 0) ? 0 : |
| 892 | (t < 100000) ? ((s*100)/t) : (s/(t/100)); |
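			/*
			 * The split at t == 100000 avoids overflow in the
			 * percentage math: s and t are unsigned long, and on
			 * 32-bit hosts s*100 could wrap for large s, so the
			 * scaling moves into the divisor instead.  Illustrative
			 * values: s = 900000, t = 1000000 takes the second
			 * branch, 900000 / (1000000/100) = 90 (%).
			 */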
Bart Van Assche | 24c4830 | 2011-05-21 18:32:29 +0200 | [diff] [blame] | 893 | dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; " |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 894 | "transferred %luK total %luK\n", |
| 895 | ratio, |
| 896 | Bit2KB(mdev->rs_same_csum), |
| 897 | Bit2KB(mdev->rs_total - mdev->rs_same_csum), |
| 898 | Bit2KB(mdev->rs_total)); |
| 899 | } |
| 900 | } |
| 901 | |
| 902 | if (mdev->rs_failed) { |
| 903 | dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed); |
| 904 | |
| 905 | if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) { |
| 906 | ns.disk = D_INCONSISTENT; |
| 907 | ns.pdsk = D_UP_TO_DATE; |
| 908 | } else { |
| 909 | ns.disk = D_UP_TO_DATE; |
| 910 | ns.pdsk = D_INCONSISTENT; |
| 911 | } |
| 912 | } else { |
| 913 | ns.disk = D_UP_TO_DATE; |
| 914 | ns.pdsk = D_UP_TO_DATE; |
| 915 | |
| 916 | if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) { |
| 917 | if (mdev->p_uuid) { |
| 918 | int i; |
| 919 | for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++) |
| 920 | _drbd_uuid_set(mdev, i, mdev->p_uuid[i]); |
| 921 | drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]); |
| 922 | _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]); |
| 923 | } else { |
| 924 | dev_err(DEV, "mdev->p_uuid is NULL! BUG\n"); |
| 925 | } |
| 926 | } |
| 927 | |
Lars Ellenberg | 62b0da3 | 2011-01-20 13:25:21 +0100 | [diff] [blame] | 928 | if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) { |
| 929 | /* for verify runs, we don't update uuids here, |
| 930 | * so there would be nothing to report. */ |
| 931 | drbd_uuid_set_bm(mdev, 0UL); |
| 932 | drbd_print_uuids(mdev, "updated UUIDs"); |
| 933 | if (mdev->p_uuid) { |
| 934 | /* Now the two UUID sets are equal, update what we |
| 935 | * know of the peer. */ |
| 936 | int i; |
| 937 | for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++) |
| 938 | mdev->p_uuid[i] = mdev->ldev->md.uuid[i]; |
| 939 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 940 | } |
| 941 | } |
| 942 | |
| 943 | _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); |
| 944 | out_unlock: |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 945 | spin_unlock_irq(&mdev->tconn->req_lock); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 946 | put_ldev(mdev); |
| 947 | out: |
| 948 | mdev->rs_total = 0; |
| 949 | mdev->rs_failed = 0; |
| 950 | mdev->rs_paused = 0; |
Lars Ellenberg | 58ffa58 | 2012-07-26 14:09:49 +0200 | [diff] [blame] | 951 | |
| 952 | /* reset start sector, if we reached end of device */ |
| 953 | if (verify_done && mdev->ov_left == 0) |
Lars Ellenberg | 2652561 | 2010-11-05 09:56:33 +0100 | [diff] [blame] | 954 | mdev->ov_start_sector = 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 955 | |
Lars Ellenberg | 13d4268 | 2010-10-13 17:37:54 +0200 | [diff] [blame] | 956 | drbd_md_sync(mdev); |
| 957 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 958 | if (khelper_cmd) |
| 959 | drbd_khelper(mdev, khelper_cmd); |
| 960 | |
| 961 | return 1; |
| 962 | } |
| 963 | |
| 964 | /* helper */ |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 965 | static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 966 | { |
Andreas Gruenbacher | 045417f | 2011-04-07 21:34:24 +0200 | [diff] [blame] | 967 | if (drbd_peer_req_has_active_page(peer_req)) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 968 | /* This might happen if sendpage() has not finished */ |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 969 | int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT; |
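		/* The line above is an open-coded DIV_ROUND_UP(size, PAGE_SIZE):
		 * e.g. a 9000-byte request with 4 KiB pages gives
		 * (9000 + 4095) >> 12 = 3 pages to account for. */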
Lars Ellenberg | 435f074 | 2010-09-06 12:30:25 +0200 | [diff] [blame] | 970 | atomic_add(i, &mdev->pp_in_use_by_net); |
| 971 | atomic_sub(i, &mdev->pp_in_use); |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 972 | spin_lock_irq(&mdev->tconn->req_lock); |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 973 | list_add_tail(&peer_req->w.list, &mdev->net_ee); |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 974 | spin_unlock_irq(&mdev->tconn->req_lock); |
Lars Ellenberg | 435f074 | 2010-09-06 12:30:25 +0200 | [diff] [blame] | 975 | wake_up(&drbd_pp_wait); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 976 | } else |
Andreas Gruenbacher | 3967deb | 2011-04-06 16:16:56 +0200 | [diff] [blame] | 977 | drbd_free_peer_req(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 978 | } |
| 979 | |
| 980 | /** |
| 981 | * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST |
| 982 | * @mdev: DRBD device. |
| 983 | * @w: work object. |
| 984 | * @cancel: The connection will be closed anyways |
| 985 | */ |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 986 | int w_e_end_data_req(struct drbd_work *w, int cancel) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 987 | { |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 988 | struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 989 | struct drbd_conf *mdev = w->mdev; |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 990 | int err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 991 | |
| 992 | if (unlikely(cancel)) { |
Andreas Gruenbacher | 3967deb | 2011-04-06 16:16:56 +0200 | [diff] [blame] | 993 | drbd_free_peer_req(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 994 | dec_unacked(mdev); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 995 | return 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 996 | } |
| 997 | |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 998 | if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 999 | err = drbd_send_block(mdev, P_DATA_REPLY, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1000 | } else { |
| 1001 | if (__ratelimit(&drbd_ratelimit_state)) |
| 1002 | dev_err(DEV, "Sending NegDReply. sector=%llus.\n", |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1003 | (unsigned long long)peer_req->i.sector); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1004 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1005 | err = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1006 | } |
| 1007 | |
| 1008 | dec_unacked(mdev); |
| 1009 | |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1010 | move_to_net_ee_or_free(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1011 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1012 | if (unlikely(err)) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1013 | dev_err(DEV, "drbd_send_block() failed\n"); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1014 | return err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1015 | } |
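/*
 * All w_e_end_*() callbacks below share the skeleton just shown: recover
 * the peer request via container_of(), bail out early on cancel, send a
 * reply block or a (negative) ack, then drop the unacked count and recycle
 * the request.  A condensed sketch (example_peer_req_done is a hypothetical
 * name, for illustration only):
 */
#if 0
static int example_peer_req_done(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	int err = 0;

	if (unlikely(cancel)) {		/* connection is going away anyways */
		drbd_free_peer_req(mdev, peer_req);
		dec_unacked(mdev);
		return 0;
	}
	/* ... send a reply block or a (negative) ack, setting err ... */
	dec_unacked(mdev);
	move_to_net_ee_or_free(mdev, peer_req);
	return err;
}
#endif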
| 1016 | |
| 1017 | /** |
Andreas Gruenbacher | a209b4a | 2011-08-17 12:43:25 +0200 | [diff] [blame] | 1018 | * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1019 | * @mdev: DRBD device. |
| 1020 | * @w: work object. |
| 1021 | * @cancel: The connection will be closed anyways |
| 1022 | */ |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1023 | int w_e_end_rsdata_req(struct drbd_work *w, int cancel) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1024 | { |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1025 | struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1026 | struct drbd_conf *mdev = w->mdev; |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1027 | int err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1028 | |
| 1029 | if (unlikely(cancel)) { |
Andreas Gruenbacher | 3967deb | 2011-04-06 16:16:56 +0200 | [diff] [blame] | 1030 | drbd_free_peer_req(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1031 | dec_unacked(mdev); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1032 | return 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1033 | } |
| 1034 | |
| 1035 | if (get_ldev_if_state(mdev, D_FAILED)) { |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1036 | drbd_rs_complete_io(mdev, peer_req->i.sector); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1037 | put_ldev(mdev); |
| 1038 | } |
| 1039 | |
Philipp Reisner | d612d30 | 2010-12-27 10:53:28 +0100 | [diff] [blame] | 1040 | if (mdev->state.conn == C_AHEAD) { |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1041 | err = drbd_send_ack(mdev, P_RS_CANCEL, peer_req); |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1042 | } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1043 | if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { |
| 1044 | inc_rs_pending(mdev); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1045 | err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1046 | } else { |
| 1047 | if (__ratelimit(&drbd_ratelimit_state)) |
| 1048 | dev_err(DEV, "Not sending RSDataReply, " |
| 1049 | "partner DISKLESS!\n"); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1050 | err = 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1051 | } |
| 1052 | } else { |
| 1053 | if (__ratelimit(&drbd_ratelimit_state)) |
| 1054 | dev_err(DEV, "Sending NegRSDReply. sector %llus.\n", |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1055 | (unsigned long long)peer_req->i.sector); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1056 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1057 | err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1058 | |
| 1059 | /* update resync data with failure */ |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1060 | drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1061 | } |
| 1062 | |
| 1063 | dec_unacked(mdev); |
| 1064 | |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1065 | move_to_net_ee_or_free(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1066 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1067 | if (unlikely(err)) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1068 | dev_err(DEV, "drbd_send_block() failed\n"); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1069 | return err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1070 | } |
| 1071 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1072 | int w_e_end_csum_rs_req(struct drbd_work *w, int cancel) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1073 | { |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1074 | struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1075 | struct drbd_conf *mdev = w->mdev; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1076 | struct digest_info *di; |
| 1077 | int digest_size; |
| 1078 | void *digest = NULL; |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1079 | int err, eq = 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1080 | |
| 1081 | if (unlikely(cancel)) { |
Andreas Gruenbacher | 3967deb | 2011-04-06 16:16:56 +0200 | [diff] [blame] | 1082 | drbd_free_peer_req(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1083 | dec_unacked(mdev); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1084 | return 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1085 | } |
| 1086 | |
Lars Ellenberg | 1d53f09 | 2010-09-05 01:13:24 +0200 | [diff] [blame] | 1087 | if (get_ldev(mdev)) { |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1088 | drbd_rs_complete_io(mdev, peer_req->i.sector); |
Lars Ellenberg | 1d53f09 | 2010-09-05 01:13:24 +0200 | [diff] [blame] | 1089 | put_ldev(mdev); |
| 1090 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1091 | |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1092 | di = peer_req->digest; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1093 | |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1094 | if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1095 | /* quick hack to try to avoid a race against reconfiguration. |
| 1096 | * a real fix would be much more involved, |
| 1097 | * introducing more locking mechanisms */ |
Lars Ellenberg | f399002 | 2011-03-23 14:31:09 +0100 | [diff] [blame] | 1098 | if (mdev->tconn->csums_tfm) { |
| 1099 | digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1100 | D_ASSERT(digest_size == di->digest_size); |
| 1101 | digest = kmalloc(digest_size, GFP_NOIO); |
| 1102 | } |
| 1103 | if (digest) { |
Lars Ellenberg | f399002 | 2011-03-23 14:31:09 +0100 | [diff] [blame] | 1104 | drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1105 | eq = !memcmp(digest, di->digest, digest_size); |
| 1106 | kfree(digest); |
| 1107 | } |
| 1108 | |
| 1109 | if (eq) { |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1110 | drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size); |
Lars Ellenberg | 676396d | 2010-03-03 02:08:22 +0100 | [diff] [blame] | 1111 | /* rs_same_csums unit is BM_BLOCK_SIZE */ |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1112 | mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT; |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1113 | err = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1114 | } else { |
| 1115 | inc_rs_pending(mdev); |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1116 | peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */ |
| 1117 | peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */ |
Philipp Reisner | 204bba9 | 2010-08-23 16:17:13 +0200 | [diff] [blame] | 1118 | kfree(di); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1119 | err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1120 | } |
| 1121 | } else { |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1122 | err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1123 | if (__ratelimit(&drbd_ratelimit_state)) |
| 1124 | dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n"); |
| 1125 | } |
| 1126 | |
| 1127 | dec_unacked(mdev); |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1128 | move_to_net_ee_or_free(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1129 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1130 | if (unlikely(err)) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1131 | dev_err(DEV, "drbd_send_block/ack() failed\n"); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1132 | return err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1133 | } |
| 1134 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1135 | int w_e_end_ov_req(struct drbd_work *w, int cancel) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1136 | { |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1137 | struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1138 | struct drbd_conf *mdev = w->mdev; |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1139 | sector_t sector = peer_req->i.sector; |
| 1140 | unsigned int size = peer_req->i.size; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1141 | int digest_size; |
| 1142 | void *digest; |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1143 | int err = 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1144 | |
| 1145 | if (unlikely(cancel)) |
| 1146 | goto out; |
| 1147 | |
Lars Ellenberg | f399002 | 2011-03-23 14:31:09 +0100 | [diff] [blame] | 1148 | digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1149 | digest = kmalloc(digest_size, GFP_NOIO); |
Philipp Reisner | 8f21420 | 2011-03-01 15:52:35 +0100 | [diff] [blame] | 1150 | if (!digest) { |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1151 | err = 1; /* terminate the connection in case the allocation failed */ |
Philipp Reisner | 8f21420 | 2011-03-01 15:52:35 +0100 | [diff] [blame] | 1152 | goto out; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1153 | } |
| 1154 | |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1155 | if (likely(!(peer_req->flags & EE_WAS_ERROR))) |
Lars Ellenberg | f399002 | 2011-03-23 14:31:09 +0100 | [diff] [blame] | 1156 | drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest); |
Philipp Reisner | 8f21420 | 2011-03-01 15:52:35 +0100 | [diff] [blame] | 1157 | else |
| 1158 | memset(digest, 0, digest_size); |
| 1159 | |
Lars Ellenberg | 53ea433 | 2011-03-08 17:11:40 +0100 | [diff] [blame] | 1160 | 	/* Free peer_req and pages before send.
| 1161 | * In case we block on congestion, we could otherwise run into |
| 1162 | * some distributed deadlock, if the other side blocks on |
| 1163 | * congestion as well, because our receiver blocks in |
Andreas Gruenbacher | c37c8ec | 2011-04-07 21:02:09 +0200 | [diff] [blame] | 1164 | * drbd_alloc_pages due to pp_in_use > max_buffers. */ |
Andreas Gruenbacher | 3967deb | 2011-04-06 16:16:56 +0200 | [diff] [blame] | 1165 | drbd_free_peer_req(mdev, peer_req); |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1166 | peer_req = NULL; |
Philipp Reisner | 8f21420 | 2011-03-01 15:52:35 +0100 | [diff] [blame] | 1167 | inc_rs_pending(mdev); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1168 | err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY); |
| 1169 | if (err) |
Philipp Reisner | 8f21420 | 2011-03-01 15:52:35 +0100 | [diff] [blame] | 1170 | dec_rs_pending(mdev); |
| 1171 | kfree(digest); |
| 1172 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1173 | out: |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1174 | if (peer_req) |
Andreas Gruenbacher | 3967deb | 2011-04-06 16:16:56 +0200 | [diff] [blame] | 1175 | drbd_free_peer_req(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1176 | dec_unacked(mdev); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1177 | return err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1178 | } |
| 1179 | |
Andreas Gruenbacher | 8f7bed7 | 2010-12-19 23:53:14 +0100 | [diff] [blame] | 1180 | void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1181 | { |
| 1182 | if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) { |
| 1183 | mdev->ov_last_oos_size += size>>9; |
| 1184 | } else { |
| 1185 | mdev->ov_last_oos_start = sector; |
| 1186 | mdev->ov_last_oos_size = size>>9; |
| 1187 | } |
| 1188 | drbd_set_out_of_sync(mdev, sector, size); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1189 | } |
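/*
 * drbd_ov_out_of_sync_found() above coalesces adjacent mismatches into a
 * single run before reporting.  Worked example (illustrative values, size
 * in bytes, sectors of 512 bytes): a first 4 KiB mismatch at sector 1000
 * records start = 1000, size = 8 sectors; a following mismatch at sector
 * 1008 satisfies 1000 + 8 == 1008 and only grows the run to 16 sectors.
 */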
| 1190 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1191 | int w_e_end_ov_reply(struct drbd_work *w, int cancel) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1192 | { |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1193 | struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1194 | struct drbd_conf *mdev = w->mdev; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1195 | struct digest_info *di; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1196 | void *digest; |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1197 | sector_t sector = peer_req->i.sector; |
| 1198 | unsigned int size = peer_req->i.size; |
Lars Ellenberg | 53ea433 | 2011-03-08 17:11:40 +0100 | [diff] [blame] | 1199 | int digest_size; |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1200 | int err, eq = 0; |
Lars Ellenberg | 58ffa58 | 2012-07-26 14:09:49 +0200 | [diff] [blame] | 1201 | bool stop_sector_reached = false; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1202 | |
| 1203 | if (unlikely(cancel)) { |
Andreas Gruenbacher | 3967deb | 2011-04-06 16:16:56 +0200 | [diff] [blame] | 1204 | drbd_free_peer_req(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1205 | dec_unacked(mdev); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1206 | return 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1207 | } |
| 1208 | |
| 1209 | /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all |
| 1210 | * the resync lru has been cleaned up already */ |
Lars Ellenberg | 1d53f09 | 2010-09-05 01:13:24 +0200 | [diff] [blame] | 1211 | if (get_ldev(mdev)) { |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1212 | drbd_rs_complete_io(mdev, peer_req->i.sector); |
Lars Ellenberg | 1d53f09 | 2010-09-05 01:13:24 +0200 | [diff] [blame] | 1213 | put_ldev(mdev); |
| 1214 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1215 | |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1216 | di = peer_req->digest; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1217 | |
Andreas Gruenbacher | db830c4 | 2011-02-04 15:57:48 +0100 | [diff] [blame] | 1218 | if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { |
Lars Ellenberg | f399002 | 2011-03-23 14:31:09 +0100 | [diff] [blame] | 1219 | digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1220 | digest = kmalloc(digest_size, GFP_NOIO); |
| 1221 | if (digest) { |
Lars Ellenberg | f399002 | 2011-03-23 14:31:09 +0100 | [diff] [blame] | 1222 | drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1223 | |
| 1224 | D_ASSERT(digest_size == di->digest_size); |
| 1225 | eq = !memcmp(digest, di->digest, digest_size); |
| 1226 | kfree(digest); |
| 1227 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1228 | } |
| 1229 | |
Lars Ellenberg | 9676c76 | 2011-02-22 14:02:31 +0100 | [diff] [blame] | 1230 | /* Free peer_req and pages before send. |
| 1231 | * In case we block on congestion, we could otherwise run into |
| 1232 | * some distributed deadlock, if the other side blocks on |
| 1233 | * congestion as well, because our receiver blocks in |
Andreas Gruenbacher | c37c8ec | 2011-04-07 21:02:09 +0200 | [diff] [blame] | 1234 | * drbd_alloc_pages due to pp_in_use > max_buffers. */ |
Andreas Gruenbacher | 3967deb | 2011-04-06 16:16:56 +0200 | [diff] [blame] | 1235 | drbd_free_peer_req(mdev, peer_req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1236 | if (!eq) |
Andreas Gruenbacher | 8f7bed7 | 2010-12-19 23:53:14 +0100 | [diff] [blame] | 1237 | drbd_ov_out_of_sync_found(mdev, sector, size); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1238 | else |
Andreas Gruenbacher | 8f7bed7 | 2010-12-19 23:53:14 +0100 | [diff] [blame] | 1239 | ov_out_of_sync_print(mdev); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1240 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1241 | err = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, |
Andreas Gruenbacher | fa79abd | 2011-03-16 01:31:39 +0100 | [diff] [blame] | 1242 | eq ? ID_IN_SYNC : ID_OUT_OF_SYNC); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1243 | |
Lars Ellenberg | 53ea433 | 2011-03-08 17:11:40 +0100 | [diff] [blame] | 1244 | dec_unacked(mdev); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1245 | |
Lars Ellenberg | ea5442a | 2010-11-05 09:48:01 +0100 | [diff] [blame] | 1246 | --mdev->ov_left; |
| 1247 | |
| 1248 | /* let's advance progress step marks only for every other megabyte */ |
| 1249 | if ((mdev->ov_left & 0x200) == 0x200) |
| 1250 | drbd_advance_rs_marks(mdev, mdev->ov_left); |
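	/* (x & 0x200) == 0x200 is a cheap periodic gate: it tests bit 9 of
	 * the remaining-work counter, which is set during alternating bands
	 * of 512 units, so the mark update runs intermittently instead of
	 * on every single reply. */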
| 1251 | |
Lars Ellenberg | 58ffa58 | 2012-07-26 14:09:49 +0200 | [diff] [blame] | 1252 | stop_sector_reached = verify_can_do_stop_sector(mdev) && |
| 1253 | (sector + (size>>9)) >= mdev->ov_stop_sector; |
| 1254 | |
| 1255 | if (mdev->ov_left == 0 || stop_sector_reached) { |
Andreas Gruenbacher | 8f7bed7 | 2010-12-19 23:53:14 +0100 | [diff] [blame] | 1256 | ov_out_of_sync_print(mdev); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1257 | drbd_resync_finished(mdev); |
| 1258 | } |
| 1259 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1260 | return err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1261 | } |
| 1262 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1263 | int w_prev_work_done(struct drbd_work *w, int cancel) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1264 | { |
| 1265 | struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1266 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1267 | complete(&b->done); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1268 | return 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1269 | } |
| 1270 | |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1271 | /* FIXME |
| 1272 | * We need to track the number of pending barrier acks, |
| 1273 | * and to be able to wait for them. |
| 1274 | * See also comment in drbd_adm_attach before drbd_suspend_io. |
| 1275 | */ |
| 1276 | int drbd_send_barrier(struct drbd_tconn *tconn) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1277 | { |
Andreas Gruenbacher | 9f5bdc3 | 2011-03-28 14:23:08 +0200 | [diff] [blame] | 1278 | struct p_barrier *p; |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1279 | struct drbd_socket *sock; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1280 | |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1281 | sock = &tconn->data; |
| 1282 | p = conn_prepare_command(tconn, sock); |
Andreas Gruenbacher | 9f5bdc3 | 2011-03-28 14:23:08 +0200 | [diff] [blame] | 1283 | if (!p) |
| 1284 | return -EIO; |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1285 | p->barrier = tconn->send.current_epoch_nr; |
| 1286 | p->pad = 0; |
| 1287 | tconn->send.current_epoch_writes = 0; |
| 1288 | |
| 1289 | return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1290 | } |
| 1291 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1292 | int w_send_write_hint(struct drbd_work *w, int cancel) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1293 | { |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1294 | struct drbd_conf *mdev = w->mdev; |
Andreas Gruenbacher | 9f5bdc3 | 2011-03-28 14:23:08 +0200 | [diff] [blame] | 1295 | struct drbd_socket *sock; |
| 1296 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1297 | if (cancel) |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1298 | return 0; |
Andreas Gruenbacher | 9f5bdc3 | 2011-03-28 14:23:08 +0200 | [diff] [blame] | 1299 | sock = &mdev->tconn->data; |
| 1300 | if (!drbd_prepare_command(mdev, sock)) |
| 1301 | return -EIO; |
Andreas Gruenbacher | e658983 | 2011-03-30 12:54:42 +0200 | [diff] [blame] | 1302 | return drbd_send_command(mdev, sock, P_UNPLUG_REMOTE, 0, NULL, 0); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1303 | } |
| 1304 | |
Lars Ellenberg | 4eb9b3c | 2012-08-20 11:05:23 +0200 | [diff] [blame] | 1305 | static void re_init_if_first_write(struct drbd_tconn *tconn, unsigned int epoch) |
| 1306 | { |
| 1307 | if (!tconn->send.seen_any_write_yet) { |
| 1308 | tconn->send.seen_any_write_yet = true; |
| 1309 | tconn->send.current_epoch_nr = epoch; |
| 1310 | tconn->send.current_epoch_writes = 0; |
| 1311 | } |
| 1312 | } |
| 1313 | |
| 1314 | static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch) |
| 1315 | { |
 | 1316 | 	/* nothing to close yet: no write has been seen on this connection */
| 1317 | if (!tconn->send.seen_any_write_yet) |
| 1318 | return; |
| 1319 | if (tconn->send.current_epoch_nr != epoch) { |
| 1320 | if (tconn->send.current_epoch_writes) |
| 1321 | drbd_send_barrier(tconn); |
| 1322 | tconn->send.current_epoch_nr = epoch; |
| 1323 | } |
| 1324 | } |
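/*
 * Epoch bookkeeping in one place: writes are grouped into epochs, and the
 * helpers above make sure a P_BARRIER goes out exactly when the first
 * request of a new epoch is sent while the previous epoch actually saw
 * writes.  The write path below therefore calls, in this order (see
 * w_send_dblock()):
 *
 *	re_init_if_first_write(tconn, req->epoch);
 *	maybe_send_barrier(tconn, req->epoch);
 *	tconn->send.current_epoch_writes++;
 */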
| 1325 | |
Andreas Gruenbacher | 8f7bed7 | 2010-12-19 23:53:14 +0100 | [diff] [blame] | 1326 | int w_send_out_of_sync(struct drbd_work *w, int cancel) |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 1327 | { |
| 1328 | struct drbd_request *req = container_of(w, struct drbd_request, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1329 | struct drbd_conf *mdev = w->mdev; |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1330 | struct drbd_tconn *tconn = mdev->tconn; |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1331 | int err; |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 1332 | |
| 1333 | if (unlikely(cancel)) { |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 1334 | req_mod(req, SEND_CANCELED); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1335 | return 0; |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 1336 | } |
| 1337 | |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1338 | 	/* this time, no tconn->send.current_epoch_writes++;
 | 1339 | 	 * If a barrier gets sent here, it is the closing barrier for the last
 | 1340 | 	 * replicated epoch, before we went into AHEAD mode.
 | 1341 | 	 * No more barriers will be sent until we leave AHEAD mode again. */
Lars Ellenberg | 4eb9b3c | 2012-08-20 11:05:23 +0200 | [diff] [blame] | 1342 | maybe_send_barrier(tconn, req->epoch); |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1343 | |
Andreas Gruenbacher | 8f7bed7 | 2010-12-19 23:53:14 +0100 | [diff] [blame] | 1344 | err = drbd_send_out_of_sync(mdev, req); |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 1345 | req_mod(req, OOS_HANDED_TO_NETWORK); |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 1346 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1347 | return err; |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 1348 | } |
| 1349 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1350 | /** |
| 1351 | * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request |
| 1352 | * @mdev: DRBD device. |
| 1353 | * @w: work object. |
| 1354 | * @cancel: The connection will be closed anyways |
| 1355 | */ |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1356 | int w_send_dblock(struct drbd_work *w, int cancel) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1357 | { |
| 1358 | struct drbd_request *req = container_of(w, struct drbd_request, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1359 | struct drbd_conf *mdev = w->mdev; |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1360 | struct drbd_tconn *tconn = mdev->tconn; |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1361 | int err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1362 | |
| 1363 | if (unlikely(cancel)) { |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 1364 | req_mod(req, SEND_CANCELED); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1365 | return 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1366 | } |
| 1367 | |
Lars Ellenberg | 4eb9b3c | 2012-08-20 11:05:23 +0200 | [diff] [blame] | 1368 | re_init_if_first_write(tconn, req->epoch); |
| 1369 | maybe_send_barrier(tconn, req->epoch); |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1370 | tconn->send.current_epoch_writes++; |
| 1371 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1372 | err = drbd_send_dblock(mdev, req); |
| 1373 | req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1374 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1375 | return err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1376 | } |
| 1377 | |
| 1378 | /** |
| 1379 | * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet |
| 1380 | * @mdev: DRBD device. |
| 1381 | * @w: work object. |
| 1382 | * @cancel: The connection will be closed anyways |
| 1383 | */ |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1384 | int w_send_read_req(struct drbd_work *w, int cancel) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1385 | { |
| 1386 | struct drbd_request *req = container_of(w, struct drbd_request, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1387 | struct drbd_conf *mdev = w->mdev; |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1388 | struct drbd_tconn *tconn = mdev->tconn; |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1389 | int err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1390 | |
| 1391 | if (unlikely(cancel)) { |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 1392 | req_mod(req, SEND_CANCELED); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1393 | return 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1394 | } |
| 1395 | |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1396 | /* Even read requests may close a write epoch, |
 | 1397 | 	 * if there has been any write yet. */
Lars Ellenberg | 4eb9b3c | 2012-08-20 11:05:23 +0200 | [diff] [blame] | 1398 | maybe_send_barrier(tconn, req->epoch); |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1399 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1400 | err = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size, |
Andreas Gruenbacher | 6c1005e | 2011-03-16 01:34:24 +0100 | [diff] [blame] | 1401 | (unsigned long)req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1402 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1403 | req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1404 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1405 | return err; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1406 | } |
| 1407 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1408 | int w_restart_disk_io(struct drbd_work *w, int cancel) |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 1409 | { |
| 1410 | struct drbd_request *req = container_of(w, struct drbd_request, w); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1411 | struct drbd_conf *mdev = w->mdev; |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 1412 | |
Philipp Reisner | 0778286 | 2010-08-31 12:00:50 +0200 | [diff] [blame] | 1413 | if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG) |
Lars Ellenberg | 181286a | 2011-03-31 15:18:56 +0200 | [diff] [blame] | 1414 | drbd_al_begin_io(mdev, &req->i); |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 1415 | |
| 1416 | drbd_req_make_private_bio(req, req->master_bio); |
| 1417 | req->private_bio->bi_bdev = mdev->ldev->backing_bdev; |
| 1418 | generic_make_request(req->private_bio); |
| 1419 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1420 | return 0; |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 1421 | } |
| 1422 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1423 | static int _drbd_may_sync_now(struct drbd_conf *mdev) |
| 1424 | { |
| 1425 | struct drbd_conf *odev = mdev; |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1426 | int resync_after; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1427 | |
| 1428 | while (1) { |
Philipp Reisner | 438c837 | 2011-03-28 14:48:01 +0200 | [diff] [blame] | 1429 | if (!odev->ldev) |
| 1430 | return 1; |
Philipp Reisner | daeda1c | 2011-05-03 15:00:55 +0200 | [diff] [blame] | 1431 | rcu_read_lock(); |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1432 | resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after; |
Philipp Reisner | daeda1c | 2011-05-03 15:00:55 +0200 | [diff] [blame] | 1433 | rcu_read_unlock(); |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1434 | if (resync_after == -1) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1435 | return 1; |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1436 | odev = minor_to_mdev(resync_after); |
Andreas Gruenbacher | 841ce24 | 2010-12-15 19:31:20 +0100 | [diff] [blame] | 1437 | if (!expect(odev)) |
| 1438 | return 1; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1439 | if ((odev->state.conn >= C_SYNC_SOURCE && |
| 1440 | odev->state.conn <= C_PAUSED_SYNC_T) || |
| 1441 | odev->state.aftr_isp || odev->state.peer_isp || |
| 1442 | odev->state.user_isp) |
| 1443 | return 0; |
| 1444 | } |
| 1445 | } |
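/*
 * Worked example for the chain walk above (hypothetical configuration):
 * with resync-after set up as minor 2 -> minor 1 -> minor 0 and -1
 * terminating the chain, asking whether minor 2 may sync now visits
 * 2, 1, 0 in turn and answers "no" as soon as any device on the chain is
 * itself syncing or has one of its isp (sync paused) flags set.
 */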
| 1446 | |
| 1447 | /** |
| 1448 | * _drbd_pause_after() - Pause resync on all devices that may not resync now |
| 1449 | * @mdev: DRBD device. |
| 1450 | * |
| 1451 | * Called from process context only (admin command and after_state_ch). |
| 1452 | */ |
| 1453 | static int _drbd_pause_after(struct drbd_conf *mdev) |
| 1454 | { |
| 1455 | struct drbd_conf *odev; |
| 1456 | int i, rv = 0; |
| 1457 | |
Philipp Reisner | 695d08f | 2011-04-11 22:53:32 -0700 | [diff] [blame] | 1458 | rcu_read_lock(); |
Philipp Reisner | 81a5d60 | 2011-02-22 19:53:16 -0500 | [diff] [blame] | 1459 | idr_for_each_entry(&minors, odev, i) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1460 | if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS) |
| 1461 | continue; |
| 1462 | if (!_drbd_may_sync_now(odev)) |
| 1463 | rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL) |
| 1464 | != SS_NOTHING_TO_DO); |
| 1465 | } |
Philipp Reisner | 695d08f | 2011-04-11 22:53:32 -0700 | [diff] [blame] | 1466 | rcu_read_unlock(); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1467 | |
| 1468 | return rv; |
| 1469 | } |
| 1470 | |
| 1471 | /** |
| 1472 | * _drbd_resume_next() - Resume resync on all devices that may resync now |
| 1473 | * @mdev: DRBD device. |
| 1474 | * |
| 1475 | * Called from process context only (admin command and worker). |
| 1476 | */ |
| 1477 | static int _drbd_resume_next(struct drbd_conf *mdev) |
| 1478 | { |
| 1479 | struct drbd_conf *odev; |
| 1480 | int i, rv = 0; |
| 1481 | |
Philipp Reisner | 695d08f | 2011-04-11 22:53:32 -0700 | [diff] [blame] | 1482 | rcu_read_lock(); |
Philipp Reisner | 81a5d60 | 2011-02-22 19:53:16 -0500 | [diff] [blame] | 1483 | idr_for_each_entry(&minors, odev, i) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1484 | if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS) |
| 1485 | continue; |
| 1486 | if (odev->state.aftr_isp) { |
| 1487 | if (_drbd_may_sync_now(odev)) |
| 1488 | rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0), |
| 1489 | CS_HARD, NULL) |
| 1490 | != SS_NOTHING_TO_DO) ; |
| 1491 | } |
| 1492 | } |
Philipp Reisner | 695d08f | 2011-04-11 22:53:32 -0700 | [diff] [blame] | 1493 | rcu_read_unlock(); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1494 | return rv; |
| 1495 | } |
| 1496 | |
| 1497 | void resume_next_sg(struct drbd_conf *mdev) |
| 1498 | { |
| 1499 | write_lock_irq(&global_state_lock); |
| 1500 | _drbd_resume_next(mdev); |
| 1501 | write_unlock_irq(&global_state_lock); |
| 1502 | } |
| 1503 | |
| 1504 | void suspend_other_sg(struct drbd_conf *mdev) |
| 1505 | { |
| 1506 | write_lock_irq(&global_state_lock); |
| 1507 | _drbd_pause_after(mdev); |
| 1508 | write_unlock_irq(&global_state_lock); |
| 1509 | } |
| 1510 | |
Philipp Reisner | dc97b70 | 2011-05-03 14:27:15 +0200 | [diff] [blame] | 1511 | /* caller must hold global_state_lock */ |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1512 | enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1513 | { |
| 1514 | struct drbd_conf *odev; |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1515 | int resync_after; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1516 | |
| 1517 | if (o_minor == -1) |
| 1518 | return NO_ERROR; |
| 1519 | if (o_minor < -1 || minor_to_mdev(o_minor) == NULL) |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1520 | return ERR_RESYNC_AFTER; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1521 | |
| 1522 | /* check for loops */ |
| 1523 | odev = minor_to_mdev(o_minor); |
| 1524 | while (1) { |
| 1525 | if (odev == mdev) |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1526 | return ERR_RESYNC_AFTER_CYCLE; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1527 | |
Philipp Reisner | daeda1c | 2011-05-03 15:00:55 +0200 | [diff] [blame] | 1528 | rcu_read_lock(); |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1529 | resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after; |
Philipp Reisner | daeda1c | 2011-05-03 15:00:55 +0200 | [diff] [blame] | 1530 | rcu_read_unlock(); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1531 | /* dependency chain ends here, no cycles. */ |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1532 | if (resync_after == -1) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1533 | return NO_ERROR; |
| 1534 | |
| 1535 | /* follow the dependency chain */ |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1536 | odev = minor_to_mdev(resync_after); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1537 | } |
| 1538 | } |
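/*
 * Example of the cycle check above (hypothetical configuration): if mdev
 * is minor 1 and minor 0 is already set to resync after minor 1, then
 * validating "minor 1 resyncs after minor 0" walks 0 -> 1, hits
 * odev == mdev, and returns ERR_RESYNC_AFTER_CYCLE.
 */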
| 1539 | |
Philipp Reisner | dc97b70 | 2011-05-03 14:27:15 +0200 | [diff] [blame] | 1540 | /* caller must hold global_state_lock */ |
Andreas Gruenbacher | 95f8efd | 2011-05-12 11:15:34 +0200 | [diff] [blame] | 1541 | void drbd_resync_after_changed(struct drbd_conf *mdev) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1542 | { |
| 1543 | int changes; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1544 | |
Philipp Reisner | dc97b70 | 2011-05-03 14:27:15 +0200 | [diff] [blame] | 1545 | do { |
| 1546 | changes = _drbd_pause_after(mdev); |
| 1547 | changes |= _drbd_resume_next(mdev); |
| 1548 | } while (changes); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1549 | } |
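/*
 * The pause/resume pair above is iterated to a fixed point: each pass may
 * toggle aftr_isp on some device, which in turn can block or unblock other
 * devices further along the resync-after chain, so we loop until a full
 * pass reports no more changes.
 */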
| 1550 | |
Lars Ellenberg | 9bd28d3 | 2010-11-05 09:55:18 +0100 | [diff] [blame] | 1551 | void drbd_rs_controller_reset(struct drbd_conf *mdev) |
| 1552 | { |
Philipp Reisner | 813472c | 2011-05-03 16:47:02 +0200 | [diff] [blame] | 1553 | struct fifo_buffer *plan; |
| 1554 | |
Lars Ellenberg | 9bd28d3 | 2010-11-05 09:55:18 +0100 | [diff] [blame] | 1555 | atomic_set(&mdev->rs_sect_in, 0); |
| 1556 | atomic_set(&mdev->rs_sect_ev, 0); |
| 1557 | mdev->rs_in_flight = 0; |
Philipp Reisner | 813472c | 2011-05-03 16:47:02 +0200 | [diff] [blame] | 1558 | |
 | 1559 | 	/* Updating the RCU-protected object in place is necessary since
 | 1560 | 	   this function gets called from atomic context.
 | 1561 | 	   It is valid since all other updates also lead to a completely
 | 1562 | 	   empty fifo */
| 1563 | rcu_read_lock(); |
| 1564 | plan = rcu_dereference(mdev->rs_plan_s); |
| 1565 | plan->total = 0; |
| 1566 | fifo_set(plan, 0); |
| 1567 | rcu_read_unlock(); |
Lars Ellenberg | 9bd28d3 | 2010-11-05 09:55:18 +0100 | [diff] [blame] | 1568 | } |
| 1569 | |
Philipp Reisner | 1f04af3 | 2011-02-07 11:33:59 +0100 | [diff] [blame] | 1570 | void start_resync_timer_fn(unsigned long data) |
| 1571 | { |
| 1572 | struct drbd_conf *mdev = (struct drbd_conf *) data; |
| 1573 | |
Lars Ellenberg | d5b27b0 | 2011-11-14 15:42:37 +0100 | [diff] [blame] | 1574 | drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work); |
Philipp Reisner | 1f04af3 | 2011-02-07 11:33:59 +0100 | [diff] [blame] | 1575 | } |
| 1576 | |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1577 | int w_start_resync(struct drbd_work *w, int cancel) |
Philipp Reisner | 1f04af3 | 2011-02-07 11:33:59 +0100 | [diff] [blame] | 1578 | { |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1579 | struct drbd_conf *mdev = w->mdev; |
| 1580 | |
Philipp Reisner | 1f04af3 | 2011-02-07 11:33:59 +0100 | [diff] [blame] | 1581 | if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) { |
| 1582 | dev_warn(DEV, "w_start_resync later...\n"); |
| 1583 | mdev->start_resync_timer.expires = jiffies + HZ/10; |
| 1584 | add_timer(&mdev->start_resync_timer); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1585 | return 0; |
Philipp Reisner | 1f04af3 | 2011-02-07 11:33:59 +0100 | [diff] [blame] | 1586 | } |
| 1587 | |
| 1588 | drbd_start_resync(mdev, C_SYNC_SOURCE); |
Philipp Reisner | 36baf61 | 2011-11-10 14:27:34 +0100 | [diff] [blame] | 1589 | clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags); |
Andreas Gruenbacher | 99920dc | 2011-03-16 15:31:39 +0100 | [diff] [blame] | 1590 | return 0; |
Philipp Reisner | 1f04af3 | 2011-02-07 11:33:59 +0100 | [diff] [blame] | 1591 | } |
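/*
 * The deferral above is a retry-by-timer pattern: while acks or resync
 * replies are still outstanding, w_start_resync() re-arms
 * start_resync_timer (HZ/10, i.e. 100ms) and returns instead of blocking
 * the worker; the timer then re-queues this work item via
 * start_resync_timer_fn() above.
 */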
| 1592 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1593 | /** |
| 1594 | * drbd_start_resync() - Start the resync process |
| 1595 | * @mdev: DRBD device. |
| 1596 | * @side: Either C_SYNC_SOURCE or C_SYNC_TARGET |
| 1597 | * |
| 1598 | * This function might bring you directly into one of the |
| 1599 | * C_PAUSED_SYNC_* states. |
| 1600 | */ |
| 1601 | void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) |
| 1602 | { |
| 1603 | union drbd_state ns; |
| 1604 | int r; |
| 1605 | |
Philipp Reisner | c4752ef | 2010-10-27 17:32:36 +0200 | [diff] [blame] | 1606 | if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1607 | dev_err(DEV, "Resync already running!\n"); |
| 1608 | return; |
| 1609 | } |
| 1610 | |
Philipp Reisner | e64a329 | 2011-02-05 17:34:11 +0100 | [diff] [blame] | 1611 | if (!test_bit(B_RS_H_DONE, &mdev->flags)) { |
| 1612 | if (side == C_SYNC_TARGET) { |
| 1613 | /* Since application IO was locked out during C_WF_BITMAP_T and |
 | 1614 | 		   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET,
 | 1615 | 		   which will make the data inconsistent, give the handler a chance to veto. */
| 1616 | r = drbd_khelper(mdev, "before-resync-target"); |
| 1617 | r = (r >> 8) & 0xff; |
| 1618 | if (r > 0) { |
| 1619 | dev_info(DEV, "before-resync-target handler returned %d, " |
Philipp Reisner | 09b9e79 | 2010-12-03 16:04:24 +0100 | [diff] [blame] | 1620 | "dropping connection.\n", r); |
Philipp Reisner | 38fa998 | 2011-03-15 18:24:49 +0100 | [diff] [blame] | 1621 | conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); |
Philipp Reisner | 09b9e79 | 2010-12-03 16:04:24 +0100 | [diff] [blame] | 1622 | return; |
| 1623 | } |
Philipp Reisner | e64a329 | 2011-02-05 17:34:11 +0100 | [diff] [blame] | 1624 | } else /* C_SYNC_SOURCE */ { |
| 1625 | r = drbd_khelper(mdev, "before-resync-source"); |
| 1626 | r = (r >> 8) & 0xff; |
| 1627 | if (r > 0) { |
| 1628 | if (r == 3) { |
| 1629 | dev_info(DEV, "before-resync-source handler returned %d, " |
| 1630 | "ignoring. Old userland tools?", r); |
| 1631 | } else { |
| 1632 | dev_info(DEV, "before-resync-source handler returned %d, " |
| 1633 | "dropping connection.\n", r); |
Philipp Reisner | 38fa998 | 2011-03-15 18:24:49 +0100 | [diff] [blame] | 1634 | conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); |
Philipp Reisner | e64a329 | 2011-02-05 17:34:11 +0100 | [diff] [blame] | 1635 | return; |
| 1636 | } |
| 1637 | } |
Philipp Reisner | 09b9e79 | 2010-12-03 16:04:24 +0100 | [diff] [blame] | 1638 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1639 | } |
| 1640 | |
Philipp Reisner | e64a329 | 2011-02-05 17:34:11 +0100 | [diff] [blame] | 1641 | if (current == mdev->tconn->worker.task) { |
Philipp Reisner | dad2055 | 2011-02-11 19:43:55 +0100 | [diff] [blame] | 1642 | /* The worker should not sleep waiting for state_mutex, |
Philipp Reisner | e64a329 | 2011-02-05 17:34:11 +0100 | [diff] [blame] | 1643 | that can take long */ |
Philipp Reisner | 8410da8 | 2011-02-11 20:11:10 +0100 | [diff] [blame] | 1644 | if (!mutex_trylock(mdev->state_mutex)) { |
Philipp Reisner | e64a329 | 2011-02-05 17:34:11 +0100 | [diff] [blame] | 1645 | set_bit(B_RS_H_DONE, &mdev->flags); |
| 1646 | mdev->start_resync_timer.expires = jiffies + HZ/5; |
| 1647 | add_timer(&mdev->start_resync_timer); |
| 1648 | return; |
| 1649 | } |
| 1650 | } else { |
Philipp Reisner | 8410da8 | 2011-02-11 20:11:10 +0100 | [diff] [blame] | 1651 | mutex_lock(mdev->state_mutex); |
Philipp Reisner | e64a329 | 2011-02-05 17:34:11 +0100 | [diff] [blame] | 1652 | } |
| 1653 | clear_bit(B_RS_H_DONE, &mdev->flags); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1654 | |
Philipp Reisner | 0cfac5d | 2011-11-10 12:12:52 +0100 | [diff] [blame] | 1655 | write_lock_irq(&global_state_lock); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1656 | if (!get_ldev_if_state(mdev, D_NEGOTIATING)) { |
Philipp Reisner | 0cfac5d | 2011-11-10 12:12:52 +0100 | [diff] [blame] | 1657 | write_unlock_irq(&global_state_lock); |
Philipp Reisner | 8410da8 | 2011-02-11 20:11:10 +0100 | [diff] [blame] | 1658 | mutex_unlock(mdev->state_mutex); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1659 | return; |
| 1660 | } |
| 1661 | |
Philipp Reisner | 78bae59 | 2011-03-28 15:40:12 +0200 | [diff] [blame] | 1662 | ns = drbd_read_state(mdev); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1663 | |
| 1664 | ns.aftr_isp = !_drbd_may_sync_now(mdev); |
| 1665 | |
| 1666 | ns.conn = side; |
| 1667 | |
| 1668 | if (side == C_SYNC_TARGET) |
| 1669 | ns.disk = D_INCONSISTENT; |
| 1670 | else /* side == C_SYNC_SOURCE */ |
| 1671 | ns.pdsk = D_INCONSISTENT; |
| 1672 | |
| 1673 | r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL); |
Philipp Reisner | 78bae59 | 2011-03-28 15:40:12 +0200 | [diff] [blame] | 1674 | ns = drbd_read_state(mdev); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1675 | |
| 1676 | if (ns.conn < C_CONNECTED) |
| 1677 | r = SS_UNKNOWN_ERROR; |
| 1678 | |
| 1679 | if (r == SS_SUCCESS) { |
Lars Ellenberg | 1d7734a | 2010-08-11 21:21:50 +0200 | [diff] [blame] | 1680 | unsigned long tw = drbd_bm_total_weight(mdev); |
| 1681 | unsigned long now = jiffies; |
| 1682 | int i; |
| 1683 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1684 | mdev->rs_failed = 0; |
| 1685 | mdev->rs_paused = 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1686 | mdev->rs_same_csum = 0; |
Lars Ellenberg | 0f0601f | 2010-08-11 23:40:24 +0200 | [diff] [blame] | 1687 | mdev->rs_last_events = 0; |
| 1688 | mdev->rs_last_sect_ev = 0; |
Lars Ellenberg | 1d7734a | 2010-08-11 21:21:50 +0200 | [diff] [blame] | 1689 | mdev->rs_total = tw; |
| 1690 | mdev->rs_start = now; |
| 1691 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { |
| 1692 | mdev->rs_mark_left[i] = tw; |
| 1693 | mdev->rs_mark_time[i] = now; |
| 1694 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1695 | _drbd_pause_after(mdev); |
| 1696 | } |
| 1697 | write_unlock_irq(&global_state_lock); |
Lars Ellenberg | 5a22db8 | 2010-12-17 21:14:23 +0100 | [diff] [blame] | 1698 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1699 | if (r == SS_SUCCESS) { |
| 1700 | dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", |
| 1701 | drbd_conn_str(ns.conn), |
| 1702 | (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), |
| 1703 | (unsigned long) mdev->rs_total); |
Lars Ellenberg | 6c922ed | 2011-01-12 11:51:13 +0100 | [diff] [blame] | 1704 | if (side == C_SYNC_TARGET) |
| 1705 | mdev->bm_resync_fo = 0; |
| 1706 | |
| 1707 | /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid |
| 1708 | * with w_send_oos, or the sync target will get confused as to |
| 1709 | * how many bits to resync. We cannot always do that, because for an |
| 1710 | * empty resync and protocol < 95, we need to do it here, as we call |
| 1711 | * drbd_resync_finished from here in that case. |
| 1712 | * We call drbd_gen_and_send_sync_uuid here for protocol < 96, |
| 1713 | * and from after_state_ch otherwise. */ |
Philipp Reisner | 31890f4 | 2011-01-19 14:12:51 +0100 | [diff] [blame] | 1714 | if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96) |
Lars Ellenberg | 6c922ed | 2011-01-12 11:51:13 +0100 | [diff] [blame] | 1715 | drbd_gen_and_send_sync_uuid(mdev); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1716 | |
Philipp Reisner | 31890f4 | 2011-01-19 14:12:51 +0100 | [diff] [blame] | 1717 | if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) { |
Lars Ellenberg | af85e8e | 2010-10-07 16:07:55 +0200 | [diff] [blame] | 1718 | /* This still has a race (about when exactly the peers |
| 1719 | * detect connection loss) that can lead to a full sync |
| 1720 | * on next handshake. In 8.3.9 we fixed this with explicit |
| 1721 | * resync-finished notifications, but the fix |
| 1722 | * introduces a protocol change. Sleeping for some |
| 1723 | * time longer than the ping interval + timeout on the |
| 1724 | * SyncSource, to give the SyncTarget the chance to |
| 1725 | * detect connection loss, then waiting for a ping |
| 1726 | * response (implicit in drbd_resync_finished) reduces |
| 1727 | * the race considerably, but does not solve it. */ |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1728 | if (side == C_SYNC_SOURCE) { |
| 1729 | struct net_conf *nc; |
| 1730 | int timeo; |
| 1731 | |
| 1732 | rcu_read_lock(); |
| 1733 | nc = rcu_dereference(mdev->tconn->net_conf); |
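| | /* Editor's note on the computation below: ping_int is in seconds, while |
| | * ping_timeo is configured in tenths of a second (an assumption based on |
| | * the drbd.conf documentation of ping-timeout). HZ/10 would thus be |
| | * exactly one ping-timeout; dividing by 9 sleeps ~11% longer, which is |
| | * the "some time longer" slack the comment above asks for. */ |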
| 1734 | timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9; |
| 1735 | rcu_read_unlock(); |
| 1736 | schedule_timeout_interruptible(timeo); |
| 1737 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1738 | drbd_resync_finished(mdev); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1739 | } |
| 1740 | |
Lars Ellenberg | 9bd28d3 | 2010-11-05 09:55:18 +0100 | [diff] [blame] | 1741 | drbd_rs_controller_reset(mdev); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1742 | /* ns.conn may already be != mdev->state.conn, |
| 1743 | * we may have been paused in between, or become paused until |
| 1744 | * the timer triggers. |
| 1745 | * Either way, that is handled in resync_timer_fn(). */ |
| 1746 | if (ns.conn == C_SYNC_TARGET) |
| 1747 | mod_timer(&mdev->resync_timer, jiffies); |
| 1748 | |
| 1749 | drbd_md_sync(mdev); |
| 1750 | } |
Lars Ellenberg | 5a22db8 | 2010-12-17 21:14:23 +0100 | [diff] [blame] | 1751 | put_ldev(mdev); |
Philipp Reisner | 8410da8 | 2011-02-11 20:11:10 +0100 | [diff] [blame] | 1752 | mutex_unlock(mdev->state_mutex); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1753 | } |
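| | |
| | /* Editor's sketch, not DRBD code: the trylock-or-rearm pattern that |
| | * drbd_start_resync() uses above, reduced to its core. The worker thread |
| | * must not block for long, so instead of sleeping on state_mutex it backs |
| | * off and retries via a timer. All names here (example_dev, example_retry, |
| | * do_state_work) are hypothetical; only the kernel APIs (mutex_trylock, |
| | * add_timer, jiffies) are real. */ |
| | #if 0 /* illustration only */ |
| | struct example_dev { |
| | struct mutex lock; |
| | struct timer_list retry_timer; /* its timer fn re-enters this path */ |
| | }; |
| | |
| | static void example_retry(struct example_dev *dev) |
| | { |
| | if (!mutex_trylock(&dev->lock)) { |
| | /* contended: retry in 200ms instead of blocking the worker */ |
| | dev->retry_timer.expires = jiffies + HZ/5; |
| | add_timer(&dev->retry_timer); |
| | return; |
| | } |
| | do_state_work(dev); /* hypothetical helper */ |
| | mutex_unlock(&dev->lock); |
| | } |
| | #endif |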
| 1754 | |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1755 | /* If the resource already closed the current epoch, but we did not |
| 1756 | * (because we have not yet seen new requests), we should send the |
| 1757 | * corresponding barrier now. Must be checked under the same spinlock |
| 1758 | * that is used to check for new requests. */ |
| 1759 | bool need_to_send_barrier(struct drbd_tconn *connection) |
| 1760 | { |
| 1761 | if (!connection->send.seen_any_write_yet) |
| 1762 | return false; |
| 1763 | |
| 1764 | /* Skip barriers that do not contain any writes. |
| 1765 | * This may happen during AHEAD mode. */ |
| 1766 | if (!connection->send.current_epoch_writes) |
| 1767 | return false; |
| 1768 | |
| 1769 | /* ->req_lock is held when requests are queued on |
| 1770 | * connection->sender_work, and put into ->transfer_log. |
| 1771 | * It is also held when ->current_tle_nr is increased. |
| 1772 | * So either there are already new requests queued, |
| 1773 | * and corresponding barriers will be sent there. |
| 1774 | * Or nothing new is queued yet, so the difference will be 1. |
| 1775 | */ |
| 1776 | if (atomic_read(&connection->current_tle_nr) != |
| 1777 | connection->send.current_epoch_nr + 1) |
| 1778 | return false; |
| 1779 | |
| 1780 | return true; |
| 1781 | } |
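| | |
| | /* Editor's worked example for the check above (numbers illustrative): |
| | * with send.current_epoch_nr == 7, epoch 7 is the last one we put on the |
| | * wire. If the resource has since closed epoch 7 but no request for |
| | * epoch 8 has reached us, current_tle_nr is 8, the difference is exactly |
| | * 1, and we must send the barrier here. If current_tle_nr is still 7, |
| | * the epoch is not closed yet and there is nothing to send; any other |
| | * difference means new requests are already queued and the request |
| | * dequeue path will send the barriers instead. */ |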
| 1782 | |
Lars Ellenberg | 8c0785a | 2011-10-19 11:50:57 +0200 | [diff] [blame] | 1783 | bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list) |
| 1784 | { |
| 1785 | spin_lock_irq(&queue->q_lock); |
| 1786 | list_splice_init(&queue->q, work_list); |
| 1787 | spin_unlock_irq(&queue->q_lock); |
| 1788 | return !list_empty(work_list); |
| 1789 | } |
| 1790 | |
| 1791 | bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list) |
| 1792 | { |
| 1793 | spin_lock_irq(&queue->q_lock); |
| 1794 | if (!list_empty(&queue->q)) |
| 1795 | list_move(queue->q.next, work_list); |
| 1796 | spin_unlock_irq(&queue->q_lock); |
| 1797 | return !list_empty(work_list); |
| 1798 | } |
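| | |
| | /* Editor's sketch, not DRBD code: the two dequeue flavours above differ |
| | * in their ordering guarantees. dequeue_work_batch() splices the whole |
| | * queue out at once, so an item later pushed with drbd_queue_work_front() |
| | * could no longer overtake the already-spliced batch; dequeue_work_item() |
| | * takes a single entry and leaves the queue (and that guarantee) intact. |
| | * A minimal consumer loop might look like this; process_one() is |
| | * hypothetical. */ |
| | #if 0 /* illustration only */ |
| | static void example_consume(struct drbd_work_queue *queue) |
| | { |
| | LIST_HEAD(work_list); |
| | struct drbd_work *w; |
| | |
| | while (dequeue_work_item(queue, &work_list)) { |
| | w = list_first_entry(&work_list, struct drbd_work, list); |
| | list_del_init(&w->list); |
| | process_one(w); /* hypothetical */ |
| | } |
| | } |
| | #endif |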
| 1799 | |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1800 | void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list) |
| 1801 | { |
| 1802 | DEFINE_WAIT(wait); |
| 1803 | struct net_conf *nc; |
| 1804 | int uncork, cork; |
| 1805 | |
| 1806 | dequeue_work_item(&connection->sender_work, work_list); |
| 1807 | if (!list_empty(work_list)) |
| 1808 | return; |
| 1809 | |
| 1810 | /* Still nothing to do? |
| 1811 | * Maybe we still need to close the current epoch, |
| 1812 | * even if no new requests are queued yet. |
| 1813 | * |
| 1814 | * Also, poke TCP, just in case. |
| 1815 | * Then wait for new work (or signal). */ |
| 1816 | rcu_read_lock(); |
| 1817 | nc = rcu_dereference(connection->net_conf); |
| 1818 | uncork = nc ? nc->tcp_cork : 0; |
| 1819 | rcu_read_unlock(); |
| 1820 | if (uncork) { |
| 1821 | mutex_lock(&connection->data.mutex); |
| 1822 | if (connection->data.socket) |
| 1823 | drbd_tcp_uncork(connection->data.socket); |
| 1824 | mutex_unlock(&connection->data.mutex); |
| 1825 | } |
| 1826 | |
| 1827 | for (;;) { |
| 1828 | int send_barrier; |
| 1829 | prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE); |
| 1830 | spin_lock_irq(&connection->req_lock); |
| 1831 | spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ |
Lars Ellenberg | bc317a9 | 2012-08-22 11:47:14 +0200 | [diff] [blame] | 1832 | /* dequeue single item only, |
| 1833 | * we still use drbd_queue_work_front() in some places */ |
| 1834 | if (!list_empty(&connection->sender_work.q)) |
| 1835 | list_move(connection->sender_work.q.next, work_list); |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1836 | spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ |
| 1837 | if (!list_empty(work_list) || signal_pending(current)) { |
| 1838 | spin_unlock_irq(&connection->req_lock); |
| 1839 | break; |
| 1840 | } |
| 1841 | send_barrier = need_to_send_barrier(connection); |
| 1842 | spin_unlock_irq(&connection->req_lock); |
| 1843 | if (send_barrier) { |
| 1844 | drbd_send_barrier(connection); |
| 1845 | connection->send.current_epoch_nr++; |
| 1846 | } |
| 1847 | schedule(); |
| 1848 | /* We may be woken up for things other than new work, too, |
| 1849 | * e.g. if the current epoch got closed; |
| 1850 | * in that case we send the barrier above. */ |
| 1851 | } |
| 1852 | finish_wait(&connection->sender_work.q_wait, &wait); |
| 1853 | |
| 1854 | /* someone may have changed the config while we were waiting above. */ |
| 1855 | rcu_read_lock(); |
| 1856 | nc = rcu_dereference(connection->net_conf); |
| 1857 | cork = nc ? nc->tcp_cork : 0; |
| 1858 | rcu_read_unlock(); |
| 1859 | mutex_lock(&connection->data.mutex); |
| 1860 | if (connection->data.socket) { |
| 1861 | if (cork) |
| 1862 | drbd_tcp_cork(connection->data.socket); |
| 1863 | else if (!uncork) |
| 1864 | drbd_tcp_uncork(connection->data.socket); |
| 1865 | } |
| 1866 | mutex_unlock(&connection->data.mutex); |
| 1867 | } |
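| | |
| | /* Editor's note: drbd_tcp_cork()/drbd_tcp_uncork() are wrappers defined |
| | * elsewhere (drbd_int.h); conceptually they toggle TCP_CORK so that the |
| | * small barrier/ping packets produced while the sender idles are not |
| | * flushed as tiny segments. A sketch of such a wrapper, assuming the |
| | * kernel_setsockopt() API of this kernel era (an assumption, not a quote |
| | * of the DRBD source): */ |
| | #if 0 /* illustration only */ |
| | static void example_tcp_cork(struct socket *sock, int on) |
| | { |
| | /* on = 1 holds back partial frames, on = 0 flushes them */ |
| | (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK, |
| | (char *)&on, sizeof(on)); |
| | } |
| | #endif |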
| 1868 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1869 | int drbd_worker(struct drbd_thread *thi) |
| 1870 | { |
Philipp Reisner | 392c880 | 2011-02-09 10:33:31 +0100 | [diff] [blame] | 1871 | struct drbd_tconn *tconn = thi->tconn; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1872 | struct drbd_work *w = NULL; |
Philipp Reisner | 0e29d16 | 2011-02-18 14:23:11 +0100 | [diff] [blame] | 1873 | struct drbd_conf *mdev; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1874 | LIST_HEAD(work_list); |
Lars Ellenberg | 8c0785a | 2011-10-19 11:50:57 +0200 | [diff] [blame] | 1875 | int vnr; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1876 | |
Andreas Gruenbacher | e77a0a5 | 2011-01-25 15:43:39 +0100 | [diff] [blame] | 1877 | while (get_t_state(thi) == RUNNING) { |
Philipp Reisner | 8082228 | 2011-02-08 12:46:30 +0100 | [diff] [blame] | 1878 | drbd_thread_current_set_cpu(thi); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1879 | |
Lars Ellenberg | 8c0785a | 2011-10-19 11:50:57 +0200 | [diff] [blame] | 1880 | /* as long as we use drbd_queue_work_front(), |
| 1881 | * we may only dequeue single work items here, not batches. */ |
| 1882 | if (list_empty(&work_list)) |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1883 | wait_for_work(tconn, &work_list); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1884 | |
Lars Ellenberg | 8c0785a | 2011-10-19 11:50:57 +0200 | [diff] [blame] | 1885 | if (signal_pending(current)) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1886 | flush_signals(current); |
Philipp Reisner | 19393e1 | 2011-02-09 10:09:07 +0100 | [diff] [blame] | 1887 | if (get_t_state(thi) == RUNNING) { |
| 1888 | conn_warn(tconn, "Worker got an unexpected signal\n"); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1889 | continue; |
Philipp Reisner | 19393e1 | 2011-02-09 10:09:07 +0100 | [diff] [blame] | 1890 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1891 | break; |
| 1892 | } |
| 1893 | |
Andreas Gruenbacher | e77a0a5 | 2011-01-25 15:43:39 +0100 | [diff] [blame] | 1894 | if (get_t_state(thi) != RUNNING) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1895 | break; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1896 | |
Lars Ellenberg | 8c0785a | 2011-10-19 11:50:57 +0200 | [diff] [blame] | 1897 | while (!list_empty(&work_list)) { |
| 1898 | w = list_first_entry(&work_list, struct drbd_work, list); |
| 1899 | list_del_init(&w->list); |
| 1900 | if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0) |
| 1901 | continue; |
Philipp Reisner | bbeb641 | 2011-02-10 13:45:46 +0100 | [diff] [blame] | 1902 | if (tconn->cstate >= C_WF_REPORT_PARAMS) |
| 1903 | conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1904 | } |
| 1905 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1906 | |
Lars Ellenberg | 8c0785a | 2011-10-19 11:50:57 +0200 | [diff] [blame] | 1907 | do { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1908 | while (!list_empty(&work_list)) { |
Lars Ellenberg | 8c0785a | 2011-10-19 11:50:57 +0200 | [diff] [blame] | 1909 | w = list_first_entry(&work_list, struct drbd_work, list); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1910 | list_del_init(&w->list); |
Philipp Reisner | 00d5694 | 2011-02-09 18:09:48 +0100 | [diff] [blame] | 1911 | w->cb(w, 1); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1912 | } |
Lars Ellenberg | d5b27b0 | 2011-11-14 15:42:37 +0100 | [diff] [blame] | 1913 | dequeue_work_batch(&tconn->sender_work, &work_list); |
Lars Ellenberg | 8c0785a | 2011-10-19 11:50:57 +0200 | [diff] [blame] | 1914 | } while (!list_empty(&work_list)); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1915 | |
Philipp Reisner | c141ebd | 2011-05-05 16:13:10 +0200 | [diff] [blame] | 1916 | rcu_read_lock(); |
Lars Ellenberg | f399002 | 2011-03-23 14:31:09 +0100 | [diff] [blame] | 1917 | idr_for_each_entry(&tconn->volumes, mdev, vnr) { |
Philipp Reisner | 0e29d16 | 2011-02-18 14:23:11 +0100 | [diff] [blame] | 1918 | D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE); |
Philipp Reisner | c141ebd | 2011-05-05 16:13:10 +0200 | [diff] [blame] | 1919 | kref_get(&mdev->kref); |
| 1920 | rcu_read_unlock(); |
Philipp Reisner | 0e29d16 | 2011-02-18 14:23:11 +0100 | [diff] [blame] | 1921 | drbd_mdev_cleanup(mdev); |
Philipp Reisner | c141ebd | 2011-05-05 16:13:10 +0200 | [diff] [blame] | 1922 | kref_put(&mdev->kref, &drbd_minor_destroy); |
| 1923 | rcu_read_lock(); |
Philipp Reisner | 0e29d16 | 2011-02-18 14:23:11 +0100 | [diff] [blame] | 1924 | } |
Philipp Reisner | c141ebd | 2011-05-05 16:13:10 +0200 | [diff] [blame] | 1925 | rcu_read_unlock(); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1926 | |
| 1927 | return 0; |
| 1928 | } |
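| | |
| | /* Editor's sketch, not DRBD code: what a work item handed to this worker |
| | * looks like from the producer side. Per the dispatch loop above, a |
| | * callback returning 0 means success; nonzero makes the worker force the |
| | * connection into C_NETWORK_FAILURE. drbd_queue_work() is the queueing |
| | * helper declared in drbd_int.h; example_cb() and its body are |
| | * hypothetical. */ |
| | #if 0 /* illustration only */ |
| | static int example_cb(struct drbd_work *w, int cancel) |
| | { |
| | if (cancel) /* connection is going away: only clean up */ |
| | return 0; |
| | /* ... do the actual send work here ... */ |
| | return 0; /* 0 = ok, nonzero => C_NETWORK_FAILURE */ |
| | } |
| | |
| | static void example_queue(struct drbd_tconn *tconn, struct drbd_work *w) |
| | { |
| | w->cb = example_cb; |
| | drbd_queue_work(&tconn->sender_work, w); |
| | } |
| | #endif |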