/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


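/* Note on the two accounting helpers below: DRBD is a bio-based block
 * driver, so the core block layer does not account its I/O automatically
 * (it only does that for request-based drivers).  We therefore update the
 * gendisk statistics on part0 ourselves, under part_stat_lock(). */
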
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
        const int rw = bio_data_dir(bio);
        int cpu;
        cpu = part_stat_lock();
        part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
        part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
        part_inc_in_flight(&mdev->vdisk->part0, rw);
        part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
        int rw = bio_data_dir(req->master_bio);
        unsigned long duration = jiffies - req->start_time;
        int cpu;
        cpu = part_stat_lock();
        part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
        part_round_stats(cpu, &mdev->vdisk->part0);
        part_dec_in_flight(&mdev->vdisk->part0, rw);
        part_stat_unlock();
}

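/* Allocate and initialize a drbd_request for a master bio.  Allocation is
 * from a dedicated mempool to guarantee forward progress under memory
 * pressure; GFP_NOIO is required because we are on the I/O submission
 * path, where recursing into reclaim that itself issues I/O could
 * deadlock. */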
static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
                                         struct bio *bio_src)
{
        struct drbd_request *req;

        req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
        if (!req)
                return NULL;

        drbd_req_make_private_bio(req, bio_src);
        req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
        req->w.mdev = mdev;
        req->master_bio = bio_src;
        req->epoch = 0;

        drbd_clear_interval(&req->i);
        req->i.sector = bio_src->bi_sector;
        req->i.size = bio_src->bi_size;
        req->i.local = true;
        req->i.waiting = false;

        INIT_LIST_HEAD(&req->tl_requests);
        INIT_LIST_HEAD(&req->w.list);

        return req;
}

static void drbd_req_free(struct drbd_request *req)
{
        mempool_free(req, drbd_request_mempool);
}

/* rw is bio_data_dir(), only READ or WRITE */
static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
{
        const unsigned long s = req->rq_state;

        /* remove it from the transfer log.
         * well, only if it had been there in the first
         * place... if it had not (local only or conflicting
         * and never sent), it should still be "empty" as
         * initialized in drbd_req_new(), so we can list_del() it
         * here unconditionally */
        list_del(&req->tl_requests);

        /* if it was a write, we may have to set the corresponding
         * bit(s) out-of-sync first. If it had a local part, we need to
         * release the reference to the activity log. */
        if (rw == WRITE) {
                /* Set out-of-sync unless both OK flags are set
                 * (local only or remote failed).
                 * Other places where we set out-of-sync:
                 * READ with local io-error */
                if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
                        drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

                if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
                        drbd_set_in_sync(mdev, req->i.sector, req->i.size);

                /* one might be tempted to move the drbd_al_complete_io
                 * to the local io completion callback drbd_request_endio.
                 * but, if this was a mirror write, we may only
                 * drbd_al_complete_io after this is RQ_NET_DONE,
                 * otherwise the extent could be dropped from the al
                 * before it has actually been written on the peer.
                 * if we crash before our peer knows about the request,
                 * but after the extent has been dropped from the al,
                 * we would forget to resync the corresponding extent.
                 */
                if (s & RQ_LOCAL_MASK) {
                        if (get_ldev_if_state(mdev, D_FAILED)) {
                                if (s & RQ_IN_ACT_LOG)
                                        drbd_al_complete_io(mdev, &req->i);
                                put_ldev(mdev);
                        } else if (__ratelimit(&drbd_ratelimit_state)) {
                                dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
                                         "but my Disk seems to have failed :(\n",
                                         (unsigned long long) req->i.sector, req->i.size);
                        }
                }
        }

        drbd_req_free(req);
}

static void queue_barrier(struct drbd_conf *mdev)
{
        struct drbd_tl_epoch *b;

        /* We are within the req_lock. Once we queued the barrier for sending,
         * we set the CREATE_BARRIER bit. It is cleared as soon as a new
         * barrier/epoch object is added. This is the only place this bit is
         * set. It indicates that the barrier for this epoch is already queued,
         * and no new epoch has been created yet. */
        if (test_bit(CREATE_BARRIER, &mdev->flags))
                return;

        b = mdev->tconn->newest_tle;
        b->w.cb = w_send_barrier;
        b->w.mdev = mdev;
        /* inc_ap_pending done here, so we won't
         * get imbalanced on connection loss.
         * dec_ap_pending will be done in got_BarrierAck
         * or (on connection loss) in tl_clear. */
        inc_ap_pending(mdev);
        drbd_queue_work(&mdev->tconn->data.work, &b->w);
        set_bit(CREATE_BARRIER, &mdev->flags);
}

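/* A drbd_tl_epoch ("transfer log epoch") groups all writes sent between
 * two P_BARRIER packets.  The peer acknowledges a whole epoch at once
 * with P_BARRIER_ACK, which is what the BARRIER_ACKED event in
 * __req_mod() below processes. */
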
static void _about_to_complete_local_write(struct drbd_conf *mdev,
                                           struct drbd_request *req)
{
        const unsigned long s = req->rq_state;

        /* Before we can signal completion to the upper layers,
         * we may need to close the current epoch.
         * We can skip this, if this request has not even been sent, because we
         * did not have a fully established connection yet/anymore, during
         * bitmap exchange, or while we are C_AHEAD due to congestion policy.
         */
        if (mdev->state.conn >= C_CONNECTED &&
            (s & RQ_NET_SENT) != 0 &&
            req->epoch == mdev->tconn->newest_tle->br_number)
                queue_barrier(mdev);
}

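/* Hand the completed master bio back to the upper layers.  In the DRBD
 * code this runs without tconn->req_lock held (compare the req_mod()
 * calling pattern): bio_endio() calls back into the bio's owner and may
 * do arbitrary work, so it is kept outside the spinlock. */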
void complete_master_bio(struct drbd_conf *mdev,
                struct bio_and_error *m)
{
        bio_endio(m->bio, m->error);
        dec_ap_bio(mdev);
}


static void drbd_remove_request_interval(struct rb_root *root,
                                         struct drbd_request *req)
{
        struct drbd_conf *mdev = req->w.mdev;
        struct drbd_interval *i = &req->i;

        drbd_remove_interval(root, i);

        /* Wake up any processes waiting for this request to complete. */
        if (i->waiting)
                wake_up(&mdev->misc_wait);
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
        const unsigned long s = req->rq_state;
        struct drbd_conf *mdev = req->w.mdev;
        int rw = req->rq_state & RQ_WRITE ? WRITE : READ;

        /* we must not complete the master bio, while it is
         *      still being processed by _drbd_send_zc_bio (drbd_send_dblock)
         *      not yet acknowledged by the peer
         *      not yet completed by the local io subsystem
         * these flags may get cleared in any order by
         *      the worker,
         *      the receiver,
         *      the bio_endio completion callbacks.
         */
        if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
                return;
        if (req->i.waiting) {
                /* Retry all conflicting peer requests. */
                wake_up(&mdev->misc_wait);
        }
        if (s & RQ_NET_QUEUED)
                return;
        if (s & RQ_NET_PENDING)
                return;

        if (req->master_bio) {
                /* this is DATA_RECEIVED (remote read)
                 * or protocol C P_WRITE_ACK
                 * or protocol B P_RECV_ACK
                 * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck)
                 * or canceled or failed,
                 * or killed from the transfer log due to connection loss.
                 */

                /*
                 * figure out whether to report success or failure.
                 *
                 * report success when at least one of the operations succeeded.
                 * or, to put the other way,
                 * only report failure, when both operations failed.
                 *
                 * what to do about the failures is handled elsewhere.
                 * what we need to do here is just: complete the master_bio.
                 *
                 * local completion error, if any, has been stored as ERR_PTR
                 * in private_bio within drbd_request_endio.
                 */
                int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
                int error = PTR_ERR(req->private_bio);

                /* remove the request from the conflict detection
                 * respective block_id verification hash */
                if (!drbd_interval_empty(&req->i)) {
                        struct rb_root *root;

                        if (rw == WRITE)
                                root = &mdev->write_requests;
                        else
                                root = &mdev->read_requests;
                        drbd_remove_request_interval(root, req);
                } else if (!(s & RQ_POSTPONED))
                        D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

                /* for writes we need to do some extra housekeeping */
                if (rw == WRITE)
                        _about_to_complete_local_write(mdev, req);

                /* Update disk stats */
                _drbd_end_io_acct(mdev, req);

                if (!(s & RQ_POSTPONED)) {
                        m->error = ok ? 0 : (error ?: -EIO);
                        m->bio = req->master_bio;
                }
                req->master_bio = NULL;
        }

        if (s & RQ_LOCAL_PENDING)
                return;

        if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
                /* this is disconnected (local only) operation,
                 * or protocol C P_WRITE_ACK,
                 * or protocol A or B P_BARRIER_ACK,
                 * or killed from the transfer log due to connection loss. */
                _req_is_done(mdev, req, rw);
        }
        /* else: network part and not DONE yet. that is
         * protocol A or B, barrier ack still pending... */
}

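/* Like _req_may_be_done(), but only act while I/O is not suspended:
 * while drbd_suspended() is true, requests must not complete towards the
 * upper layers.  They stay around until they are either failed or
 * restarted later, see the FAIL_FROZEN_DISK_IO and RESTART_FROZEN_DISK_IO
 * events in __req_mod(). */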
static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
{
        struct drbd_conf *mdev = req->w.mdev;

        if (!drbd_suspended(mdev))
                _req_may_be_done(req, m);
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *  enforces that it is all in this one place, where it is easier to audit,
 *  it makes it obvious that whatever "event" "happens" to a request should
 *  happen "atomically" within the req_lock,
 *  and it enforces that we have to think in a very structured manner
 *  about the "events" that may happen to a request during its life time ...
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                struct bio_and_error *m)
{
        struct drbd_conf *mdev = req->w.mdev;
        struct net_conf *nc;
        int p, rv = 0;

        if (m)
                m->bio = NULL;

        switch (what) {
        default:
                dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
                break;

        /* does not happen...
         * initialization done in drbd_req_new
        case CREATED:
                break;
        */

        case TO_BE_SENT: /* via network */
                /* reached via __drbd_make_request
                 * and from w_read_retry_remote */
                D_ASSERT(!(req->rq_state & RQ_NET_MASK));
                req->rq_state |= RQ_NET_PENDING;
                rcu_read_lock();
                nc = rcu_dereference(mdev->tconn->net_conf);
                p = nc->wire_protocol;
                rcu_read_unlock();
                req->rq_state |=
                        p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
                        p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
                inc_ap_pending(mdev);
                break;

        case TO_BE_SUBMITTED: /* locally */
                /* reached via __drbd_make_request */
                D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
                req->rq_state |= RQ_LOCAL_PENDING;
                break;

        case COMPLETED_OK:
                if (req->rq_state & RQ_WRITE)
                        mdev->writ_cnt += req->i.size >> 9;
                else
                        mdev->read_cnt += req->i.size >> 9;

                req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
                req->rq_state &= ~RQ_LOCAL_PENDING;

                _req_may_be_done_not_susp(req, m);
                put_ldev(mdev);
                break;

        case ABORT_DISK_IO:
                req->rq_state |= RQ_LOCAL_ABORTED;
                if (req->rq_state & RQ_WRITE)
                        _req_may_be_done_not_susp(req, m);
                else
                        goto goto_queue_for_net_read;
                break;

        case WRITE_COMPLETED_WITH_ERROR:
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;

                __drbd_chk_io_error(mdev, false);
                _req_may_be_done_not_susp(req, m);
                put_ldev(mdev);
                break;

        case READ_AHEAD_COMPLETED_WITH_ERROR:
                /* it is legal to fail READA */
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
                _req_may_be_done_not_susp(req, m);
                put_ldev(mdev);
                break;

        case READ_COMPLETED_WITH_ERROR:
                drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;

                D_ASSERT(!(req->rq_state & RQ_NET_MASK));

                __drbd_chk_io_error(mdev, false);
                put_ldev(mdev);

        goto_queue_for_net_read:

                /* no point in retrying if there is no good remote data,
                 * or we have no connection. */
                if (mdev->state.pdsk != D_UP_TO_DATE) {
                        _req_may_be_done_not_susp(req, m);
                        break;
                }

                /* _req_mod(req,TO_BE_SENT); oops, recursion... */
                req->rq_state |= RQ_NET_PENDING;
                inc_ap_pending(mdev);
                /* fall through: _req_mod(req,QUEUE_FOR_NET_READ); */

        case QUEUE_FOR_NET_READ:
                /* READ or READA, and
                 * no local disk,
                 * or target area marked as invalid,
                 * or just got an io-error. */
                /* from __drbd_make_request
                 * or from bio_endio during read io-error recovery */

                /* so we can verify the handle in the answer packet
                 * corresponding hlist_del is in _req_may_be_done() */
                drbd_insert_interval(&mdev->read_requests, &req->i);

                set_bit(UNPLUG_REMOTE, &mdev->flags);

                D_ASSERT(req->rq_state & RQ_NET_PENDING);
                req->rq_state |= RQ_NET_QUEUED;
                req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
                        ? w_read_retry_remote
                        : w_send_read_req;
                drbd_queue_work(&mdev->tconn->data.work, &req->w);
                break;

        case QUEUE_FOR_NET_WRITE:
                /* assert something? */
                /* from __drbd_make_request only */

                /* corresponding hlist_del is in _req_may_be_done() */
                drbd_insert_interval(&mdev->write_requests, &req->i);

                /* NOTE
                 * In case the req ended up on the transfer log before being
                 * queued on the worker, it could lead to this request being
                 * missed during cleanup after connection loss.
                 * So we have to do both operations here,
                 * within the same lock that protects the transfer log.
                 *
                 * _req_add_to_epoch(req); this has to be after the
                 * _maybe_start_new_epoch(req); which happened in
                 * __drbd_make_request, because we now may set the bit
                 * again ourselves to close the current epoch.
                 *
                 * Add req to the (now) current epoch (barrier). */

                /* otherwise we may lose an unplug, which may cause some remote
                 * io-scheduler timeout to expire, increasing maximum latency,
                 * hurting performance. */
                set_bit(UNPLUG_REMOTE, &mdev->flags);

                /* see __drbd_make_request,
                 * just after it grabs the req_lock */
                D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);

                req->epoch = mdev->tconn->newest_tle->br_number;

                /* increment size of current epoch */
                mdev->tconn->newest_tle->n_writes++;

                /* queue work item to send data */
                D_ASSERT(req->rq_state & RQ_NET_PENDING);
                req->rq_state |= RQ_NET_QUEUED;
                req->w.cb = w_send_dblock;
                drbd_queue_work(&mdev->tconn->data.work, &req->w);

                /* close the epoch, in case it outgrew the limit */
                rcu_read_lock();
                nc = rcu_dereference(mdev->tconn->net_conf);
                p = nc->max_epoch_size;
                rcu_read_unlock();
                if (mdev->tconn->newest_tle->n_writes >= p)
                        queue_barrier(mdev);

                break;

        case QUEUE_FOR_SEND_OOS:
                req->rq_state |= RQ_NET_QUEUED;
                req->w.cb = w_send_out_of_sync;
                drbd_queue_work(&mdev->tconn->data.work, &req->w);
                break;

        case OOS_HANDED_TO_NETWORK:
                /* actually the same */
        case SEND_CANCELED:
                /* treat it the same */
        case SEND_FAILED:
                /* real cleanup will be done from tl_clear. just update flags
                 * so it is no longer marked as on the worker queue */
                req->rq_state &= ~RQ_NET_QUEUED;
                /* if we did it right, tl_clear should be scheduled only after
                 * this, so this should not be necessary! */
                _req_may_be_done_not_susp(req, m);
                break;

        case HANDED_OVER_TO_NETWORK:
                /* assert something? */
                if (bio_data_dir(req->master_bio) == WRITE)
                        atomic_add(req->i.size >> 9, &mdev->ap_in_flight);

                if (bio_data_dir(req->master_bio) == WRITE &&
                    !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
                        /* this is what is dangerous about protocol A:
                         * pretend it was successfully written on the peer. */
                        if (req->rq_state & RQ_NET_PENDING) {
                                dec_ap_pending(mdev);
                                req->rq_state &= ~RQ_NET_PENDING;
                                req->rq_state |= RQ_NET_OK;
                        } /* else: neg-ack was faster... */
                        /* it is still not yet RQ_NET_DONE until the
                         * corresponding epoch barrier got acked as well,
                         * so we know what to dirty on connection loss */
                }
                req->rq_state &= ~RQ_NET_QUEUED;
                req->rq_state |= RQ_NET_SENT;
                /* because _drbd_send_zc_bio could sleep, and may want to
                 * dereference the bio even after the "WRITE_ACKED_BY_PEER" and
                 * "COMPLETED_OK" events came in, once we return from
                 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
                 * whether it is done already, and end it. */
                _req_may_be_done_not_susp(req, m);
                break;

        case READ_RETRY_REMOTE_CANCELED:
                req->rq_state &= ~RQ_NET_QUEUED;
                /* fall through, in case we raced with drbd_disconnect */
        case CONNECTION_LOST_WHILE_PENDING:
                /* transfer log cleanup after connection loss */
                /* assert something? */
                if (req->rq_state & RQ_NET_PENDING)
                        dec_ap_pending(mdev);
                req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
                req->rq_state |= RQ_NET_DONE;
                if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
                        atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);

                /* if it is still queued, we may not complete it here.
                 * it will be canceled soon. */
                if (!(req->rq_state & RQ_NET_QUEUED))
                        _req_may_be_done(req, m); /* Allowed while state.susp */
                break;

        case WRITE_ACKED_BY_PEER_AND_SIS:
                req->rq_state |= RQ_NET_SIS;
        case DISCARD_WRITE:
                /* for discarded conflicting writes of multiple primaries,
                 * there is no need to keep anything in the tl, potential
                 * node crashes are covered by the activity log. */
                req->rq_state |= RQ_NET_DONE;
                /* fall through */
        case WRITE_ACKED_BY_PEER:
                D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
                /* protocol C; successfully written on peer.
                 * Nothing to do here.
                 * We want to keep the tl in place for all protocols, to cater
                 * for volatile write-back caches on lower level devices.
                 *
                 * A barrier request is expected to have forced all prior
                 * requests onto stable storage, so completion of a barrier
                 * request could set NET_DONE right here, and not wait for the
                 * P_BARRIER_ACK, but that is an unnecessary optimization. */

                goto ack_common;
                /* this makes it effectively the same as for: */
        case RECV_ACKED_BY_PEER:
                D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
                /* protocol B; pretends to be successfully written on peer.
                 * see also notes above in HANDED_OVER_TO_NETWORK about
                 * protocol != C */
        ack_common:
                req->rq_state |= RQ_NET_OK;
                D_ASSERT(req->rq_state & RQ_NET_PENDING);
                dec_ap_pending(mdev);
                atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
                req->rq_state &= ~RQ_NET_PENDING;
                _req_may_be_done_not_susp(req, m);
                break;

        case POSTPONE_WRITE:
                D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
                /* If this node has already detected the write conflict, the
                 * worker will be waiting on misc_wait.  Wake it up once this
                 * request has completed locally.
                 */
                D_ASSERT(req->rq_state & RQ_NET_PENDING);
                req->rq_state |= RQ_POSTPONED;
                _req_may_be_done_not_susp(req, m);
                break;

        case NEG_ACKED:
                /* assert something? */
                if (req->rq_state & RQ_NET_PENDING) {
                        dec_ap_pending(mdev);
                        atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
                }
                req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);

                req->rq_state |= RQ_NET_DONE;
                _req_may_be_done_not_susp(req, m);
                /* else: done by HANDED_OVER_TO_NETWORK */
                break;

        case FAIL_FROZEN_DISK_IO:
                if (!(req->rq_state & RQ_LOCAL_COMPLETED))
                        break;

                _req_may_be_done(req, m); /* Allowed while state.susp */
                break;

        case RESTART_FROZEN_DISK_IO:
                if (!(req->rq_state & RQ_LOCAL_COMPLETED))
                        break;

                req->rq_state &= ~RQ_LOCAL_COMPLETED;

                rv = MR_READ;
                if (bio_data_dir(req->master_bio) == WRITE)
                        rv = MR_WRITE;

                get_ldev(mdev);
                req->w.cb = w_restart_disk_io;
                drbd_queue_work(&mdev->tconn->data.work, &req->w);
                break;

        case RESEND:
                /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
                   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
                   Throw them out of the TL here by pretending we got a BARRIER_ACK;
                   we ensure that the peer was not rebooted. */
                if (!(req->rq_state & RQ_NET_OK)) {
                        if (req->w.cb) {
                                drbd_queue_work(&mdev->tconn->data.work, &req->w);
                                rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
                        }
                        break;
                }
                /* else, fall through to BARRIER_ACKED */

        case BARRIER_ACKED:
                if (!(req->rq_state & RQ_WRITE))
                        break;

                if (req->rq_state & RQ_NET_PENDING) {
                        /* barrier came in before all requests have been acked.
                         * this is bad, because if the connection is lost now,
                         * we won't be able to clean them up... */
                        dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
                        list_move(&req->tl_requests, &mdev->tconn->out_of_sequence_requests);
                }
                if ((req->rq_state & RQ_NET_MASK) != 0) {
                        req->rq_state |= RQ_NET_DONE;
                        if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)))
                                atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
                }
                _req_may_be_done(req, m); /* Allowed while state.susp */
                break;

        case DATA_RECEIVED:
                D_ASSERT(req->rq_state & RQ_NET_PENDING);
                dec_ap_pending(mdev);
                req->rq_state &= ~RQ_NET_PENDING;
                req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
                _req_may_be_done_not_susp(req, m);
                break;
        }

        return rv;
}

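/* Typical call pattern for __req_mod() (a sketch; compare the req_mod()
 * inline wrapper in drbd_req.h): feed the event to the state machine
 * under req_lock, and complete the master bio, if any, only after
 * dropping the lock.  Note that __req_mod() may free the request, so it
 * must not be dereferenced afterwards.
 *
 *      spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 *      rv = __req_mod(req, what, &m);
 *      spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 *      if (m.bio)
 *              complete_master_bio(mdev, &m);
 */
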
/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
        unsigned long sbnr, ebnr;
        sector_t esector, nr_sectors;

        if (mdev->state.disk == D_UP_TO_DATE)
                return true;
        if (mdev->state.disk != D_INCONSISTENT)
                return false;
        esector = sector + (size >> 9) - 1;
        nr_sectors = drbd_get_capacity(mdev->this_bdev);
        D_ASSERT(sector < nr_sectors);
        D_ASSERT(esector < nr_sectors);

        sbnr = BM_SECT_TO_BIT(sector);
        ebnr = BM_SECT_TO_BIT(esector);

        return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
}
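
/* Worked example (a sketch, assuming the usual 4 KiB bitmap granularity,
 * BM_BLOCK_SIZE): a 32 KiB read at sector 0 covers sectors 0..63, and
 * BM_SECT_TO_BIT() maps those to bitmap bits 0..7.  The read may be
 * served from an otherwise D_INCONSISTENT local disk only if all eight
 * bits are clear, i.e. drbd_bm_count_bits() returns 0. */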

/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 */
static int complete_conflicting_writes(struct drbd_conf *mdev,
                                       sector_t sector, int size)
{
        for (;;) {
                struct drbd_interval *i;
                int err;

                i = drbd_find_overlap(&mdev->write_requests, sector, size);
                if (!i)
                        return 0;
                err = drbd_wait_misc(mdev, i);
                if (err)
                        return err;
        }
}
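
/* Note: drbd_wait_misc() drops and re-acquires tconn->req_lock while it
 * sleeps, so the interval tree may have changed by the time we wake up.
 * That is why the overlap search above is restarted from scratch on every
 * iteration instead of continuing from the interval we waited on. */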
| 744 | |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 745 | int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 746 | { |
| 747 | const int rw = bio_rw(bio); |
| 748 | const int size = bio->bi_size; |
| 749 | const sector_t sector = bio->bi_sector; |
| 750 | struct drbd_tl_epoch *b = NULL; |
| 751 | struct drbd_request *req; |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 752 | struct net_conf *nc; |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 753 | int local, remote, send_oos = 0; |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 754 | int err; |
Philipp Reisner | 9a25a04 | 2010-05-10 16:42:23 +0200 | [diff] [blame] | 755 | int ret = 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 756 | |
| 757 | /* allocate outside of all locks; */ |
| 758 | req = drbd_req_new(mdev, bio); |
| 759 | if (!req) { |
| 760 | dec_ap_bio(mdev); |
| 761 | /* only pass the error to the upper layers. |
| 762 | * if user cannot handle io errors, that's not our business. */ |
| 763 | dev_err(DEV, "could not kmalloc() req\n"); |
| 764 | bio_endio(bio, -ENOMEM); |
| 765 | return 0; |
| 766 | } |
Philipp Reisner | aeda1cd6 | 2010-11-09 17:45:06 +0100 | [diff] [blame] | 767 | req->start_time = start_time; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 768 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 769 | local = get_ldev(mdev); |
| 770 | if (!local) { |
| 771 | bio_put(req->private_bio); /* or we get a bio leak */ |
| 772 | req->private_bio = NULL; |
| 773 | } |
| 774 | if (rw == WRITE) { |
| 775 | remote = 1; |
| 776 | } else { |
| 777 | /* READ || READA */ |
| 778 | if (local) { |
| 779 | if (!drbd_may_do_local_read(mdev, sector, size)) { |
| 780 | /* we could kick the syncer to |
| 781 | * sync this extent asap, wait for |
| 782 | * it, then continue locally. |
| 783 | * Or just issue the request remotely. |
| 784 | */ |
| 785 | local = 0; |
| 786 | bio_put(req->private_bio); |
| 787 | req->private_bio = NULL; |
| 788 | put_ldev(mdev); |
| 789 | } |
| 790 | } |
| 791 | remote = !local && mdev->state.pdsk >= D_UP_TO_DATE; |
| 792 | } |
| 793 | |
| 794 | /* If we have a disk, but a READA request is mapped to remote, |
| 795 | * we are R_PRIMARY, D_INCONSISTENT, SyncTarget. |
| 796 | * Just fail that READA request right here. |
| 797 | * |
| 798 | * THINK: maybe fail all READA when not local? |
| 799 | * or make this configurable... |
| 800 | * if network is slow, READA won't do any good. |
| 801 | */ |
| 802 | if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) { |
| 803 | err = -EWOULDBLOCK; |
| 804 | goto fail_and_free_req; |
| 805 | } |
| 806 | |
| 807 | /* For WRITES going to the local disk, grab a reference on the target |
| 808 | * extent. This waits for any resync activity in the corresponding |
| 809 | * resync extent to finish, and, if necessary, pulls in the target |
| 810 | * extent into the activity log, which involves further disk io because |
| 811 | * of transactional on-disk meta data updates. */ |
Philipp Reisner | 0778286 | 2010-08-31 12:00:50 +0200 | [diff] [blame] | 812 | if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) { |
| 813 | req->rq_state |= RQ_IN_ACT_LOG; |
Lars Ellenberg | 181286a | 2011-03-31 15:18:56 +0200 | [diff] [blame] | 814 | drbd_al_begin_io(mdev, &req->i); |
Philipp Reisner | 0778286 | 2010-08-31 12:00:50 +0200 | [diff] [blame] | 815 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 816 | |
Philipp Reisner | 6a35c45 | 2011-01-17 20:27:30 +0100 | [diff] [blame] | 817 | remote = remote && drbd_should_do_remote(mdev->state); |
Andreas Gruenbacher | 8f7bed7 | 2010-12-19 23:53:14 +0100 | [diff] [blame] | 818 | send_oos = rw == WRITE && drbd_should_send_out_of_sync(mdev->state); |
Philipp Reisner | 3719094 | 2010-11-10 12:08:37 +0100 | [diff] [blame] | 819 | D_ASSERT(!(remote && send_oos)); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 820 | |
Philipp Reisner | 2aebfab | 2011-03-28 16:48:11 +0200 | [diff] [blame] | 821 | if (!(local || remote) && !drbd_suspended(mdev)) { |
Lars Ellenberg | fb2c7a1 | 2010-10-19 12:08:13 +0200 | [diff] [blame] | 822 | if (__ratelimit(&drbd_ratelimit_state)) |
| 823 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 824 | err = -EIO; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 825 | goto fail_free_complete; |
| 826 | } |
| 827 | |
| 828 | /* For WRITE request, we have to make sure that we have an |
| 829 | * unused_spare_tle, in case we need to start a new epoch. |
| 830 | * I try to be smart and avoid to pre-allocate always "just in case", |
| 831 | * but there is a race between testing the bit and pointer outside the |
| 832 | * spinlock, and grabbing the spinlock. |
| 833 | * if we lost that race, we retry. */ |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 834 | if (rw == WRITE && (remote || send_oos) && |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 835 | mdev->tconn->unused_spare_tle == NULL && |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 836 | test_bit(CREATE_BARRIER, &mdev->flags)) { |
| 837 | allocate_barrier: |
| 838 | b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO); |
| 839 | if (!b) { |
| 840 | dev_err(DEV, "Failed to alloc barrier.\n"); |
| 841 | err = -ENOMEM; |
| 842 | goto fail_free_complete; |
| 843 | } |
| 844 | } |
| 845 | |
| 846 | /* GOOD, everything prepared, grab the spin_lock */ |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 847 | spin_lock_irq(&mdev->tconn->req_lock); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 848 | |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 849 | if (rw == WRITE) { |
| 850 | err = complete_conflicting_writes(mdev, sector, size); |
| 851 | if (err) { |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 852 | if (err != -ERESTARTSYS) |
| 853 | _conn_request_state(mdev->tconn, |
| 854 | NS(conn, C_TIMEOUT), |
| 855 | CS_HARD); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 856 | spin_unlock_irq(&mdev->tconn->req_lock); |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 857 | err = -EIO; |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 858 | goto fail_free_complete; |
| 859 | } |
| 860 | } |
| 861 | |
Philipp Reisner | 2aebfab | 2011-03-28 16:48:11 +0200 | [diff] [blame] | 862 | if (drbd_suspended(mdev)) { |
Philipp Reisner | 9a25a04 | 2010-05-10 16:42:23 +0200 | [diff] [blame] | 863 | /* If we got suspended, use the retry mechanism of |
| 864 | generic_make_request() to restart processing of this |
Andreas Gruenbacher | 2f58dcf | 2010-12-13 17:48:19 +0100 | [diff] [blame] | 865 | bio. In the next call to drbd_make_request |
Philipp Reisner | 9a25a04 | 2010-05-10 16:42:23 +0200 | [diff] [blame] | 866 | we sleep in inc_ap_bio() */ |
| 867 | ret = 1; |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 868 | spin_unlock_irq(&mdev->tconn->req_lock); |
Philipp Reisner | 9a25a04 | 2010-05-10 16:42:23 +0200 | [diff] [blame] | 869 | goto fail_free_complete; |
| 870 | } |
| 871 | |
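 | | /* The connection state may have changed while we waited for the
 | |  * req_lock; re-evaluate remote and send_oos under the lock. */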
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 872 | if (remote || send_oos) { |
Philipp Reisner | 6a35c45 | 2011-01-17 20:27:30 +0100 | [diff] [blame] | 873 | remote = drbd_should_do_remote(mdev->state); |
Andreas Gruenbacher | 8f7bed7 | 2010-12-19 23:53:14 +0100 | [diff] [blame] | 874 | send_oos = rw == WRITE && drbd_should_send_out_of_sync(mdev->state); |
Philipp Reisner | 3719094 | 2010-11-10 12:08:37 +0100 | [diff] [blame] | 875 | D_ASSERT(!(remote && send_oos)); |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 876 | |
| 877 | if (!(remote || send_oos)) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 878 | dev_warn(DEV, "lost connection while grabbing the req_lock!\n"); |
| 879 | if (!(local || remote)) { |
| 880 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 881 | spin_unlock_irq(&mdev->tconn->req_lock); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 882 | err = -EIO; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 883 | goto fail_free_complete; |
| 884 | } |
| 885 | } |
| 886 | |
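 | | /* Hand our pre-allocated barrier over to the connection, unless
 | |  * someone else provided a spare while we were allocating. */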
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 887 | if (b && mdev->tconn->unused_spare_tle == NULL) { |
| 888 | mdev->tconn->unused_spare_tle = b; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 889 | b = NULL; |
| 890 | } |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 891 | if (rw == WRITE && (remote || send_oos) && |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 892 | mdev->tconn->unused_spare_tle == NULL && |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 893 | test_bit(CREATE_BARRIER, &mdev->flags)) { |
| 894 | /* someone closed the current epoch |
| 895 | * while we were grabbing the spinlock */ |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 896 | spin_unlock_irq(&mdev->tconn->req_lock); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 897 | goto allocate_barrier; |
| 898 | } |
| 899 | |
| 900 | |
| 901 | /* Update disk stats */ |
| 902 | _drbd_start_io_acct(mdev, req, bio); |
| 903 | |
| 904 | /* _maybe_start_new_epoch(mdev); |
| 905 | * If we need to generate a write barrier packet, we have to add the |
| 906 | * new epoch (barrier) object, and queue the barrier packet for sending, |
| 907 | * and queue the req's data after it _within the same lock_, otherwise |
| 908 | * we have race conditions where the reorder domains could be mixed up.
| 909 | * |
| 910 | * Even read requests may start a new epoch and queue the corresponding |
| 911 | * barrier packet. To get the write ordering right, we only have to |
| 912 | * make sure that, if this is a write request and it triggered a |
| 913 | * barrier packet, this request is queued within the same spinlock. */ |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 914 | if ((remote || send_oos) && mdev->tconn->unused_spare_tle && |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 915 | test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) { |
Philipp Reisner | 2f5cdd0 | 2011-02-21 14:29:27 +0100 | [diff] [blame] | 916 | _tl_add_barrier(mdev->tconn, mdev->tconn->unused_spare_tle); |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 917 | mdev->tconn->unused_spare_tle = NULL; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 918 | } else { |
| 919 | D_ASSERT(!(remote && rw == WRITE && |
| 920 | test_bit(CREATE_BARRIER, &mdev->flags))); |
| 921 | } |
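 | | /* If we consumed the spare barrier above, it is now the newest
 | |  * epoch, so the list_add_tail() below files this request into the
 | |  * freshly opened epoch. */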
| 922 | |
| 923 | /* NOTE
| 924 | * Actually, 'local' may already be wrong here, since we may have failed
| 925 | * to write to the meta data; it may also become wrong at any time if a
| 926 | * local io-error on some other request leads to us "detaching" the
| 927 | * local disk.
| 928 | *
| 929 | * 'remote' may become wrong at any time because the network could fail.
| 930 | *
| 931 | * This is a harmless race condition, though, since it is handled
| 932 | * correctly at the appropriate places; it merely defers the failure
| 933 | * of the respective operation.
| 934 | */
| 935 | |
| 936 | /* Mark them early for readability.
| 937 | * This just sets some state flags. */
| 938 | if (remote) |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 939 | _req_mod(req, TO_BE_SENT); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 940 | if (local) |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 941 | _req_mod(req, TO_BE_SUBMITTED); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 942 | |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 943 | list_add_tail(&req->tl_requests, &mdev->tconn->newest_tle->requests); |
Philipp Reisner | 288f422 | 2010-05-27 15:07:43 +0200 | [diff] [blame] | 944 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 945 | /* NOTE remote first: to get the concurrent write detection right, |
| 946 | * we must register the request before start of local IO. */ |
| 947 | if (remote) { |
| 948 | /* either WRITE and C_CONNECTED, |
| 949 | * or READ, and no local disk, |
| 950 | * or READ, but not in sync. |
| 951 | */ |
| 952 | _req_mod(req, (rw == WRITE) |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 953 | ? QUEUE_FOR_NET_WRITE |
| 954 | : QUEUE_FOR_NET_READ); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 955 | } |
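 | | /* When not replicating (Ahead/Behind), mark the range out of sync in
 | |  * the bitmap and, only if bits actually changed, queue a notification
 | |  * so the peer learns it is now inconsistent there. */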
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 956 | if (send_oos && drbd_set_out_of_sync(mdev, sector, size)) |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 957 | _req_mod(req, QUEUE_FOR_SEND_OOS); |
Philipp Reisner | 6753171 | 2010-10-27 12:21:30 +0200 | [diff] [blame] | 958 | |
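 | | /* Congestion policy: if too much data is in flight (cong_fill) or too
 | |  * many activity log extents are hot (cong_extents), either go into
 | |  * Ahead/Behind mode (OC_PULL_AHEAD) or disconnect (OC_DISCONNECT), as
 | |  * configured; requires protocol >= 96 and on_congestion != OC_BLOCK. */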
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 959 | rcu_read_lock(); |
| 960 | nc = rcu_dereference(mdev->tconn->net_conf); |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 961 | if (remote && |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 962 | nc->on_congestion != OC_BLOCK && mdev->tconn->agreed_pro_version >= 96) { |
Philipp Reisner | 6753171 | 2010-10-27 12:21:30 +0200 | [diff] [blame] | 963 | int congested = 0; |
| 964 | |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 965 | if (nc->cong_fill && |
| 966 | atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) { |
Philipp Reisner | 6753171 | 2010-10-27 12:21:30 +0200 | [diff] [blame] | 967 | dev_info(DEV, "Congestion-fill threshold reached\n"); |
| 968 | congested = 1; |
| 969 | } |
| 970 | |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 971 | if (mdev->act_log->used >= nc->cong_extents) { |
Philipp Reisner | 6753171 | 2010-10-27 12:21:30 +0200 | [diff] [blame] | 972 | dev_info(DEV, "Congestion-extents threshold reached\n"); |
| 973 | congested = 1; |
| 974 | } |
| 975 | |
Philipp Reisner | 71c78cf | 2011-01-14 19:20:34 +0100 | [diff] [blame] | 976 | if (congested) { |
Philipp Reisner | 039312b | 2011-01-21 14:13:22 +0100 | [diff] [blame] | 977 | queue_barrier(mdev); /* last barrier, after mirrored writes */ |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 978 | |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 979 | if (nc->on_congestion == OC_PULL_AHEAD) |
Philipp Reisner | 6753171 | 2010-10-27 12:21:30 +0200 | [diff] [blame] | 980 | _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 981 | else /*nc->on_congestion == OC_DISCONNECT */ |
Philipp Reisner | 6753171 | 2010-10-27 12:21:30 +0200 | [diff] [blame] | 982 | _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); |
| 983 | } |
| 984 | } |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 985 | rcu_read_unlock(); |
Philipp Reisner | 6753171 | 2010-10-27 12:21:30 +0200 | [diff] [blame] | 986 | |
Philipp Reisner | 87eeee4 | 2011-01-19 14:16:30 +0100 | [diff] [blame] | 987 | spin_unlock_irq(&mdev->tconn->req_lock); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 988 | kfree(b); /* if someone else has beaten us to it... */ |
| 989 | |
| 990 | if (local) { |
| 991 | req->private_bio->bi_bdev = mdev->ldev->backing_bdev; |
| 992 | |
Lars Ellenberg | 6719fb0 | 2010-10-18 23:04:07 +0200 | [diff] [blame] | 993 | /* State may have changed since we grabbed our reference on the |
| 994 | * mdev->ldev member. Double-check, and short-circuit to endio.
| 995 | * In case the last activity log transaction failed to get on |
| 996 | * stable storage, and this is a WRITE, we may not even submit |
| 997 | * this bio. */ |
| 998 | if (get_ldev(mdev)) { |
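 | | /* drbd_insert_fault() is the fault injection hook: depending on the
 | |  * configured fault type it may pretend this write/read/read-ahead
 | |  * failed, so error handling paths can be exercised. */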
Andreas Gruenbacher | 0cf9d27 | 2010-12-07 10:43:29 +0100 | [diff] [blame] | 999 | if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR |
| 1000 | : rw == READ ? DRBD_FAULT_DT_RD |
| 1001 | : DRBD_FAULT_DT_RA)) |
Lars Ellenberg | 6719fb0 | 2010-10-18 23:04:07 +0200 | [diff] [blame] | 1002 | bio_endio(req->private_bio, -EIO); |
| 1003 | else |
| 1004 | generic_make_request(req->private_bio); |
| 1005 | put_ldev(mdev); |
| 1006 | } else |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1007 | bio_endio(req->private_bio, -EIO); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1008 | } |
| 1009 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1010 | return 0; |
| 1011 | |
| 1012 | fail_free_complete: |
Lars Ellenberg | 76727f6 | 2011-05-16 15:31:45 +0200 | [diff] [blame] | 1013 | if (req->rq_state & RQ_IN_ACT_LOG) |
Lars Ellenberg | 181286a | 2011-03-31 15:18:56 +0200 | [diff] [blame] | 1014 | drbd_al_complete_io(mdev, &req->i); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1015 | fail_and_free_req: |
| 1016 | if (local) { |
| 1017 | bio_put(req->private_bio); |
| 1018 | req->private_bio = NULL; |
| 1019 | put_ldev(mdev); |
| 1020 | } |
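 | | /* Complete the master bio with an error only if we will not retry:
 | |  * ret != 0 means the request is being restarted via the
 | |  * generic_make_request() retry mechanism (see the suspended case). */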
Philipp Reisner | 9a25a04 | 2010-05-10 16:42:23 +0200 | [diff] [blame] | 1021 | if (!ret) |
| 1022 | bio_endio(bio, err); |
| 1023 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1024 | drbd_req_free(req); |
| 1025 | dec_ap_bio(mdev); |
| 1026 | kfree(b); |
| 1027 | |
Philipp Reisner | 9a25a04 | 2010-05-10 16:42:23 +0200 | [diff] [blame] | 1028 | return ret; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1029 | } |
| 1030 | |
Andreas Gruenbacher | 2f58dcf | 2010-12-13 17:48:19 +0100 | [diff] [blame] | 1031 | int drbd_make_request(struct request_queue *q, struct bio *bio) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1032 | { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1033 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; |
Philipp Reisner | aeda1cd6 | 2010-11-09 17:45:06 +0100 | [diff] [blame] | 1034 | unsigned long start_time; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1035 | |
Philipp Reisner | aeda1cd6 | 2010-11-09 17:45:06 +0100 | [diff] [blame] | 1036 | start_time = jiffies; |
| 1037 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1038 | /* |
| 1039 | * what we "blindly" assume: |
| 1040 | */ |
| 1041 | D_ASSERT(bio->bi_size > 0); |
Andreas Gruenbacher | c670a39 | 2011-02-21 12:41:39 +0100 | [diff] [blame] | 1042 | D_ASSERT(IS_ALIGNED(bio->bi_size, 512)); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1043 | |
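 | | /* Account this bio as application I/O in flight; inc_ap_bio() may
 | |  * sleep, e.g. while I/O is suspended. */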
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1044 | inc_ap_bio(mdev); |
| 1045 | return __drbd_make_request(mdev, bio, start_time); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1046 | } |
| 1047 | |
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1048 | /* This is called by bio_add_page(). |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1049 | * |
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1050 | * q->max_hw_sectors and other global limits are already enforced there. |
| 1051 | * |
| 1052 | * We need to call down to our lower level device, |
| 1053 | * in case it has special restrictions. |
| 1054 | * |
| 1055 | * We may also need to enforce the configured max-bio-bvecs limits.
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1056 | * |
| 1057 | * As long as the BIO is empty we have to allow at least one bvec, |
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1058 | * regardless of size and offset, so no need to ask lower levels. |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1059 | */ |
| 1060 | int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) |
| 1061 | { |
| 1062 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1063 | unsigned int bio_size = bvm->bi_size; |
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1064 | int limit = DRBD_MAX_BIO_SIZE; |
| 1065 | int backing_limit; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1066 | |
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1067 | if (bio_size && get_ldev(mdev)) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1068 | struct request_queue * const b = |
| 1069 | mdev->ldev->backing_bdev->bd_disk->queue; |
Lars Ellenberg | a1c88d0 | 2010-05-14 19:16:41 +0200 | [diff] [blame] | 1070 | if (b->merge_bvec_fn) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1071 | backing_limit = b->merge_bvec_fn(b, bvm, bvec); |
| 1072 | limit = min(limit, backing_limit); |
| 1073 | } |
| 1074 | put_ldev(mdev); |
| 1075 | } |
| 1076 | return limit; |
| 1077 | } |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1078 | |
| 1079 | void request_timer_fn(unsigned long data) |
| 1080 | { |
| 1081 | struct drbd_conf *mdev = (struct drbd_conf *) data; |
Philipp Reisner | 8b924f1 | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1082 | struct drbd_tconn *tconn = mdev->tconn; |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1083 | struct drbd_request *req; /* oldest request */ |
| 1084 | struct list_head *le; |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1085 | struct net_conf *nc; |
Philipp Reisner | 3b03ad5 | 2011-07-15 13:53:06 +0200 | [diff] [blame^] | 1086 | unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1087 | |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1088 | rcu_read_lock(); |
| 1089 | nc = rcu_dereference(tconn->net_conf); |
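 | | /* Configured timeouts are in tenths of a second; "* HZ / 10" converts
 | |  * them to jiffies. The network timeout is additionally scaled by
 | |  * ko_count. */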
Philipp Reisner | cdfda63 | 2011-07-05 15:38:59 +0200 | [diff] [blame] | 1090 | ent = nc ? nc->timeout * HZ/10 * nc->ko_count : 0; |
| 1091 | |
| 1092 | if (get_ldev(mdev)) { |
| 1093 | dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10; |
| 1094 | put_ldev(mdev); |
| 1095 | } |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1096 | rcu_read_unlock(); |
| 1097 | |
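 | | /* The effective timeout is the shorter of disk and network timeout;
 | |  * 0 means "not configured", which min_not_zero() ignores. */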
Philipp Reisner | cdfda63 | 2011-07-05 15:38:59 +0200 | [diff] [blame] | 1098 | et = min_not_zero(dt, ent); |
| 1099 | |
| 1100 | if (!et || (mdev->state.conn < C_WF_REPORT_PARAMS && mdev->state.disk <= D_FAILED)) |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1101 | return; /* Recurring timer stopped */ |
| 1102 | |
Philipp Reisner | 8b924f1 | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1103 | spin_lock_irq(&tconn->req_lock); |
| 1104 | le = &tconn->oldest_tle->requests; |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1105 | if (list_empty(le)) { |
Philipp Reisner | 8b924f1 | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1106 | spin_unlock_irq(&tconn->req_lock); |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1107 | mod_timer(&mdev->request_timer, jiffies + et); |
| 1108 | return; |
| 1109 | } |
| 1110 | |
| 1111 | le = le->prev; |
| 1112 | req = list_entry(le, struct drbd_request, tl_requests); |
Philipp Reisner | cdfda63 | 2011-07-05 15:38:59 +0200 | [diff] [blame] | 1113 | if (ent && req->rq_state & RQ_NET_PENDING) { |
| 1114 | if (time_is_before_eq_jiffies(req->start_time + ent)) { |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1115 | dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n"); |
Philipp Reisner | cdfda63 | 2011-07-05 15:38:59 +0200 | [diff] [blame] | 1116 | _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1117 | } |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1118 | } |
Philipp Reisner | cdfda63 | 2011-07-05 15:38:59 +0200 | [diff] [blame] | 1119 | if (dt && req->rq_state & RQ_LOCAL_PENDING) { |
| 1120 | if (time_is_before_eq_jiffies(req->start_time + dt)) { |
| 1121 | dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n"); |
| 1122 | __drbd_chk_io_error(mdev, 1); |
| 1123 | } |
| 1124 | } |
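 | | /* Re-arm the timer: if the oldest request is already past its
 | |  * deadline (warned about above), start a full period from now;
 | |  * otherwise fire when its deadline expires. */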
Philipp Reisner | 3b03ad5 | 2011-07-15 13:53:06 +0200 | [diff] [blame^] | 1125 | nt = (time_is_before_eq_jiffies(req->start_time + et) ? jiffies : req->start_time) + et; |
Philipp Reisner | 8b924f1 | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1126 | spin_unlock_irq(&tconn->req_lock); |
Philipp Reisner | 3b03ad5 | 2011-07-15 13:53:06 +0200 | [diff] [blame^] | 1127 | mod_timer(&mdev->request_timer, nt); |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1128 | } |