/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	int cpu;
	cpu = part_stat_lock();
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_time;
	int cpu;
	cpu = part_stat_lock();
	part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_dec_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
{
	const unsigned long s = req->rq_state;

	/* remove it from the transfer log.
	 * well, only if it had been there in the first
	 * place... if it had not (local only or conflicting
	 * and never sent), it should still be "empty" as
	 * initialized in drbd_req_new(), so we can list_del() it
	 * here unconditionally */
	list_del(&req->tl_requests);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (rw == WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */
		if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
			drbd_set_out_of_sync(mdev, req->sector, req->size);

		if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
			drbd_set_in_sync(mdev, req->sector, req->size);

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_endio_pri.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_LOCAL_MASK) {
			if (get_ldev_if_state(mdev, D_FAILED)) {
				if (s & RQ_IN_ACT_LOG)
					drbd_al_complete_io(mdev, req->sector);
				put_ldev(mdev);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
					 "but my Disk seems to have failed :(\n",
					 (unsigned long long) req->sector);
			}
		}
	}

	drbd_req_free(req);
}

static void queue_barrier(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	/* We are within the req_lock. Once we queued the barrier for sending,
	 * we set the CREATE_BARRIER bit. It is cleared as soon as a new
	 * barrier/epoch object is added. This is the only place this bit is
	 * set. It indicates that the barrier for this epoch is already queued,
	 * and no new epoch has been created yet. */
	if (test_bit(CREATE_BARRIER, &mdev->flags))
		return;

	b = mdev->newest_tle;
	b->w.cb = w_send_barrier;
	/* inc_ap_pending done here, so we won't
	 * get imbalanced on connection loss.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in tl_clear. */
	inc_ap_pending(mdev);
	drbd_queue_work(&mdev->data.work, &b->w);
	set_bit(CREATE_BARRIER, &mdev->flags);
}

static void _about_to_complete_local_write(struct drbd_conf *mdev,
	struct drbd_request *req)
{
	const unsigned long s = req->rq_state;
	struct drbd_request *i;
	struct drbd_epoch_entry *e;
	struct hlist_node *n;
	struct hlist_head *slot;

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current epoch.
	 * We can skip this, if this request has not even been sent, because we
	 * did not have a fully established connection yet/anymore, during
	 * bitmap exchange, or while we are C_AHEAD due to congestion policy.
	 */
	if (mdev->state.conn >= C_CONNECTED &&
	    (s & RQ_NET_SENT) != 0 &&
	    req->epoch == mdev->newest_tle->br_number)
		queue_barrier(mdev);

	/* we need to do the conflict detection stuff,
	 * if we have the ee_hash (two_primaries) and
	 * this has been on the network */
	if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
		const sector_t sector = req->sector;
		const int size = req->size;

		/* ASSERT:
		 * there must be no conflicting requests, since
		 * they must have been failed on the spot */
#define OVERLAPS overlaps(sector, size, i->sector, i->size)
		slot = tl_hash_slot(mdev, sector);
		hlist_for_each_entry(i, n, slot, colision) {
			if (OVERLAPS) {
				dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
					  "other: %p %llus +%u\n",
					  req, (unsigned long long)sector, size,
					  i, (unsigned long long)i->sector, i->size);
			}
		}

		/* maybe "wake" those conflicting epoch entries
		 * that wait for this request to finish.
		 *
		 * currently, there can be only _one_ such ee
		 * (well, or some more, which would be pending
		 * P_DISCARD_ACK not yet sent by the asender...),
		 * since we block the receiver thread upon the
		 * first conflict detection, which will wait on
		 * misc_wait.  maybe we want to assert that?
		 *
		 * anyways, if we found one,
		 * we just have to do a wake_up. */
#undef OVERLAPS
#define OVERLAPS overlaps(sector, size, e->sector, e->size)
		slot = ee_hash_slot(mdev, req->sector);
		hlist_for_each_entry(e, n, slot, colision) {
			if (OVERLAPS) {
				wake_up(&mdev->misc_wait);
				break;
			}
		}
	}
#undef OVERLAPS
}

void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(mdev);
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned long s = req->rq_state;
	struct drbd_conf *mdev = req->mdev;
	/* only WRITES may end up here without a master bio (on barrier ack) */
	int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if (s & RQ_NET_QUEUED)
		return;
	if (s & RQ_NET_PENDING)
		return;
	if (s & RQ_LOCAL_PENDING)
		return;

	if (req->master_bio) {
		/* this is data_received (remote read)
		 * or protocol C P_WRITE_ACK
		 * or protocol B P_RECV_ACK
		 * or protocol A "handed_over_to_network" (SendAck)
		 * or canceled or failed,
		 * or killed from the transfer log due to connection loss.
		 */

		/*
		 * figure out whether to report success or failure.
		 *
		 * report success when at least one of the operations succeeded.
		 * or, to put the other way,
		 * only report failure, when both operations failed.
		 *
		 * what to do about the failures is handled elsewhere.
		 * what we need to do here is just: complete the master_bio.
		 *
		 * local completion error, if any, has been stored as ERR_PTR
		 * in private_bio within drbd_endio_pri.
		 */
		int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
		int error = PTR_ERR(req->private_bio);

		/* remove the request from the conflict detection
		 * respective block_id verification hash */
		if (!hlist_unhashed(&req->colision))
			hlist_del(&req->colision);
		else
			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

		/* for writes we need to do some extra housekeeping */
		if (rw == WRITE)
			_about_to_complete_local_write(mdev, req);

		/* Update disk stats */
		_drbd_end_io_acct(mdev, req);

		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
	}

	if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
		/* this is disconnected (local only) operation,
		 * or protocol C P_WRITE_ACK,
		 * or protocol A or B P_BARRIER_ACK,
		 * or killed from the transfer log due to connection loss. */
		_req_is_done(mdev, req, rw);
	}
	/* else: network part and not DONE yet. that is
	 * protocol A or B, barrier ack still pending... */
}

static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->mdev;

	if (!is_susp(mdev->state))
		_req_may_be_done(req, m);
}

/*
 * checks whether there was an overlapping request
 * or ee already registered.
 *
 * if so, return 1, in which case this request is completed on the spot,
 * without ever being submitted or sent.
 *
 * return 0 if it is ok to submit this request.
 *
 * NOTE:
 * paranoia: assume something above us is broken, and issues different write
 * requests for the same block simultaneously...
 *
 * To ensure these won't be reordered differently on both nodes, resulting in
 * diverging data sets, we discard the later one(s). Not that this is supposed
 * to happen, but this is the rationale why we also have to check for
 * conflicting requests with local origin, and why we have to do so regardless
 * of whether we allowed multiple primaries.
 *
 * BTW, in case we only have one primary, the ee_hash is empty anyways, and the
 * second hlist_for_each_entry becomes a noop. This is even simpler than to
 * grab a reference on the net_conf, and check for the two_primaries flag...
 */
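/* Illustrative example (hypothetical numbers, assuming req->size is in bytes
 * as suggested by the req->size>>9 sector conversions elsewhere in this file):
 * a pending local write at sector 1000 with size 8192 covers sectors
 * 1000..1015; a new write at sector 1008 with size 4096 covers sectors
 * 1008..1015, so the two ranges overlap and the new request would be
 * completed on the spot ("[DISCARD L]") instead of being submitted. */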
static int _req_conflicts(struct drbd_request *req)
{
	struct drbd_conf *mdev = req->mdev;
	const sector_t sector = req->sector;
	const int size = req->size;
	struct drbd_request *i;
	struct drbd_epoch_entry *e;
	struct hlist_node *n;
	struct hlist_head *slot;

	D_ASSERT(hlist_unhashed(&req->colision));

	if (!get_net_conf(mdev))
		return 0;

	/* BUG_ON */
	ERR_IF (mdev->tl_hash_s == 0)
		goto out_no_conflict;
	BUG_ON(mdev->tl_hash == NULL);

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
	slot = tl_hash_slot(mdev, sector);
	hlist_for_each_entry(i, n, slot, colision) {
		if (OVERLAPS) {
			dev_alert(DEV, "%s[%u] Concurrent local write detected! "
				  "[DISCARD L] new: %llus +%u; "
				  "pending: %llus +%u\n",
				  current->comm, current->pid,
				  (unsigned long long)sector, size,
				  (unsigned long long)i->sector, i->size);
			goto out_conflict;
		}
	}

	if (mdev->ee_hash_s) {
		/* now, check for overlapping requests with remote origin */
		BUG_ON(mdev->ee_hash == NULL);
#undef OVERLAPS
#define OVERLAPS overlaps(e->sector, e->size, sector, size)
		slot = ee_hash_slot(mdev, sector);
		hlist_for_each_entry(e, n, slot, colision) {
			if (OVERLAPS) {
				dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
					  " [DISCARD L] new: %llus +%u; "
					  "pending: %llus +%u\n",
					  current->comm, current->pid,
					  (unsigned long long)sector, size,
					  (unsigned long long)e->sector, e->size);
				goto out_conflict;
			}
		}
	}
#undef OVERLAPS

out_no_conflict:
	/* this is like it should be, and what we expected.
	 * our users do behave after all... */
	put_net_conf(mdev);
	return 0;

out_conflict:
	put_net_conf(mdev);
	return 1;
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 * enforces that it is all in this one place, where it is easier to audit,
 * it makes it obvious that whatever "event" "happens" to a request should
 * happen "atomically" within the req_lock,
 * and it enforces that we have to think in a very structured manner
 * about the "events" that may happen to a request during its life time ...
 */
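/* A rough sketch (not exhaustive, ordering of local vs. network completion
 * may vary) of the event sequence for a mirrored write in protocol C, as seen
 * by this switch: to_be_send and to_be_submitted are applied while the request
 * is set up under the req_lock, queue_for_net_write hands it to the worker,
 * handed_over_to_network fires once the data packet has been sent,
 * completed_ok (or write_completed_with_error) reports the local bio
 * completion, write_acked_by_peer reports the peer's P_WRITE_ACK, and
 * barrier_acked finally allows _req_is_done() to drop the request from the
 * transfer log.  Reads go through queue_for_net_read/data_received instead. */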
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->mdev;
	int rv = 0;
	m->bio = NULL;

	switch (what) {
	default:
		dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case created:
		break;
		*/

	case to_be_send: /* via network */
		/* reached via drbd_make_request_common
		 * and from w_read_retry_remote */
		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
		req->rq_state |= RQ_NET_PENDING;
		inc_ap_pending(mdev);
		break;

	case to_be_submitted: /* locally */
		/* reached via drbd_make_request_common */
		D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
		req->rq_state |= RQ_LOCAL_PENDING;
		break;

	case completed_ok:
		if (bio_data_dir(req->master_bio) == WRITE)
			mdev->writ_cnt += req->size>>9;
		else
			mdev->read_cnt += req->size>>9;

		req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		req->rq_state &= ~RQ_LOCAL_PENDING;

		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case write_completed_with_error:
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		__drbd_chk_io_error(mdev, false);
		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case read_ahead_completed_with_error:
		/* it is legal to fail READA */
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;
		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case read_completed_with_error:
		drbd_set_out_of_sync(mdev, req->sector, req->size);

		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		D_ASSERT(!(req->rq_state & RQ_NET_MASK));

		__drbd_chk_io_error(mdev, false);
		put_ldev(mdev);

		/* no point in retrying if there is no good remote data,
		 * or we have no connection. */
		if (mdev->state.pdsk != D_UP_TO_DATE) {
			_req_may_be_done_not_susp(req, m);
			break;
		}

		/* _req_mod(req,to_be_send); oops, recursion... */
		req->rq_state |= RQ_NET_PENDING;
		inc_ap_pending(mdev);
		/* fall through: _req_mod(req,queue_for_net_read); */

	case queue_for_net_read:
		/* READ or READA, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from drbd_make_request_common
		 * or from bio_endio during read io-error recovery */

		/* so we can verify the handle in the answer packet
		 * corresponding hlist_del is in _req_may_be_done() */
		hlist_add_head(&req->colision, ar_hash_slot(mdev, req->sector));

		set_bit(UNPLUG_REMOTE, &mdev->flags);

		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
			? w_read_retry_remote
			: w_send_read_req;
		drbd_queue_work(&mdev->data.work, &req->w);
		break;

	case queue_for_net_write:
		/* assert something? */
		/* from drbd_make_request_common only */

		hlist_add_head(&req->colision, tl_hash_slot(mdev, req->sector));
		/* corresponding hlist_del is in _req_may_be_done() */

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * drbd_make_request_common, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &mdev->flags);

		/* see drbd_make_request_common,
		 * just after it grabs the req_lock */
		D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);

		req->epoch = mdev->newest_tle->br_number;

		/* increment size of current epoch */
		mdev->newest_tle->n_writes++;

		/* queue work item to send data */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = w_send_dblock;
		drbd_queue_work(&mdev->data.work, &req->w);

		/* close the epoch, in case it outgrew the limit */
		if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size)
			queue_barrier(mdev);

		break;

	case queue_for_send_oos:
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = w_send_oos;
		drbd_queue_work(&mdev->data.work, &req->w);
		break;

	case oos_handed_to_network:
		/* actually the same */
	case send_canceled:
		/* treat it the same */
	case send_failed:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		req->rq_state &= ~RQ_NET_QUEUED;
		/* if we did it right, tl_clear should be scheduled only after
		 * this, so this should not be necessary! */
		_req_may_be_done_not_susp(req, m);
		break;

	case handed_over_to_network:
		/* assert something? */
		if (bio_data_dir(req->master_bio) == WRITE)
			atomic_add(req->size>>9, &mdev->ap_in_flight);

		if (bio_data_dir(req->master_bio) == WRITE &&
		    mdev->net_conf->wire_protocol == DRBD_PROT_A) {
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			if (req->rq_state & RQ_NET_PENDING) {
				dec_ap_pending(mdev);
				req->rq_state &= ~RQ_NET_PENDING;
				req->rq_state |= RQ_NET_OK;
			} /* else: neg-ack was faster... */
			/* it is still not yet RQ_NET_DONE until the
			 * corresponding epoch barrier got acked as well,
			 * so we know what to dirty on connection loss */
		}
		req->rq_state &= ~RQ_NET_QUEUED;
		req->rq_state |= RQ_NET_SENT;
		/* because _drbd_send_zc_bio could sleep, and may want to
		 * dereference the bio even after the "write_acked_by_peer" and
		 * "completed_ok" events came in, once we return from
		 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
		 * whether it is done already, and end it. */
		_req_may_be_done_not_susp(req, m);
		break;

	case read_retry_remote_canceled:
		req->rq_state &= ~RQ_NET_QUEUED;
		/* fall through, in case we raced with drbd_disconnect */
	case connection_lost_while_pending:
		/* transfer log cleanup after connection loss */
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING)
			dec_ap_pending(mdev);
		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
		req->rq_state |= RQ_NET_DONE;
		if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
			atomic_sub(req->size>>9, &mdev->ap_in_flight);

		/* if it is still queued, we may not complete it here.
		 * it will be canceled soon. */
		if (!(req->rq_state & RQ_NET_QUEUED))
			_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case write_acked_by_peer_and_sis:
		req->rq_state |= RQ_NET_SIS;
	case conflict_discarded_by_peer:
		/* for discarded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log. */
		if (what == conflict_discarded_by_peer)
			dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
				  " DRBD is not a random data generator!\n",
				  (unsigned long long)req->sector, req->size);
		req->rq_state |= RQ_NET_DONE;
		/* fall through */
	case write_acked_by_peer:
		/* protocol C; successfully written on peer.
		 * Nothing to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices.
		 *
		 * A barrier request is expected to have forced all prior
		 * requests onto stable storage, so completion of a barrier
		 * request could set NET_DONE right here, and not wait for the
		 * P_BARRIER_ACK, but that is an unnecessary optimization. */

		/* this makes it effectively the same as for: */
	case recv_acked_by_peer:
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in handed_over_to_network about
		 * protocol != C */
		req->rq_state |= RQ_NET_OK;
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		atomic_sub(req->size>>9, &mdev->ap_in_flight);
		req->rq_state &= ~RQ_NET_PENDING;
		_req_may_be_done_not_susp(req, m);
		break;

	case neg_acked:
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING) {
			dec_ap_pending(mdev);
			atomic_sub(req->size>>9, &mdev->ap_in_flight);
		}
		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);

		req->rq_state |= RQ_NET_DONE;
		_req_may_be_done_not_susp(req, m);
		/* else: done by handed_over_to_network */
		break;

	case fail_frozen_disk_io:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case restart_frozen_disk_io:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		req->rq_state &= ~RQ_LOCAL_COMPLETED;

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(mdev);
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&mdev->data.work, &req->w);
		break;

	case resend:
		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   We ensure that the peer was not rebooted */
		if (!(req->rq_state & RQ_NET_OK)) {
			if (req->w.cb) {
				drbd_queue_work(&mdev->data.work, &req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			}
			break;
		}
		/* else, fall through to barrier_acked */

	case barrier_acked:
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests have been acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			dev_err(DEV, "FIXME (barrier_acked but pending)\n");
			list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
		}
		D_ASSERT(req->rq_state & RQ_NET_SENT);
		req->rq_state |= RQ_NET_DONE;
		if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
			atomic_sub(req->size>>9, &mdev->ap_in_flight);
		_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case data_received:
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		req->rq_state &= ~RQ_NET_PENDING;
		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
		_req_may_be_done_not_susp(req, m);
		break;
	};

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
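/* Worked example (illustrative numbers, assuming the usual 4 KiB bitmap
 * granularity, i.e. one bit per 8 sectors via BM_SECT_TO_BIT()): a 32 KiB
 * read starting at sector 1000 ends at esector 1063, so sbnr..ebnr spans
 * bits 125..132; the read is served locally only if drbd_bm_count_bits()
 * finds none of those bits set, i.e. the whole range is in sync. */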
static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (mdev->state.disk == D_UP_TO_DATE)
		return 1;
	if (mdev->state.disk >= D_OUTDATED)
		return 0;
	if (mdev->state.disk < D_INCONSISTENT)
		return 0;
	/* state.disk == D_INCONSISTENT   We will have a look at the BitMap */
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	D_ASSERT(sector < nr_sectors);
	D_ASSERT(esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
}

static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
	const int rw = bio_rw(bio);
	const int size = bio->bi_size;
	const sector_t sector = bio->bi_sector;
	struct drbd_tl_epoch *b = NULL;
	struct drbd_request *req;
	int local, remote, send_oos = 0;
	int err = -EIO;
	int ret = 0;

	/* allocate outside of all locks; */
	req = drbd_req_new(mdev, bio);
	if (!req) {
		dec_ap_bio(mdev);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		dev_err(DEV, "could not kmalloc() req\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	req->start_time = start_time;

	local = get_ldev(mdev);
	if (!local) {
		bio_put(req->private_bio); /* or we get a bio leak */
		req->private_bio = NULL;
	}
	if (rw == WRITE) {
		remote = 1;
	} else {
		/* READ || READA */
		if (local) {
			if (!drbd_may_do_local_read(mdev, sector, size)) {
				/* we could kick the syncer to
				 * sync this extent asap, wait for
				 * it, then continue locally.
				 * Or just issue the request remotely.
				 */
				local = 0;
				bio_put(req->private_bio);
				req->private_bio = NULL;
				put_ldev(mdev);
			}
		}
		remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
	}

	/* If we have a disk, but a READA request is mapped to remote,
	 * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
	 * Just fail that READA request right here.
	 *
	 * THINK: maybe fail all READA when not local?
	 *        or make this configurable...
	 *        if network is slow, READA won't do any good.
	 */
	if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
		err = -EWOULDBLOCK;
		goto fail_and_free_req;
	}

	/* For WRITES going to the local disk, grab a reference on the target
	 * extent.  This waits for any resync activity in the corresponding
	 * resync extent to finish, and, if necessary, pulls in the target
	 * extent into the activity log, which involves further disk io because
	 * of transactional on-disk meta data updates. */
	if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
		req->rq_state |= RQ_IN_ACT_LOG;
		drbd_al_begin_io(mdev, sector);
	}

	remote = remote && drbd_should_do_remote(mdev->state);
	send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
	D_ASSERT(!(remote && send_oos));

	if (!(local || remote) && !is_susp(mdev->state)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
		goto fail_free_complete;
	}

	/* For WRITE request, we have to make sure that we have an
	 * unused_spare_tle, in case we need to start a new epoch.
	 * I try to be smart and avoid to pre-allocate always "just in case",
	 * but there is a race between testing the bit and pointer outside the
	 * spinlock, and grabbing the spinlock.
	 * if we lost that race, we retry. */
	if (rw == WRITE && (remote || send_oos) &&
	    mdev->unused_spare_tle == NULL &&
	    test_bit(CREATE_BARRIER, &mdev->flags)) {
allocate_barrier:
		b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
		if (!b) {
			dev_err(DEV, "Failed to alloc barrier.\n");
			err = -ENOMEM;
			goto fail_free_complete;
		}
	}

	/* GOOD, everything prepared, grab the spin_lock */
	spin_lock_irq(&mdev->req_lock);

	if (is_susp(mdev->state)) {
		/* If we got suspended, use the retry mechanism of
		   generic_make_request() to restart processing of this
		   bio. In the next call to drbd_make_request
		   we sleep in inc_ap_bio() */
		ret = 1;
		spin_unlock_irq(&mdev->req_lock);
		goto fail_free_complete;
	}

	if (remote || send_oos) {
		remote = drbd_should_do_remote(mdev->state);
		send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
		D_ASSERT(!(remote && send_oos));

		if (!(remote || send_oos))
			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
		if (!(local || remote)) {
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
			spin_unlock_irq(&mdev->req_lock);
			goto fail_free_complete;
		}
	}

	if (b && mdev->unused_spare_tle == NULL) {
		mdev->unused_spare_tle = b;
		b = NULL;
	}
	if (rw == WRITE && (remote || send_oos) &&
	    mdev->unused_spare_tle == NULL &&
	    test_bit(CREATE_BARRIER, &mdev->flags)) {
		/* someone closed the current epoch
		 * while we were grabbing the spinlock */
		spin_unlock_irq(&mdev->req_lock);
		goto allocate_barrier;
	}


	/* Update disk stats */
	_drbd_start_io_acct(mdev, req, bio);

	/* _maybe_start_new_epoch(mdev);
	 * If we need to generate a write barrier packet, we have to add the
	 * new epoch (barrier) object, and queue the barrier packet for sending,
	 * and queue the req's data after it _within the same lock_, otherwise
	 * we have race conditions where the reorder domains could be mixed up.
	 *
	 * Even read requests may start a new epoch and queue the corresponding
	 * barrier packet.  To get the write ordering right, we only have to
	 * make sure that, if this is a write request and it triggered a
	 * barrier packet, this request is queued within the same spinlock. */
	if ((remote || send_oos) && mdev->unused_spare_tle &&
	    test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(mdev, mdev->unused_spare_tle);
		mdev->unused_spare_tle = NULL;
	} else {
		D_ASSERT(!(remote && rw == WRITE &&
			   test_bit(CREATE_BARRIER, &mdev->flags)));
	}

	/* NOTE
	 * Actually, 'local' may be wrong here already, since we may have failed
	 * to write to the meta data, and may become wrong anytime because of
	 * local io-error for some other request, which would lead to us
	 * "detaching" the local disk.
	 *
	 * 'remote' may become wrong any time because the network could fail.
	 *
	 * This is a harmless race condition, though, since it is handled
	 * correctly at the appropriate places; so it just defers the failure
	 * of the respective operation.
	 */

	/* mark them early for readability.
	 * this just sets some state flags. */
	if (remote)
		_req_mod(req, to_be_send);
	if (local)
		_req_mod(req, to_be_submitted);

	/* check this request on the collision detection hash tables.
	 * if we have a conflict, just complete it here.
	 * THINK do we want to check reads, too? (I don't think so...) */
	if (rw == WRITE && _req_conflicts(req))
		goto fail_conflicting;

	list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);

	/* NOTE remote first: to get the concurrent write detection right,
	 * we must register the request before start of local IO. */
	if (remote) {
		/* either WRITE and C_CONNECTED,
		 * or READ, and no local disk,
		 * or READ, but not in sync.
		 */
		_req_mod(req, (rw == WRITE)
				? queue_for_net_write
				: queue_for_net_read);
	}
	if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
		_req_mod(req, queue_for_send_oos);

	if (remote &&
	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
		int congested = 0;

		if (mdev->net_conf->cong_fill &&
		    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
			dev_info(DEV, "Congestion-fill threshold reached\n");
			congested = 1;
		}

		if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
			dev_info(DEV, "Congestion-extents threshold reached\n");
			congested = 1;
		}

		if (congested) {
			/* rs_pending_cnt must be zero, otherwise the two peers
			   might get different bitmaps. With sane configurations
			   the resync stalls long before we might want to go into
			   AHEAD mode.
			   We could force the resync into PAUSE mode here if
			   rs_pending_cnt is > 0 ... */
			queue_barrier(mdev);

			if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
			else  /* mdev->net_conf->on_congestion == OC_DISCONNECT */
				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
		}
	}

	spin_unlock_irq(&mdev->req_lock);
	kfree(b); /* if someone else has beaten us to it... */

	if (local) {
		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;

		/* State may have changed since we grabbed our reference on the
		 * mdev->ldev member. Double check, and short-circuit to endio.
		 * In case the last activity log transaction failed to get on
		 * stable storage, and this is a WRITE, we may not even submit
		 * this bio. */
		if (get_ldev(mdev)) {
			if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
					     : rw == READ  ? DRBD_FAULT_DT_RD
					     :               DRBD_FAULT_DT_RA))
				bio_endio(req->private_bio, -EIO);
			else
				generic_make_request(req->private_bio);
			put_ldev(mdev);
		} else
			bio_endio(req->private_bio, -EIO);
	}

	return 0;

fail_conflicting:
	/* this is a conflicting request.
	 * even though it may have been only _partially_
	 * overlapping with one of the currently pending requests,
	 * without even submitting or sending it, we will
	 * pretend that it was successfully served right now.
	 */
	_drbd_end_io_acct(mdev, req);
	spin_unlock_irq(&mdev->req_lock);
	if (remote)
		dec_ap_pending(mdev);
	/* THINK: do we want to fail it (-EIO), or pretend success?
	 * this pretends success. */
	err = 0;

fail_free_complete:
	if (rw == WRITE && local)
		drbd_al_complete_io(mdev, sector);
fail_and_free_req:
	if (local) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
		put_ldev(mdev);
	}
	if (!ret)
		bio_endio(bio, err);

	drbd_req_free(req);
	dec_ap_bio(mdev);
	kfree(b);

	return ret;
}

/* helper function for drbd_make_request
 * if we can determine just by the mdev (state) that this request will fail,
 * return 1
 * otherwise return 0
 */
static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
{
	if (mdev->state.role != R_PRIMARY &&
		(!allow_oos || is_write)) {
		if (__ratelimit(&drbd_ratelimit_state)) {
			dev_err(DEV, "Process %s[%u] tried to %s; "
				"since we are not in Primary state, "
				"we cannot allow this\n",
				current->comm, current->pid,
				is_write ? "WRITE" : "READ");
		}
		return 1;
	}

	return 0;
}

int drbd_make_request(struct request_queue *q, struct bio *bio)
{
	unsigned int s_enr, e_enr;
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned long start_time;

	if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
		bio_endio(bio, -EPERM);
		return 0;
	}

	start_time = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(bio->bi_size > 0);
	D_ASSERT((bio->bi_size & 0x1ff) == 0);
	D_ASSERT(bio->bi_idx == 0);

	/* to make some things easier, force alignment of requests within the
	 * granularity of our hash tables */
	s_enr = bio->bi_sector >> HT_SHIFT;
	e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;

	if (likely(s_enr == e_enr)) {
		inc_ap_bio(mdev, 1);
		return drbd_make_request_common(mdev, bio, start_time);
	}

	/* can this bio be split generically?
	 * Maybe add our own split-arbitrary-bios function. */
	if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
		/* rather error out here than BUG in bio_split */
		dev_err(DEV, "bio would need to, but cannot, be split: "
			"(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
			bio->bi_vcnt, bio->bi_idx, bio->bi_size,
			(unsigned long long)bio->bi_sector);
		bio_endio(bio, -EINVAL);
	} else {
		/* This bio crosses some boundary, so we have to split it. */
		struct bio_pair *bp;
		/* works for the "do not cross hash slot boundaries" case
		 * e.g. sector 262269, size 4096
		 * s_enr = 262269 >> 6 = 4097
		 * e_enr = (262269+8-1) >> 6 = 4098
		 * HT_SHIFT = 6
		 * sps = 64, mask = 63
		 * first_sectors = 64 - (262269 & 63) = 3
		 */
		const sector_t sect = bio->bi_sector;
		const int sps = 1 << HT_SHIFT; /* sectors per slot */
		const int mask = sps - 1;
		const sector_t first_sectors = sps - (sect & mask);
		bp = bio_split(bio,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
				bio_split_pool,
#endif
				first_sectors);

		/* we need to get a "reference count" (ap_bio_cnt)
		 * to avoid races with the disconnect/reconnect/suspend code.
		 * In case we need to split the bio here, we need to get three references
		 * atomically, otherwise we might deadlock when trying to submit the
		 * second one! */
		inc_ap_bio(mdev, 3);

		D_ASSERT(e_enr == s_enr + 1);

		while (drbd_make_request_common(mdev, &bp->bio1, start_time))
			inc_ap_bio(mdev, 1);

		while (drbd_make_request_common(mdev, &bp->bio2, start_time))
			inc_ap_bio(mdev, 1);

		dec_ap_bio(mdev);

		bio_pair_release(bp);
	}
	return 0;
}

/* This is called by bio_add_page().  With this function we reduce
 * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZE
 * units (was AL_EXTENTs).
 *
 * we do the calculation within the lower 32bit of the byte offsets,
 * since we don't care for actual offset, but only check whether it
 * would cross "activity log extent" boundaries.
 *
 * As long as the BIO is empty we have to allow at least one bvec,
 * regardless of size and offset.  so the resulting bio may still
 * cross extent boundaries.  those are dealt with (bio_split) in
 * drbd_make_request.
 */
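/* Worked example (illustrative only; the real DRBD_MAX_BIO_SIZE is defined in
 * drbd_int.h): assume DRBD_MAX_BIO_SIZE is 32 KiB and a bio that currently
 * starts at byte offset 36864 with 4096 bytes already in it.  Then
 * (36864 & 32767) + 4096 == 8192, so limit = 32768 - 8192 = 24576: at most
 * another 24 KiB may be added before the bio would cross a 32 KiB boundary. */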
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned int bio_offset =
		(unsigned int)bvm->bi_sector << 9; /* 32 bit */
	unsigned int bio_size = bvm->bi_size;
	int limit, backing_limit;

	limit = DRBD_MAX_BIO_SIZE
	      - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
	if (limit < 0)
		limit = 0;
	if (bio_size == 0) {
		if (limit <= bvec->bv_len)
			limit = bvec->bv_len;
	} else if (limit && get_ldev(mdev)) {
		struct request_queue * const b =
			mdev->ldev->backing_bdev->bd_disk->queue;
		if (b->merge_bvec_fn) {
			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
			limit = min(limit, backing_limit);
		}
		put_ldev(mdev);
	}
	return limit;
}