block: rename bio bi_rw to bi_opf
[linux-2.6-block.git] drivers/block/drbd/drbd_req.c
/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"

static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
			      &device->vdisk->part0);
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	generic_end_io_acct(bio_data_dir(req->master_bio),
			    &device->vdisk->part0, req->start_jif);
}

static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;
	memset(req, 0, sizeof(*req));

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
		      | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}

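/* A note on the two counters set up above (a summary of how they are used
 * throughout this file): completion_ref counts the reasons why the master
 * bio may not be completed yet, while the kref keeps the request object
 * itself alive.  Once completion_ref drops to zero, drbd_req_complete()
 * hands the master bio back up; the final kref_put() then frees the
 * request via drbd_req_destroy(). */
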
static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete. */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
		atomic_read(&req->completion_ref) ||
		(s & RQ_LOCAL_PENDING) ||
		((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
				s, atomic_read(&req->completion_ref));
		return;
	}

	/* If called from mod_rq_state (expected normal case) or
	 * drbd_send_and_submit (the less likely normal path), this holds the
	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
	 * though it may be still empty (never added to the transfer log).
	 *
	 * If called from do_retry(), we do NOT hold the req_lock, but we are
	 * still allowed to unconditionally list_del(&req->tl_requests),
	 * because it will be on a local on-stack list only. */
	list_del_init(&req->tl_requests);

	/* finally remove the request from the conflict detection
	 * respective block_id verification interval tree. */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			s, (unsigned long long)req->i.sector, req->i.size);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					  "but my Disk seems to have failed :(\n",
					  (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, drbd_request_mempool);
}

static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{
	m->bio->bi_error = m->error;
	bio_endio(m->bio);
	dec_ap_bio(device);
}


/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put the other way,
	 * only report failure, when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number.  If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (op_is_write(bio_op(req->master_bio)) &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	_drbd_end_io_acct(device, req);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * read-ahead may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok &&
	    bio_op(req->master_bio) == REQ_OP_READ &&
	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
	    !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}

/* still holds resource->req_lock */
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return 0;

	drbd_req_complete(req, m);

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return 0;
	}

	return 1;
}

static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next == NULL)
		connection->req_next = req;
}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if (s & RQ_NET_QUEUED)
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_next = req;
}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending == NULL)
		connection->req_ack_pending = req;
}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_ack_pending = req;
}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done == NULL)
		connection->req_not_net_done = req;
}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_not_net_done = req;
}

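/* The connection->req_next, req_ack_pending and req_not_net_done pointers
 * maintained by the helpers above cache the oldest request on the transfer
 * log that is still in the respective state.  Presumably this is so that
 * users of "the oldest request still waiting for X" (e.g. the request
 * timeout handling) do not have to rescan the whole transfer log; the
 * advance_* variants move a pointer forward once "its" request leaves
 * that state. */
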
/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	unsigned s = req->rq_state;
	int c_put = 0;
	int k_put = 0;

	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
		set_if_null_req_next(peer_device, req);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the ack_receiver thread */
		if (!(s & RQ_NET_DONE)) {
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
			set_if_null_req_not_net_done(peer_device, req);
		}
		if (req->rq_state & RQ_NET_PENDING)
			set_if_null_req_ack_pending(peer_device, req);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		/* local completion may still come in later,
		 * we need to keep the req object around. */
		kref_get(&req->kref);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			++k_put;
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
		advance_conn_req_ack_pending(peer_device, req);
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
		++c_put;
		advance_conn_req_next(peer_device, req);
	}

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			++k_put;
		req->net_done_jif = jiffies;

		/* in ahead/behind mode, or just in case,
		 * before we finally destroy this request,
		 * the caching pointers must not reference it anymore */
		advance_conn_req_next(peer_device, req);
		advance_conn_req_ack_pending(peer_device, req);
		advance_conn_req_not_net_done(peer_device, req);
	}

	/* potentially complete and destroy */

	if (k_put || c_put) {
		/* Completion does its own kref_put.  If we are going to
		 * kref_sub below, we need req to be still around then. */
		int at_least = k_put + !!c_put;
		int refcount = atomic_read(&req->kref.refcount);
		if (refcount < at_least)
			drbd_err(device,
				"mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
				s, req->rq_state, refcount, at_least);
	}

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	if (c_put)
		k_put += drbd_req_put_completion_ref(req, m, c_put);
	if (k_put)
		kref_sub(&req->kref, k_put, drbd_req_destroy);
}

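/* A sketch of one common path through mod_rq_state(), for orientation:
 * a local-only READ holds completion_ref == 2 after TO_BE_SUBMITTED (the
 * initial reference from drbd_req_new() plus one for RQ_LOCAL_PENDING).
 * drbd_send_and_submit() puts the initial reference; COMPLETED_OK later
 * clears RQ_LOCAL_PENDING, dropping the last completion_ref, which
 * completes the master bio and releases the final kref. */
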
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
			(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
			(unsigned long long)req->i.sector,
			req->i.size >> 9,
			bdevname(device->ldev->backing_bdev, b));
}

/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
 * Is it also still "PENDING"?
 * --> If so, clear PENDING and set NET_OK below.
 * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
 * (and we must not set RQ_NET_OK) */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		   (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}

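/* For reference: TO_BE_SENT in __req_mod() below tags each request with
 * the ack it expects, depending on the wire protocol: RQ_EXP_WRITE_ACK
 * for protocol C (P_WRITE_ACK), RQ_EXP_RECEIVE_ACK for protocol B
 * (P_RECV_ACK), and neither for protocol A, which expects no per-request
 * ack at all. */
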
/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *  enforces that it is all in this one place, where it is easier to audit,
 *  it makes it obvious that whatever "event" "happens" to a request should
 *  happen "atomically" within the req_lock,
 *  and it enforces that we have to think in a very structured manner
 *  about the "events" that may happen to a request during its lifetime ...
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_device *const device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			device->writ_cnt += req->i.size >> 9;
		else
			device->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
		/* fall through. */
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case DISCARD_COMPLETED_NOTSUPP:
	case DISCARD_COMPLETED_WITH_ERROR:
		/* I'd rather not detach from local disk just because it
		 * failed a REQ_DISCARD. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &device->flags);

		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &device->flags);

		/* queue work item to send data */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&connection->sender_work,
				&req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (connection->current_tle_writes >= p)
			start_new_tl_epoch(connection);

		break;

	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (is_pending_write_protocol_A(req))
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
						RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		/* It is still not yet RQ_NET_DONE until the
		 * corresponding epoch barrier got acked as well,
		 * so we know what to dirty on connection loss. */
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
	case WRITE_ACKED_BY_PEER:
		/* Normal operation protocol C: successfully written on peer.
		 * During resync, even in protocol != C,
		 * we requested an explicit write ack anyways.
		 * Which means we cannot even assert anything here.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */
		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&device->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). Hopefully. */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(device); /* always succeeds in this call path */
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case RESEND:
		/* Simply complete (local only) READs. */
		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
			break;
		}

		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK
		   (or the local completion?) was missing when we suspended.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
			 * in that case we must not set RQ_NET_PENDING. */

			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
			if (req->w.cb) {
				/* w.cb expected to be w_send_dblock, or w_send_read_req */
				drbd_queue_work(&connection->sender_work,
						&req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			} /* else: FIXME can this happen? */
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		/* barrier ack for READ requests does not make sense */
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
		}
		/* Allowed to complete requests, even while suspended.
		 * As this is called for all requests within a matching epoch,
		 * we need to filter, and only set RQ_NET_DONE for those that
		 * have actually been on the wire. */
		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
		break;

	case DATA_RECEIVED:
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
		break;

	case QUEUE_AS_DRBD_BARRIER:
		start_new_tl_epoch(connection);
		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
		break;
	}

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (device->state.disk == D_UP_TO_DATE)
		return true;
	if (device->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(device->this_bdev);
	D_ASSERT(device, sector < nr_sectors);
	D_ASSERT(device, esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}

static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
		enum drbd_read_balancing rbm)
{
	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&device->local_cnt) >
			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:   /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &device->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}

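/* Worked example for the striping modes above: RB_64K_STRIPING yields
 * stripe_shift = 16, so the test becomes (sector >> 7) & 1, i.e. the read
 * alternates between local and remote every 128 sectors (128 * 512 bytes
 * = 64 KiB of device offset). */
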
/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	for (;;) {
		drbd_for_each_overlap(i, &device->write_requests, sector, size) {
			/* Ignore, if already completed to upper layers. */
			if (i->completed)
				continue;
			/* Handle the first found overlap.  After the schedule
			 * we have to restart the tree walk. */
			break;
		}
		if (!i)	/* if any */
			break;

		/* Indicate to wake up device->misc_wait on progress. */
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}

/* called within req_lock */
static void maybe_pull_ahead(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct net_conf *nc;
	bool congested = false;
	enum drbd_on_congestion on_congestion;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
	rcu_read_unlock();
	if (on_congestion == OC_BLOCK ||
	    connection->agreed_pro_version < 96)
		return;

	if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
		return; /* nothing to do ... */

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure device->act_log is there.
	 */
	if (!get_ldev_if_state(device, D_UP_TO_DATE))
		return;

	if (nc->cong_fill &&
	    atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
		drbd_info(device, "Congestion-fill threshold reached\n");
		congested = true;
	}

	if (device->act_log->used >= nc->cong_extents) {
		drbd_info(device, "Congestion-extents threshold reached\n");
		congested = true;
	}

	if (congested) {
		/* start a new epoch for non-mirrored writes */
		start_new_tl_epoch(first_peer_device(device)->connection);

		if (on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
		else  /*nc->on_congestion == OC_DISCONNECT */
			_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(device);
}

/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */
static bool do_remote_read(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	enum drbd_read_balancing rbm;

	if (req->private_bio) {
		if (!drbd_may_do_local_read(device,
					req->i.sector, req->i.size)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
	}

	if (device->state.pdsk != D_UP_TO_DATE)
		return false;

	if (req->private_bio == NULL)
		return true;

	/* TODO: improve read balancing decisions, take into account drbd
	 * protocol, pending requests etc. */

	rcu_read_lock();
	rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	if (rbm == RB_PREFER_LOCAL && req->private_bio)
		return false; /* submit locally */

	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		return true;
	}

	return false;
}

bool drbd_should_do_remote(union drbd_dev_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
	   states. */
}

static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
	   since we enter state C_AHEAD only if proto >= 96 */
}

/* returns number of connections (== 1, for drbd 8.4)
 * expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	int remote, send_oos;

	remote = drbd_should_do_remote(device->state);
	send_oos = drbd_should_send_out_of_sync(device->state);

	/* Need to replicate writes.  Unless it is an empty flush,
	 * which is better mapped to a DRBD P_BARRIER packet,
	 * also for drbd wire protocol compatibility reasons.
	 * If this was a flush, just start a new epoch.
	 * Unless the current epoch was empty anyways, or we are not currently
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
		D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
		if (remote)
			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
		return remote;
	}

	if (!remote && !send_oos)
		return 0;

	D_ASSERT(device, !(remote && send_oos));

	if (remote) {
		_req_mod(req, TO_BE_SENT);
		_req_mod(req, QUEUE_FOR_NET_WRITE);
	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	return remote;
}

static void drbd_process_discard_req(struct drbd_request *req)
{
	int err = drbd_issue_discard_or_zero_out(req->device,
				req->i.sector, req->i.size >> 9, true);

	if (err)
		req->private_bio->bi_error = -EIO;
	bio_endio(req->private_bio);
}

static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct bio *bio = req->private_bio;
	unsigned int type;

	if (bio_op(bio) != REQ_OP_READ)
		type = DRBD_FAULT_DT_WR;
	else if (bio->bi_opf & REQ_RAHEAD)
		type = DRBD_FAULT_DT_RA;
	else
		type = DRBD_FAULT_DT_RD;

	bio->bi_bdev = device->ldev->backing_bdev;

	/* State may have changed since we grabbed our reference on the
	 * ->ldev member. Double check, and short-circuit to endio.
	 * In case the last activity log transaction failed to get on
	 * stable storage, and this is a WRITE, we may not even submit
	 * this bio. */
	if (get_ldev(device)) {
		if (drbd_insert_fault(device, type))
			bio_io_error(bio);
		else if (bio_op(bio) == REQ_OP_DISCARD)
			drbd_process_discard_req(req);
		else
			generic_make_request(bio);
		put_ldev(device);
	} else
		bio_io_error(bio);
}

static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&req->tl_requests, &device->submit.writes);
	list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[1 /* WRITE */]);
	spin_unlock_irq(&device->resource->req_lock);
	queue_work(device->submit.wq, &device->submit.worker);
	/* do_submit() may sleep internally on al_wait, too */
	wake_up(&device->al_wait);
}

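/* Writes reach the backing device on one of two paths: when the relevant
 * activity log extent is already hot, drbd_request_prepare() below takes
 * the fastpath and the caller submits directly; otherwise the request is
 * queued here for the device->submit worker (do_submit()), which can
 * batch the necessary activity log transactions for many requests. */
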
/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	const int rw = bio_data_dir(bio);
	struct drbd_request *req;

	/* allocate outside of all locks; */
	req = drbd_req_new(device, bio);
	if (!req) {
		dec_ap_bio(device);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		drbd_err(device, "could not kmalloc() req\n");
		bio->bi_error = -ENOMEM;
		bio_endio(bio);
		return ERR_PTR(-ENOMEM);
	}
	req->start_jif = start_jif;

	if (!get_ldev(device)) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
	}

	/* Update disk stats */
	_drbd_start_io_acct(device, req);

	/* process discards always from our submitter thread */
	if (bio_op(bio) == REQ_OP_DISCARD)
		goto queue_for_submitter_thread;

	if (rw == WRITE && req->private_bio && req->i.size
	&& !test_bit(AL_SUSPENDED, &device->flags)) {
		if (!drbd_al_begin_io_fastpath(device, &req->i))
			goto queue_for_submitter_thread;
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
	}
	return req;

 queue_for_submitter_thread:
	atomic_inc(&device->ap_actlog_cnt);
	drbd_queue_write(device, req);
	return NULL;
}

/* Require at least one path to current data.
 * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
 * We would not allow to read what was written,
 * we would not have bumped the data generation uuids,
 * we would cause data divergence for all the wrong reasons.
 *
 * If we don't see at least one D_UP_TO_DATE, we will fail this request,
 * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
 * and queues for retry later.
 */
static bool may_do_writes(struct drbd_device *device)
{
	const union drbd_dev_state s = device->state;
	return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}

static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
	struct drbd_resource *resource = device->resource;
	const int rw = bio_data_dir(req->master_bio);
	struct bio_and_error m = { NULL, };
	bool no_remote = false;
	bool submit_private_bio = false;

	spin_lock_irq(&resource->req_lock);
	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
		/* no more giving up req_lock from now on! */

		/* check for congestion, and potentially stop sending
		 * full data updates, but start sending "dirty bits" only. */
		maybe_pull_ahead(device);
	}

	if (drbd_suspended(device)) {
		/* push back and retry: */
		req->rq_state |= RQ_POSTPONED;
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		goto out;
	}

	/* We fail READ early, if we can not serve it.
	 * We must do this before req is registered on any lists.
	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
	if (rw != WRITE) {
		if (!do_remote_read(req) && !req->private_bio)
			goto nodata;
	}

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);

	/* no point in adding empty flushes to the transfer log,
	 * they are mapped to drbd barriers already. */
	if (likely(req->i.size!=0)) {
		if (rw == WRITE)
			first_peer_device(device)->connection->current_tle_writes++;

		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
	}

	if (rw == WRITE) {
		if (req->private_bio && !may_do_writes(device)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
			goto nodata;
		}
		if (!drbd_process_write_request(req))
			no_remote = true;
	} else {
		/* We either have a private_bio, or we can read from remote.
		 * Otherwise we had done the goto nodata above. */
		if (req->private_bio == NULL) {
			_req_mod(req, TO_BE_SENT);
			_req_mod(req, QUEUE_FOR_NET_READ);
		} else
			no_remote = true;
	}

	/* If it took the fast path in drbd_request_prepare, add it here.
	 * The slow path has added it already. */
	if (list_empty(&req->req_pending_master_completion))
		list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[rw == WRITE]);
	if (req->private_bio) {
		/* needs to be marked within the same spinlock */
		req->pre_submit_jif = jiffies;
		list_add_tail(&req->req_pending_local,
			&device->pending_completion[rw == WRITE]);
		_req_mod(req, TO_BE_SUBMITTED);
		/* but we need to give up the spinlock to submit */
		submit_private_bio = true;
	} else if (no_remote) {
nodata:
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
					(unsigned long long)req->i.sector, req->i.size >> 9);
		/* A write may have been queued for send_oos, however.
		 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
	}

out:
	if (drbd_req_put_completion_ref(req, &m, 1))
		kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&resource->req_lock);

	/* Even though above is a kref_put(), this is safe.
	 * As long as we still need to submit our private bio,
	 * we hold a completion ref, and the request cannot disappear.
	 * If however this request did not even have a private bio to submit
	 * (e.g. remote read), req may already be invalid now.
	 * That's why we cannot check on req->private_bio. */
	if (submit_private_bio)
		drbd_submit_req_private_bio(req);
	if (m.bio)
		complete_master_bio(device, &m);
}

void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
	if (IS_ERR_OR_NULL(req))
		return;
	drbd_send_and_submit(device, req);
}

static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
	struct drbd_request *req, *tmp;
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		const int rw = bio_data_dir(req->master_bio);

		if (rw == WRITE /* rw != WRITE should not even end up here! */
		&& req->private_bio && req->i.size
		&& !test_bit(AL_SUSPENDED, &device->flags)) {
			if (!drbd_al_begin_io_fastpath(device, &req->i))
				continue;

			req->rq_state |= RQ_IN_ACT_LOG;
			req->in_actlog_jif = jiffies;
			atomic_dec(&device->ap_actlog_cnt);
		}

		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
}

static bool prepare_al_transaction_nonblock(struct drbd_device *device,
					    struct list_head *incoming,
					    struct list_head *pending,
					    struct list_head *later)
{
	struct drbd_request *req, *tmp;
	int wake = 0;
	int err;

	spin_lock_irq(&device->al_lock);
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		err = drbd_al_begin_io_nonblock(device, &req->i);
		if (err == -ENOBUFS)
			break;
		if (err == -EBUSY)
			wake = 1;
		if (err)
			list_move_tail(&req->tl_requests, later);
		else
			list_move_tail(&req->tl_requests, pending);
	}
	spin_unlock_irq(&device->al_lock);
	if (wake)
		wake_up(&device->al_wait);
	return !list_empty(pending);
}

void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
	struct drbd_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, pending, tl_requests) {
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
		atomic_dec(&device->ap_actlog_cnt);
		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
}

113fef9e
LE
1463void do_submit(struct work_struct *ws)
1464{
b30ab791 1465 struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
f5b90b6b
LE
1466 LIST_HEAD(incoming); /* from drbd_make_request() */
1467 LIST_HEAD(pending); /* to be submitted after next AL-transaction commit */
1468 LIST_HEAD(busy); /* blocked by resync requests */
1469
1470 /* grab new incoming requests */
1471 spin_lock_irq(&device->resource->req_lock);
1472 list_splice_tail_init(&device->submit.writes, &incoming);
1473 spin_unlock_irq(&device->resource->req_lock);
113fef9e 1474
08a1ddab 1475 for (;;) {
f5b90b6b 1476 DEFINE_WAIT(wait);
113fef9e 1477
f5b90b6b
LE
1478 /* move used-to-be-busy back to front of incoming */
1479 list_splice_init(&busy, &incoming);
b30ab791 1480 submit_fast_path(device, &incoming);
08a1ddab
LE
1481 if (list_empty(&incoming))
1482 break;
1483
45ad07b3 1484 for (;;) {
f5b90b6b
LE
1485 prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);
1486
1487 list_splice_init(&busy, &incoming);
1488 prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
1489 if (!list_empty(&pending))
1490 break;
1491
1492 schedule();
1493
1494 /* If all currently "hot" activity log extents are kept busy by
1495 * incoming requests, we still must not totally starve new
1496 * requests to "cold" extents.
1497 * Something left on &incoming means there had not been
1498 * enough update slots available, and the activity log
1499 * has been marked as "starving".
1500 *
1501 * Try again now, without looking for new requests,
1502 * effectively blocking all new requests until we made
1503 * at least _some_ progress with what we currently have.
1504 */
1505 if (!list_empty(&incoming))
1506 continue;
1507
1508 /* Nothing moved to pending, but nothing left
1509 * on incoming: all moved to busy!
1510 * Grab new and iterate. */
1511 spin_lock_irq(&device->resource->req_lock);
1512 list_splice_tail_init(&device->submit.writes, &incoming);
1513 spin_unlock_irq(&device->resource->req_lock);
1514 }
1515 finish_wait(&device->al_wait, &wait);
1516
 1517 /* If the transaction was full before all incoming requests
1518 * had been processed, skip ahead to commit, and iterate
1519 * without splicing in more incoming requests from upper layers.
1520 *
1521 * Else, if all incoming have been processed,
1522 * they have become either "pending" (to be submitted after
1523 * next transaction commit) or "busy" (blocked by resync).
1524 *
1525 * Maybe more was queued, while we prepared the transaction?
1526 * Try to stuff those into this transaction as well.
1527 * Be strictly non-blocking here,
1528 * we already have something to commit.
1529 *
 1530 * Commit if we don't make any more progress.
1531 */
1532
1533 while (list_empty(&incoming)) {
45ad07b3
LE
1534 LIST_HEAD(more_pending);
1535 LIST_HEAD(more_incoming);
1536 bool made_progress;
1537
1538 /* It is ok to look outside the lock,
 1539 * it's only an optimization anyway */
b30ab791 1540 if (list_empty(&device->submit.writes))
45ad07b3
LE
1541 break;
1542
844a6ae7 1543 spin_lock_irq(&device->resource->req_lock);
b30ab791 1544 list_splice_tail_init(&device->submit.writes, &more_incoming);
844a6ae7 1545 spin_unlock_irq(&device->resource->req_lock);
45ad07b3
LE
1546
1547 if (list_empty(&more_incoming))
1548 break;
1549
f5b90b6b 1550 made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);
45ad07b3
LE
1551
1552 list_splice_tail_init(&more_pending, &pending);
1553 list_splice_tail_init(&more_incoming, &incoming);
45ad07b3
LE
1554 if (!made_progress)
1555 break;
1556 }
08a1ddab 1557
f5b90b6b
LE
1558 drbd_al_begin_io_commit(device);
1559 send_and_submit_pending(device, &pending);
113fef9e
LE
1560 }
1561}
1562
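/* Aside (illustration only, not part of drbd_req.c): a userspace toy of the
 * batching idea in do_submit() above -- reserve activity-log slots for as
 * many queued writes as possible without blocking, commit one transaction
 * for the whole batch, then retry the leftovers. Slot count and names are
 * made up for the sketch. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_AL_SLOTS 4 /* hypothetical number of free AL update slots */

static int toy_free_slots = TOY_AL_SLOTS;

static bool toy_al_begin_nonblock(void)
{
	if (toy_free_slots == 0)
		return false; /* would block: leave the request on "busy" */
	toy_free_slots--;
	return true;
}

int main(void)
{
	int n_incoming = 6, i = 0;

	while (i < n_incoming) {
		int batched = 0;

		/* fill one transaction without blocking */
		while (i < n_incoming && toy_al_begin_nonblock()) {
			printf("request %d -> pending\n", i++);
			batched++;
		}
		printf("commit AL transaction covering %d request(s)\n", batched);
		toy_free_slots = TOY_AL_SLOTS; /* commit frees slots again (simplified) */
	}
	return 0;
}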
dece1635 1563blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
b411b363 1564{
b30ab791 1565 struct drbd_device *device = (struct drbd_device *) q->queuedata;
e5f891b2 1566 unsigned long start_jif;
b411b363 1567
54efd50b
KO
1568 blk_queue_split(q, &bio, q->bio_split);
1569
e5f891b2 1570 start_jif = jiffies;
aeda1cd6 1571
b411b363
PR
1572 /*
1573 * what we "blindly" assume:
1574 */
0b0ba1ef 1575 D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
b411b363 1576
b30ab791 1577 inc_ap_bio(device);
e5f891b2 1578 __drbd_make_request(device, bio, start_jif);
dece1635 1579 return BLK_QC_T_NONE;
b411b363
PR
1580}
1581
84d34f2f
LE
1582static bool net_timeout_reached(struct drbd_request *net_req,
1583 struct drbd_connection *connection,
1584 unsigned long now, unsigned long ent,
1585 unsigned int ko_count, unsigned int timeout)
1586{
1587 struct drbd_device *device = net_req->device;
1588
1589 if (!time_after(now, net_req->pre_send_jif + ent))
1590 return false;
1591
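	/* grace period after a reconnect: within the first effective timeout
	 * after last_reconnect_jif, do not declare the peer dead yet */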
1592 if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
1593 return false;
1594
1595 if (net_req->rq_state & RQ_NET_PENDING) {
1596 drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
1597 jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
1598 return true;
1599 }
1600
1601 /* We received an ACK already (or are using protocol A),
1602 * but are waiting for the epoch closing barrier ack.
1603 * Check if we sent the barrier already. We should not blame the peer
 1604 * for being unresponsive if we did not even ask it yet. */
1605 if (net_req->epoch == connection->send.current_epoch_nr) {
1606 drbd_warn(device,
1607 "We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
1608 jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
1609 return false;
1610 }
1611
1612 /* Worst case: we may have been blocked for whatever reason, then
1613 * suddenly are able to send a lot of requests (and epoch separating
1614 * barriers) in quick succession.
1615 * The timestamp of the net_req may be much too old and not correspond
1616 * to the sending time of the relevant unack'ed barrier packet, so
 1617 * it would trigger a spurious timeout. The latest barrier packet may
 1618 * have too recent a timestamp to trigger the timeout, so we could
 1619 * miss a timeout. Right now we don't have a place to conveniently store
1620 * these timestamps.
1621 * But in this particular situation, the application requests are still
 1622 * completed to upper layers, so DRBD should still "feel" responsive.
 1623 * No need yet to kill this connection; it may still recover.
1624 * If not, eventually we will have queued enough into the network for
1625 * us to block. From that point of view, the timestamp of the last sent
1626 * barrier packet is relevant enough.
1627 */
1628 if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
1629 drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
1630 connection->send.last_sent_barrier_jif, now,
1631 jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
1632 return true;
1633 }
1634 return false;
1635}
1636
1637/* A request is considered timed out, if
1638 * - we have some effective timeout from the configuration,
1639 * with some state restrictions applied,
1640 * - the oldest request is waiting for a response from the network
 1641 * or the local disk, respectively,
1642 * - the oldest request is in fact older than the effective timeout,
 1643 * - the connection was established (or the disk was attached, respectively)
1644 * for longer than the timeout already.
1645 * Note that for 32bit jiffies and very stable connections/disks,
 1646 * we may have a wrap-around, which is caught by
1647 * !time_in_range(now, last_..._jif, last_..._jif + timeout).
1648 *
1649 * Side effect: once per 32bit wrap-around interval, which means every
1650 * ~198 days with 250 HZ, we have a window where the timeout would need
1651 * to expire twice (worst case) to become effective. Good enough.
1652 */
1653
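/* Aside (illustration only): time_after()/time_in_range() stay correct
 * across a jiffies wrap because they compare via signed subtraction rather
 * than a direct "<". A minimal userspace model (macro simplified from
 * include/linux/jiffies.h, typecheck dropped): */
#include <stdio.h>

#define toy_time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long now = 5;                    /* jiffies just wrapped */
	unsigned long stamp = (unsigned long)-10; /* taken before the wrap */

	printf("naive now > stamp: %d\n", now > stamp);                /* 0: wrong */
	printf("toy_time_after:    %d\n", toy_time_after(now, stamp)); /* 1: correct */
	return 0;
}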
7fde2be9
PR
1654void request_timer_fn(unsigned long data)
1655{
b30ab791 1656 struct drbd_device *device = (struct drbd_device *) data;
a6b32bc3 1657 struct drbd_connection *connection = first_peer_device(device)->connection;
7753a4c1 1658 struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
44ed167d 1659 struct net_conf *nc;
7753a4c1 1660 unsigned long oldest_submit_jif;
dfa8bedb 1661 unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
ba280c09 1662 unsigned long now;
84d34f2f 1663 unsigned int ko_count = 0, timeout = 0;
7fde2be9 1664
44ed167d 1665 rcu_read_lock();
bde89a9e 1666 nc = rcu_dereference(connection->net_conf);
84d34f2f
LE
1667 if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
1668 ko_count = nc->ko_count;
1669 timeout = nc->timeout;
1670 }
cdfda633 1671
b30ab791
AG
1672 if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
1673 dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
1674 put_ldev(device);
dfa8bedb 1675 }
44ed167d 1676 rcu_read_unlock();
7fde2be9 1677
84d34f2f
LE
1678
1679 ent = timeout * HZ/10 * ko_count;
dfa8bedb
PR
1680 et = min_not_zero(dt, ent);
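	/* Worked example (hypothetical values): timeout = 60 means 6.0 s, so
	 * with ko_count = 7 we get ent = 60 * HZ/10 * 7, i.e. 42 s in
	 * jiffies; a disk_timeout of 30 (3.0 s) then yields
	 * et = min_not_zero(dt, ent) = 3 s. */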
1681
ba280c09 1682 if (!et)
7fde2be9
PR
1683 return; /* Recurring timer stopped */
1684
ba280c09 1685 now = jiffies;
7753a4c1 1686 nt = now + et;
ba280c09 1687
0500813f 1688 spin_lock_irq(&device->resource->req_lock);
7753a4c1
LE
1689 req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
1690 req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);
84d34f2f 1691
7753a4c1 1692 /* maybe the oldest request waiting for the peer is in fact still
84d34f2f
LE
 1693 * blocking in tcp sendmsg. That's ok, though; it's handled via the
1694 * socket send timeout, requesting a ping, and bumping ko-count in
1695 * we_should_drop_the_connection().
1696 */
1697
 1698 /* check the oldest request we successfully sent,
1699 * but which is still waiting for an ACK. */
1700 req_peer = connection->req_ack_pending;
1701
 1702 /* if we don't have such a request (e.g. protocol A),
 1703 * check the oldest request which is still waiting for its epoch
 1704 * closing barrier ack. */
1705 if (!req_peer)
1706 req_peer = connection->req_not_net_done;
7753a4c1
LE
1707
1708 /* evaluate the oldest peer request only in one timer! */
1709 if (req_peer && req_peer->device != device)
1710 req_peer = NULL;
1711
1712 /* do we have something to evaluate? */
1713 if (req_peer == NULL && req_write == NULL && req_read == NULL)
1714 goto out;
1715
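	/* oldest of the pending local read/write submit timestamps,
	 * or "now" if neither list has an entry */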
1716 oldest_submit_jif =
1717 (req_write && req_read)
1718 ? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
1719 ? req_write->pre_submit_jif : req_read->pre_submit_jif )
1720 : req_write ? req_write->pre_submit_jif
1721 : req_read ? req_read->pre_submit_jif : now;
7fde2be9 1722
84d34f2f 1723 if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
9581f97a 1724 _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);
84d34f2f 1725
7753a4c1
LE
1726 if (dt && oldest_submit_jif != now &&
1727 time_after(now, oldest_submit_jif + dt) &&
b30ab791 1728 !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
d0180171 1729 drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
b30ab791 1730 __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
dfa8bedb 1731 }
08535466
LE
1732
 1733 /* Reschedule the timer for the nearest timeout that has not already expired.
 1734 * Fall back to now + min(effective network timeout, disk timeout). */
7753a4c1
LE
1735 ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
1736 ? req_peer->pre_send_jif + ent : now + et;
1737 dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
1738 ? oldest_submit_jif + dt : now + et;
08535466 1739 nt = time_before(ent, dt) ? ent : dt;
7753a4c1 1740out:
8d4ba3f0 1741 spin_unlock_irq(&device->resource->req_lock);
b30ab791 1742 mod_timer(&device->request_timer, nt);
7fde2be9 1743}