// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011  Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011  Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of a flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing the DATA step of its
 *     sequence.  This avoids issuing separate POSTFLUSHes for requests which
 *     shared a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH   = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA       = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH  = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE       = (1 << 3),

        REQ_FSEQ_ACTIONS    = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                              REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT = 5 * HZ,
};
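/*
 * For example, on a device with a writeback cache but without FUA support, a
 * REQ_PREFLUSH | REQ_FUA write with data is sequenced as REQ_FSEQ_PREFLUSH ->
 * REQ_FSEQ_DATA -> REQ_FSEQ_POSTFLUSH before it is marked REQ_FSEQ_DONE; see
 * blk_insert_flush() below.
 */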

static void blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, blk_opf_t flags);

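/*
 * Flush sequencing uses the flush queue of the hardware queue that
 * REQ_OP_FLUSH maps to for @ctx.
 */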
static inline struct blk_flush_queue *
blk_get_flush_queue(struct blk_mq_ctx *ctx)
{
        return blk_mq_map_queue(REQ_OP_FLUSH, ctx)->fq;
}

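/* Return the next REQ_FSEQ_* step that @rq has not completed yet. */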
static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

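/*
 * Undo the adjustments made for flush sequencing so that @rq can be
 * completed like a normal request.
 */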
static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;
        if (rq->bio)
                rq->__sector = rq->bio->bi_iter.bi_sector;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

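/* Account the completed flush in the disk's STAT_FLUSH counters. */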
static void blk_account_io_flush(struct request *rq)
{
        struct block_device *part = rq->q->disk->part0;

        part_stat_lock();
        part_stat_inc(part, ios[STAT_FLUSH]);
        part_stat_add(part, nsecs[STAT_FLUSH],
                      blk_time_get_ns() - rq->start_time_ns);
        part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        blk_opf_t cmd_flags;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;
        cmd_flags = rq->cmd_flags;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_add_tail(&rq->queuelist, pending);
                break;

        case REQ_FSEQ_DATA:
                fq->flush_data_in_flight++;
                spin_lock(&q->requeue_lock);
                list_move(&rq->queuelist, &q->requeue_list);
                spin_unlock(&q->requeue_lock);
                blk_mq_kick_requeue_list(q);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                list_del_init(&rq->queuelist);
                blk_flush_restore_request(rq);
                blk_mq_end_request(rq, error);
                break;

        default:
                BUG();
        }

        blk_kick_flush(q, fq, cmd_flags);
}

static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
                                       blk_status_t error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(flush_rq->mq_ctx);

        /* release the tag's ownership to the request it was cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);

        if (!req_ref_put_and_test(flush_rq)) {
                fq->rq_status = error;
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
                return RQ_END_IO_NONE;
        }

        blk_account_io_flush(flush_rq);
        /*
         * The flush request has to be marked as IDLE when it is really
         * ended because its .end_io() is also called from the timeout code
         * path, to avoid use-after-free.
         */
        WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
        if (fq->rq_status != BLK_STS_OK) {
                error = fq->rq_status;
                fq->rq_status = BLK_STS_OK;
        }

        if (!q->elevator) {
                flush_rq->tag = BLK_MQ_NO_TAG;
        } else {
                blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = BLK_MQ_NO_TAG;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, queuelist) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                list_del_init(&rq->queuelist);
                blk_flush_complete_seq(rq, fq, seq, error);
        }

        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
        return RQ_END_IO_NONE;
}

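/*
 * Only the preallocated fq->flush_rq ever has flush_end_io() as its
 * completion handler; see blk_kick_flush().
 */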
bool is_flush_rq(struct request *rq)
{
        return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed; consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 *
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
                           blk_opf_t flags)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, queuelist);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return;

        /* C2 and C3 */
        if (fq->flush_data_in_flight &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means a flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * Without an I/O scheduler, borrow the tag from the first request
         * since the two can't be in flight at the same time, and take over
         * ownership of that tag for the flush request.
         *
         * With an I/O scheduler, the flush request only needs to borrow the
         * scheduler tag so that the driver tag put/get accounting stays
         * balanced.
         */
        flush_rq->mq_ctx = first_rq->mq_ctx;
        flush_rq->mq_hctx = first_rq->mq_hctx;

        if (!q->elevator)
                flush_rq->tag = first_rq->tag;
        else
                flush_rq->internal_tag = first_rq->internal_tag;

        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->end_io = flush_end_io;
        /*
         * Order the WRITE to ->end_io before the WRITE to rq->ref.  The
         * pairing barrier is the one implied by refcount_inc_not_zero()
         * called from blk_mq_find_and_get_req(), which orders the WRITE/READ
         * of flush_rq->ref against the READ of flush_rq->end_io.
         */
        smp_wmb();
        req_ref_set(flush_rq, 1);

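        /*
         * Queue the flush request on q->flush_list and kick the requeue
         * machinery to dispatch it.
         */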
        spin_lock(&q->requeue_lock);
        list_add_tail(&flush_rq->queuelist, &q->flush_list);
        spin_unlock(&q->requeue_lock);

        blk_mq_kick_requeue_list(q);
}

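/*
 * Completion handler for the DATA step of a sequenced PREFLUSH/FUA request;
 * it advances the request to the next step of its flush sequence.
 */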
static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
                                               blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(ctx);

        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag(rq);
        }

        /*
         * After populating an empty queue, kick it to avoid a stall.  Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        fq->flush_data_in_flight--;
        /*
         * rq->queuelist may have been corrupted by the reuse of rq->rq_next;
         * re-initialize it before using it here.
         */
        INIT_LIST_HEAD(&rq->queuelist);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

        blk_mq_sched_restart(hctx);
        return RQ_END_IO_NONE;
}

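/*
 * Prepare @rq for flush sequencing: reset the sequence state and route its
 * data completion through mq_flush_data_end_io().
 */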
static void blk_rq_init_flush(struct request *rq)
{
        rq->flush.seq = 0;
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
        rq->end_io = mq_flush_data_end_io;
}

/*
 * Insert a PREFLUSH/FUA request into the flush state machine.
 * Returns true if the request has been consumed by the flush state machine,
 * or false if the caller should continue to process it.
 */
bool blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(rq->mq_ctx);
        bool supports_fua = q->limits.features & BLK_FEAT_FUA;
        unsigned int policy = 0;

        /* FLUSH/FUA request must never be merged */
        WARN_ON_ONCE(rq->bio != rq->biotail);

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        /*
         * Check which flushes we need to sequence for this operation.
         */
        if (blk_queue_write_cache(q)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if ((rq->cmd_flags & REQ_FUA) && !supports_fua)
                        policy |= REQ_FSEQ_POSTFLUSH;
        }

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!supports_fua)
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        switch (policy) {
        case 0:
                /*
                 * An empty flush handed down from a stacking driver may
                 * translate into nothing if the underlying device does not
                 * advertise a write-back cache.  In this case, simply
                 * complete the request.
                 */
                blk_mq_end_request(rq, 0);
                return true;
        case REQ_FSEQ_DATA:
                /*
                 * If there's data, but no flush is necessary, the request can
                 * be processed directly without going through flush machinery.
                 * Queue for normal execution.
                 */
                return false;
        case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
                /*
                 * Initialize the flush fields and completion handler to
                 * trigger the post flush, and then just pass the command on.
                 */
                blk_rq_init_flush(rq);
                rq->flush.seq |= REQ_FSEQ_PREFLUSH;
                spin_lock_irq(&fq->mq_flush_lock);
                fq->flush_data_in_flight++;
                spin_unlock_irq(&fq->mq_flush_lock);
                return false;
        default:
                /*
                 * Mark the request as part of a flush sequence and submit it
                 * for further processing to the flush state machine.
                 */
                blk_rq_init_flush(rq);
                spin_lock_irq(&fq->mq_flush_lock);
                blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
                spin_unlock_irq(&fq->mq_flush_lock);
                return true;
        }
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
        struct bio bio;

        bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
        return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
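/*
 * Illustrative example (not taken from any particular caller): a filesystem
 * that has already written out its data and metadata could do
 *
 *      err = blkdev_issue_flush(sb->s_bdev);
 *
 * to drain the device's volatile write cache before reporting the data as
 * durable.  The call blocks until the flush completes and returns 0 on
 * success or a negative errno.
 */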
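/*
 * Allocate a blk_flush_queue together with its preallocated flush request;
 * @cmd_size extra bytes are reserved behind the request for driver data.
 */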
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;

        spin_lock_init(&fq->mq_flush_lock);

        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* bio based request queues don't have a flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}

/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock to avoid a
 * lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  We need to assign a different lock class to
 * these drivers' fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill, and worse, it introduces a horrible boot delay
 * because synchronize_rcu() is implied in lockdep_unregister_key(), which is
 * called for each hctx release.  SCSI probing may synchronously create and
 * destroy lots of MQ request_queues for non-existent devices, and some robot
 * test kernels always enable the lockdep option.  It has been observed that
 * more than half an hour is taken during SCSI MQ probing with a per-fq lock
 * class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
                struct lock_class_key *key)
{
        lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);