// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, blk_opf_t flags);

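/*
 * Map @ctx to the hardware queue used for REQ_OP_FLUSH and return that
 * queue's flush state.
 */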
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

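/*
 * Work out which REQ_FSEQ_* steps a request needs: DATA if it carries any
 * sectors, and PREFLUSH/POSTFLUSH depending on REQ_PREFLUSH/REQ_FUA and on
 * whether the queue advertises a volatile write cache (QUEUE_FLAG_WC) and
 * native FUA support (QUEUE_FLAG_FUA).
 */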
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

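/*
 * Return the next step of @rq's flush sequence: the lowest REQ_FSEQ_* bit
 * not yet recorded as complete in rq->flush.seq.
 */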
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

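/*
 * Queue @rq on the requeue list for dispatch (at the head if @add_front is
 * set) and kick the requeue list so it gets run.
 */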
static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}

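/*
 * Account a completed flush in the disk's STAT_FLUSH counters: one more
 * flush I/O plus the time elapsed since rq->start_time_ns.
 */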
static void blk_account_io_flush(struct request *rq)
{
	struct block_device *part = rq->q->disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	blk_opf_t cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

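/*
 * ->end_io handler of the preallocated flush request.  It releases the
 * borrowed tag, toggles flush_running_idx, and walks every request that was
 * waiting on this flush, advancing each to its next sequence step.
 */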
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!req_ref_put_and_test(flush_rq)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	blk_account_io_flush(flush_rq);
	/*
	 * Flush request has to be marked as IDLE when it is really ended
	 * because its .end_io() is called from timeout code path too for
	 * avoiding use-after-free.
	 */
	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (fq->rq_status != BLK_STS_OK) {
		error = fq->rq_status;
		fq->rq_status = BLK_STS_OK;
	}

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

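/* Tell whether @rq is a flush queue's preallocated flush request. */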
bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   blk_opf_t flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * With no I/O scheduler, borrow the tag from the first request since
	 * they can't be in flight at the same time, and take ownership of
	 * that tag for the flush request.
	 *
	 * With an I/O scheduler, the flush request needs to borrow the
	 * scheduler tag just for cheating put/get driver tag.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		flush_rq->tag = first_rq->tag;

		/*
		 * We borrow the data request's driver tag, so we have to mark
		 * this flush request as INFLIGHT to avoid double accounting
		 * of that driver tag.
		 */
		flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
	} else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->end_io = flush_end_io;
	/*
	 * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
	 * implied in refcount_inc_not_zero() called from
	 * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
	 * and READ flush_rq->end_io
	 */
	smp_wmb();
	req_ref_set(flush_rq, 1);

	blk_flush_queue_rq(flush_rq, false);
}

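/*
 * ->end_io handler for the DATA step of a sequenced PREFLUSH/FUA request.
 * It returns any scheduler-owned driver tag, records the DATA step as
 * complete (possibly kicking the next flush), and restarts the hw queue.
 */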
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or from __blk_mq_run_hw_queue() to dispatch the request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false, true);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
	return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);

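/*
 * Allocate a blk_flush_queue along with its preallocated flush request,
 * sized to hold @cmd_size bytes of driver private data, on NUMA node @node.
 */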
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* a bio-based request queue doesn't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}

/*
 * Allow a driver to set its own lock class on fq->mq_flush_lock to
 * avoid a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  We need to assign a different lock class
 * to these drivers' fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill, and worse, it introduces a horrible boot
 * delay because synchronize_rcu() is implied in lockdep_unregister_key(),
 * which is called for each hctx release.  SCSI probing may synchronously
 * create and destroy lots of MQ request_queues for non-existent devices,
 * and some robot test kernels always enable the lockdep option.  With a
 * per-fq lock class, SCSI MQ probing has been observed to take more than
 * half an hour.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
				   struct lock_class_key *key)
{
	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);