/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011  Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011  Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case of a continuous stream of
 *     FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
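
/*
 * As a sketch of the decomposition above, a write carrying both
 * REQ_PREFLUSH and REQ_FUA maps to the following sequences depending on
 * the device:
 *
 *	no writeback cache:	DATA (flush flags stripped)
 *	wb cache + FUA:		PREFLUSH -> DATA (REQ_FUA kept)
 *	wb cache, no FUA:	PREFLUSH -> DATA -> POSTFLUSH
 */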

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);

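/*
 * Map @rq's flags to the set of REQ_FSEQ_* steps it must execute.
 * @fflags is a snapshot of the queue flags (QUEUE_FLAG_WC/FUA), cached
 * by the caller since they may change underneath us.
 */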
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

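/*
 * The current step is the lowest REQ_FSEQ_* bit not yet set in
 * rq->flush.seq; ffz() finds the first zero bit, so steps advance in
 * PREFLUSH -> DATA -> POSTFLUSH -> DONE order.  Steps not in the policy
 * are pre-marked complete by blk_insert_flush() and thus skipped.
 */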
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

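/*
 * (Re)queue @rq for dispatch.  On blk-mq this goes through the requeue
 * list and returns %false since the requeue machinery runs the queue
 * itself; on the legacy path the request is put on q->queue_head
 * directly and %true is returned so the caller knows to run the queue.
 */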
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq);
	return kicked | queued;
}

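/*
 * Completion handler for the flush request itself.  Toggles the running
 * index and walks every request that was waiting on this flush,
 * advancing each to its next sequence step.
 */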
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid stalls in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 *    the queue.
	 * 2. When a flush request is running on a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request is
	 *    finished to avoid a stall.
	 * This function is called from the request completion path, and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Borrow the tag from the first request since they can't be in
	 * flight at the same time, and take over the tag's ownership for
	 * the flush req.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;
		flush_rq->tag = first_rq->tag;
		fq->orig_rq = first_rq;

		hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
	}

	flush_rq->cmd_type = REQ_TYPE_FS;
	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

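/*
 * blk-mq counterpart of flush_data_end_io().  Flush machinery state is
 * protected by fq->mq_flush_lock here rather than q->queue_lock, and
 * the hardware queue is kicked instead of the legacy request queue.
 */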
static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = blk_mq_map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or __blk_mq_run_hw_queue() to dispatch requests.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If the WAIT flag is not passed, the caller may only check
 *    that the request was pushed onto some internal queue for later handling.
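 *
 *    A minimal usage sketch, assuming @bdev is already open and the
 *    error offset is not wanted:
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);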
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

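/*
 * Allocate the per-queue flush machinery.  For blk-mq the preallocated
 * flush request is sized to include the driver's per-command payload
 * (@cmd_size) and rounded up to a cache line.
 */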
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops) {
		spin_lock_init(&fq->mq_flush_lock);
		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	}

	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio-based request queues don't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}