block: don't check ->rq_disk in merges
[linux-2.6-block.git] / block / blk-flush.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011  Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011  Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending are advanced to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
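
/*
 * As a concrete illustration of the above: a data-carrying write submitted
 * with REQ_PREFLUSH | REQ_FUA is sequenced as
 *
 *     PREFLUSH -> DATA -> POSTFLUSH -> DONE
 *
 * on a writeback-cache device without FUA support, as
 *
 *     PREFLUSH -> DATA (issued with REQ_FUA) -> DONE
 *
 * on a device that does support FUA, and is executed as a plain write
 * (both flags cleared) when the device has no writeback cache at all.
 */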

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};
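
/*
 * rq->flush.seq accumulates the REQ_FSEQ_* bits that have already been
 * handled for a request.  blk_insert_flush() seeds it with the steps the
 * request does not need (REQ_FSEQ_ACTIONS & ~policy), so the lowest clear
 * bit - returned by blk_flush_cur_seq() below - always names the next step
 * still to be performed.
 */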

static void blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, unsigned int flags);

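/*
 * The flush machinery is per hardware queue: look up the flush queue via
 * the hctx that REQ_OP_FLUSH maps to for the given software queue context.
 */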
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
        return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

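/*
 * Work out which REQ_FSEQ_* steps @rq needs, based on whether it carries
 * data and on the queue's write cache (QUEUE_FLAG_WC) and FUA
 * (QUEUE_FLAG_FUA) capabilities.
 */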
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        if (fflags & (1UL << QUEUE_FLAG_WC)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}

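/*
 * Return the next step to execute: the lowest REQ_FSEQ_* bit not yet set
 * in rq->flush.seq.  For example, once PREFLUSH (bit 0) has been recorded,
 * seq == 0x1, ffz() returns 1 and the next step is REQ_FSEQ_DATA.
 */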
static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

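/*
 * (Re)queue @rq for dispatch: put it on the requeue list, optionally at
 * the head, and kick the list so it is dispatched soon.
 */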
static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
        blk_mq_add_to_requeue_list(rq, add_front, true);
}

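/*
 * Account a completed flush against the whole-disk part0: one STAT_FLUSH
 * I/O plus the time the flush request spent in flight.
 */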
static void blk_account_io_flush(struct request *rq)
{
        struct block_device *part = rq->rq_disk->part0;

        part_stat_lock();
        part_stat_inc(part, ios[STAT_FLUSH]);
        part_stat_add(part, nsecs[STAT_FLUSH],
                      ktime_get_ns() - rq->start_time_ns);
        part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed the @seq part of its flush sequence; record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        unsigned int cmd_flags;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;
        cmd_flags = rq->cmd_flags;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_move_tail(&rq->flush.list, pending);
                break;

        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
                blk_flush_queue_rq(rq, true);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                BUG_ON(!list_empty(&rq->queuelist));
                list_del_init(&rq->flush.list);
                blk_flush_restore_request(rq);
                blk_mq_end_request(rq, error);
                break;

        default:
                BUG();
        }

        blk_kick_flush(q, fq, cmd_flags);
}

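/*
 * Completion handler for the flush request itself.  Drops the extra
 * reference, toggles flush_running_idx and advances every request that was
 * waiting on this flush to its next sequence step, which may in turn kick
 * the next flush.
 */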
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

        /* release the tag's ownership to the req cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);

        if (!refcount_dec_and_test(&flush_rq->ref)) {
                fq->rq_status = error;
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
                return;
        }

        blk_account_io_flush(flush_rq);
        /*
         * The flush request has to be marked as IDLE when it is really
         * ended because its .end_io() is also called from the timeout code
         * path; this avoids a use-after-free.
         */
        WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
        if (fq->rq_status != BLK_STS_OK)
                error = fq->rq_status;

        if (!q->elevator) {
                flush_rq->tag = BLK_MQ_NO_TAG;
        } else {
                blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = BLK_MQ_NO_TAG;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, flush.list) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                blk_flush_complete_seq(rq, fq, seq, error);
        }

        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

bool is_flush_rq(struct request *rq)
{
        return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related state of @q has changed; consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
                           unsigned int flags)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return;

        /* C2 and C3 */
        if (!list_empty(&fq->flush_data_in_flight) &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * With the "none" scheduler, borrow the tag from the first request
         * since they can't be in flight at the same time, and take over
         * ownership of that tag for the flush request.
         *
         * With an I/O scheduler, the flush request only needs to borrow the
         * scheduler tag so that the driver tag put/get pairing works out.
         */
        flush_rq->mq_ctx = first_rq->mq_ctx;
        flush_rq->mq_hctx = first_rq->mq_hctx;

        if (!q->elevator) {
                flush_rq->tag = first_rq->tag;

                /*
                 * We borrow the data request's driver tag, so we have to
                 * mark this flush request as INFLIGHT to avoid double
                 * accounting of that driver tag.
                 */
                flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
        } else
                flush_rq->internal_tag = first_rq->internal_tag;

        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;
        /*
         * Order the WRITE of ->end_io before the WRITE of rq->ref.  Its
         * pair is the barrier implied by refcount_inc_not_zero() called
         * from blk_mq_find_and_get_req(), which orders the WRITE/READ of
         * flush_rq->ref against the READ of flush_rq->end_io.
         */
        smp_wmb();
        refcount_set(&flush_rq->ref, 1);

        blk_flush_queue_rq(flush_rq, false);
}

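/*
 * Completion handler for the DATA step of a sequenced request: release the
 * driver tag when an I/O scheduler is in use, mark REQ_FSEQ_DATA complete
 * (possibly kicking a POSTFLUSH) and restart the hardware queue.
 */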
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag(rq);
        }

        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

        blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or from __blk_mq_run_hw_queue() to dispatch the request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        /*
         * An empty flush handed down from a stacking driver may
         * translate into nothing if the underlying device does not
         * advertise a write-back cache.  In this case, simply
         * complete the request.
         */
        if (!policy) {
                blk_mq_end_request(rq, 0);
                return;
        }

        BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

        /*
         * If there's data but flush is not necessary, the request can be
         * processed directly without going through flush machinery.  Queue
         * for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                blk_mq_request_bypass_insert(rq, false, true);
                return;
        }

        /*
         * @rq should go through flush machinery.  Mark it part of flush
         * sequence and submit for further processing.
         */
        memset(&rq->flush, 0, sizeof(rq->flush));
        INIT_LIST_HEAD(&rq->flush.list);
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

        rq->end_io = mq_flush_data_end_io;

        spin_lock_irq(&fq->mq_flush_lock);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
        spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
        struct bio bio;

        bio_init(&bio, NULL, 0);
        bio_set_dev(&bio, bdev);
        bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
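
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * a caller that has already submitted and completed its writes and wants
 * them durable on media can do
 *
 *     int err = blkdev_issue_flush(bdev);
 *     if (err)
 *             pr_err("cache flush failed: %d\n", err);
 *
 * where err is 0 on success or a negative errno from submit_bio_wait().
 */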

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;

        spin_lock_init(&fq->mq_flush_lock);

        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);
        INIT_LIST_HEAD(&fq->flush_data_in_flight);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* bio based request queues don't have a flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}

/*
 * Allow drivers to set their own lock class for fq->mq_flush_lock in order
 * to avoid a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  Such drivers need to assign a different
 * lock class to their fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill and, worse, it would introduce a horrible boot
 * delay, because synchronize_rcu() is implied in lockdep_unregister_key(),
 * which is called for each hctx release.  SCSI probing may synchronously
 * create and destroy lots of MQ request_queues for non-existent devices,
 * and some robot test kernels always enable the lockdep option.  More than
 * half an hour has been observed to be taken during SCSI MQ probe with a
 * per-fq lock class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
                struct lock_class_key *key)
{
        lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);