// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference. The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered. Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
 * completes, all the requests which were pending proceed to the next
 * step. This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress. This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete. The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, blk_opf_t flags);

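/*
 * Look up the flush queue backing @ctx: flush requests are handled by the
 * hardware queue that REQ_OP_FLUSH maps to, so return that hctx's fq.
 */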
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

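/*
 * Decide which REQ_FSEQ_* steps @rq needs based on the queue's cache
 * features (@fflags) and the request itself. For example, a REQ_PREFLUSH |
 * REQ_FUA write with data on a device that has a writeback cache but no
 * FUA support gets all three steps: PREFLUSH, DATA and POSTFLUSH.
 */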
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

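/*
 * Return the next step of @rq's flush sequence: the lowest REQ_FSEQ_* bit
 * that hasn't been marked complete in rq->flush.seq yet.
 */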
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

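/*
 * Undo what blk_rq_init_flush() and the flush data completion did to @rq so
 * that it can be completed as a regular request.
 */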
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again. @rq->biotail is guaranteed to equal the
	 * original @rq->bio. Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

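/* Account a completed flush in the whole-disk (part0) STAT_FLUSH counters. */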
static void blk_account_io_flush(struct request *rq)
{
	struct block_device *part = rq->q->disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	blk_opf_t cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_del_init(&rq->flush.list);
		fq->flush_data_in_flight++;
		spin_lock(&q->requeue_lock);
		list_add(&rq->queuelist, &q->requeue_list);
		spin_unlock(&q->requeue_lock);
		blk_mq_kick_requeue_list(q);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path. Restore @rq for
		 * normal completion and end it.
		 */
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

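/*
 * Completion handler for the flush request itself. Releases the borrowed
 * tag, flips the running index and advances every request that was waiting
 * on this flush to the next step of its sequence.
 */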
static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
				       blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	/* release the tag's ownership to the request it was cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!req_ref_put_and_test(flush_rq)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return RQ_END_IO_NONE;
	}

	blk_account_io_flush(flush_rq);
	/*
	 * The flush request has to be marked as IDLE when it is really ended
	 * because its ->end_io() is called from the timeout code path too,
	 * to avoid use-after-free.
	 */
	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (fq->rq_status != BLK_STS_OK) {
		error = fq->rq_status;
		fq->rq_status = BLK_STS_OK;
	}

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
	return RQ_END_IO_NONE;
}

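/* A flush request is recognizable by its dedicated completion handler. */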
bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   blk_opf_t flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 */
	if (fq->flush_data_in_flight &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx. This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * With no I/O scheduler, borrow the tag from the first request
	 * since they can't be in flight at the same time, and acquire the
	 * tag's ownership for the flush request.
	 *
	 * With an I/O scheduler, the flush request needs to borrow the
	 * scheduler tag just to balance the put/get of the driver tag.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		flush_rq->tag = first_rq->tag;

		/*
		 * We borrow the data request's driver tag, so this flush
		 * request has to be marked as INFLIGHT to avoid
		 * double-accounting that driver tag.
		 */
		flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
	} else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->end_io = flush_end_io;
	/*
	 * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
	 * implied in refcount_inc_not_zero() called from
	 * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
	 * and READ flush_rq->end_io.
	 */
	smp_wmb();
	req_ref_set(flush_rq, 1);

	spin_lock(&q->requeue_lock);
	list_add_tail(&flush_rq->queuelist, &q->flush_list);
	spin_unlock(&q->requeue_lock);

	blk_mq_kick_requeue_list(q);
}

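/*
 * Completion handler for the DATA step of a sequenced PREFLUSH/FUA request.
 * Marks DATA done and lets the flush state machine decide whether a
 * POSTFLUSH still needs to be issued.
 */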
static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
					       blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall. Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	fq->flush_data_in_flight--;
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
	return RQ_END_IO_NONE;
}

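/*
 * Prepare @rq for the flush state machine: reset the sequence state and
 * divert its completion to mq_flush_data_end_io(), saving the original
 * ->end_io for when the whole sequence is done.
 */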
static void blk_rq_init_flush(struct request *rq)
{
	rq->flush.seq = 0;
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	rq->end_io = mq_flush_data_end_io;
}

/*
 * Insert a PREFLUSH/FUA request into the flush state machine.
 * Returns true if the request has been consumed by the flush state machine,
 * or false if the caller should continue to process it.
 */
bool blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/* FLUSH/FUA request must never be merged */
	WARN_ON_ONCE(rq->bio != rq->biotail);

	/*
	 * @policy now records what operations need to be done. Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	switch (policy) {
	case 0:
		/*
		 * An empty flush handed down from a stacking driver may
		 * translate into nothing if the underlying device does not
		 * advertise a write-back cache. In this case, simply
		 * complete the request.
		 */
		blk_mq_end_request(rq, 0);
		return true;
	case REQ_FSEQ_DATA:
		/*
		 * If there's data, but no flush is necessary, the request can
		 * be processed directly without going through flush machinery.
		 * Queue for normal execution.
		 */
		return false;
	case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
		/*
		 * Initialize the flush fields and completion handler to trigger
		 * the post flush, and then just pass the command on.
		 */
		blk_rq_init_flush(rq);
		rq->flush.seq |= REQ_FSEQ_PREFLUSH;
		spin_lock_irq(&fq->mq_flush_lock);
		fq->flush_data_in_flight++;
		spin_unlock_irq(&fq->mq_flush_lock);
		return false;
	default:
		/*
		 * Mark the request as part of a flush sequence and submit it
		 * for further processing to the flush state machine.
		 */
		blk_rq_init_flush(rq);
		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return true;
	}
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
	return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);

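/*
 * Allocate a flush queue together with its preallocated flush request;
 * @cmd_size extra bytes are reserved behind the request for the driver's
 * per-request data.
 */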
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues don't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}

/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock to avoid
 * a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key. Such drivers need to assign a different
 * lock class to their fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill, and worse, it introduces a horrible boot
 * delay because synchronize_rcu() is implied in lockdep_unregister_key(),
 * which is called for each hctx release. SCSI probing may synchronously
 * create and destroy lots of MQ request_queues for non-existent devices,
 * and some robot test kernels always enable the lockdep option. It has
 * been observed that more than half an hour is taken during SCSI MQ probe
 * with a per-fq lock class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
				   struct lock_class_key *key)
{
	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);