// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;
struct kmem_cache *blk_requestq_srcu_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

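/*
 * Usage sketch (illustrative, not part of this file): a driver that wants
 * to toggle queue behavior can use the helpers above, e.g.:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *
 *	if (blk_queue_flag_test_and_set(QUEUE_FLAG_STOPPED, q))
 *		pr_debug("queue was already stopped\n");
 *
 * QUEUE_FLAG_NONROT and QUEUE_FLAG_STOPPED are existing flags; the
 * surrounding logic is hypothetical.
 */
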
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

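/*
 * Usage sketch (illustrative): in a debug or tracing path, blk_op_str()
 * converts an opcode to something printable:
 *
 *	pr_debug("%s: op=%s\n", __func__, blk_op_str(bio_op(bio)));
 *
 * For REQ_OP_READ this yields "READ"; any opcode without an entry in
 * blk_op_name[] yields "UNKNOWN".
 */
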
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

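/*
 * Usage sketch (illustrative): the two helpers above translate between the
 * block layer status space and Unix errnos, collapsing anything unknown to
 * BLK_STS_IOERR / -EIO:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);
 *	int err = blk_status_to_errno(sts);
 *
 * Here sts is BLK_STS_NOSPC and err is -ENOSPC again; an errno with no
 * entry in blk_errors[] maps to BLK_STS_IOERR.
 */
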
const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 * atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
	blk_queue_start_drain(q);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it. All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag
	 * to prevent blk_mq_run_hw_queues() from accessing the hardware
	 * queues after draining has finished.
	 */
	blk_freeze_queue(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	blk_sync_queue(q);
	if (queue_is_mq(q)) {
		blk_mq_cancel_work_sync(q);
		blk_mq_exit_queue(q);
	}

	/*
	 * In theory, the request pool of sched_tags belongs to the request
	 * queue. However, the current implementation requires the tag_set
	 * for freeing requests, so free the pool now.
	 *
	 * The queue has become frozen, so there can't be any in-queue
	 * requests and it is safe to free the requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_rqs(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * This is the read pair of the barrier in
		 * blk_freeze_queue_start(). We need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}

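/*
 * Usage sketch (illustrative): code that needs the queue to stay alive
 * outside the normal bio path brackets the access with blk_queue_enter()
 * and blk_queue_exit():
 *
 *	if (!blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)) {
 *		...safe to issue work against q here...
 *		blk_queue_exit(q);
 *	}
 *
 * With BLK_MQ_REQ_NOWAIT the call fails with -EBUSY instead of sleeping
 * when the queue is frozen.
 */
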
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EBUSY;
		}

		/*
		 * This is the read pair of the barrier in
		 * blk_freeze_queue_start(). We need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
			GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	if (alloc_srcu) {
		blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
		if (init_srcu_struct(q->srcu) != 0)
			goto fail_q;
	}

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_srcu;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_split;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_stats:
	blk_free_queue_stats(q->stats);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_srcu:
	if (alloc_srcu)
		cleanup_srcu_struct(q->srcu);
fail_q:
	kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("%s: attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    current->comm,
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if the queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bios early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return false;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
	if (blk_crypto_bio_prep(&bio)) {
		if (likely(bio_queue_enter(bio) == 0)) {
			disk->fops->submit_bio(bio);
			blk_queue_exit(disk->queue);
		}
	}
}

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (unlikely(!submit_bio_checks(bio)))
		return;

	if (!disk->fops->submit_bio)
		blk_mq_submit_bio(bio);
	else
		__submit_bio_fops(disk, bio);
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added. ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct. If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 * ->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers. All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem. Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices. It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done. The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bdev_get_queue(bio->bi_bdev)) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall. When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;

		psi_memstall_enter(&pflags);
		submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);
		return;
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

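/*
 * Usage sketch (illustrative, assuming the bio_alloc(gfp, nr_vecs)
 * signature of this kernel version; bdev, page, and my_end_io are
 * hypothetical):
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_iter.bi_sector = 0;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 *
 * Completion is signalled asynchronously via my_end_io(); the bio must
 * not be touched until then.
 */
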
/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on the queue associated with the bio. Returns the
 * number of completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in an RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	int ret;

	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	if (current->plug)
		blk_flush_plug(current->plug, false);

	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
		return 0;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		ret = 0;	/* not yet implemented, should not happen */
	else
		ret = blk_mq_poll(q, cookie, iob, flags);
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

/*
 * Helper to implement file_operations.iopoll. Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point. If that happens
	 * we have a few cases to consider:
	 *
	 * 1) the bio is being initialized and bi_bdev is NULL. We can simply
	 *    do nothing in this case
	 * 2) the bio points to a not poll enabled device. bio_poll will catch
	 *    this and return 0
	 * 3) the bio points to a poll capable device, including but not
	 *    limited to the one that the original bio pointed to. In this
	 *    case we will call into the actual poll method and poll for I/O,
	 *    even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid. Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

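/*
 * Usage sketch (illustrative): a driver whose read/write path stores its
 * bio in iocb->private can point ->iopoll straight at the helper above;
 * my_fops is hypothetical:
 *
 *	static const struct file_operations my_fops = {
 *		...
 *		.iopoll		= iocb_bio_iopoll,
 *	};
 */
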
void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return now;
}

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);

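/*
 * Usage sketch (illustrative) for a bio-based driver that remaps a bio to
 * a lower device, in the style of device-mapper; lower_bdev is
 * hypothetical:
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	struct block_device *orig_bdev = bio->bi_bdev;
 *
 *	bio_set_dev(bio, lower_bdev);
 *	...
 *	(in the completion handler)
 *	bio_end_io_acct_remapped(bio, start, orig_bdev);
 */
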
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their
 *    own exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy. This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch. The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called. However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

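/*
 * Usage sketch (illustrative): a stacking driver can piggy-back on the
 * current plug by registering an unplug callback; md uses this pattern.
 * my_unplug and my_data are hypothetical, and @size may name a larger
 * driver-private struct that embeds blk_plug_cb at its start:
 *
 *	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		...dispatch driver-private batched work...
 *	}
 *
 *	if (!blk_check_plugged(my_unplug, my_data, sizeof(struct blk_plug_cb)))
 *		...no plug active, dispatch immediately...
 */
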
void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	if (!rq_list_empty(plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we now hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 *   Indicate that a batch of I/O submissions is complete. This function
 *   must be paired with an initial call to blk_start_plug(). The intent
 *   is to allow the block layer to optimize I/O submission. See the
 *   documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

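/*
 * Usage sketch (illustrative): batching a series of submissions under one
 * plug; bios[] and nr are hypothetical:
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 *
 * Any I/O still held in the plug is flushed by blk_finish_plug(), or
 * earlier if the task schedules.
 */
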
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));
	BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
			   __alignof__(struct request_queue)) !=
		     sizeof(struct request_queue));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
			sizeof(struct request_queue) +
			sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}