// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
#include "blk-ioprio.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

static DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
static struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX for a given REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer helper to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(enum req_op op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

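/*
 * Illustrative sketch (not part of the original file): blk_op_str() is
 * typically consumed when formatting diagnostics.  A hypothetical debug
 * helper using it might look like this.
 */
static inline void example_log_bio_op(struct bio *bio)
{
	pr_debug("op=%s sector=%llu\n", blk_op_str(bio_op(bio)),
		 (unsigned long long)bio->bi_iter.bi_sector);
}
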
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_RESV_CONFLICT]	= { -EBADE,	"reservation conflict" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* Command duration limit device-side timeout */
	[BLK_STS_DURATION_LIMIT]	= { -ETIME, "duration limit exceeded" },

	[BLK_STS_INVAL]		= { -EINVAL,	"invalid" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}
EXPORT_SYMBOL_GPL(blk_status_to_str);

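/*
 * Illustrative sketch (not part of the original file): the helpers above
 * translate between kernel errnos and blk_status_t.  A driver completing a
 * request from an errno-based callback might use them as follows; the
 * example_done() function itself is an assumption for this example.
 */
static void example_done(struct request *rq, int err)
{
	blk_status_t status = errno_to_blk_status(err); /* e.g. -ENOSPC -> BLK_STS_NOSPC */

	pr_debug("completing rq with %s error (errno %d)\n",
		 blk_status_to_str(status), blk_status_to_errno(status));
	blk_mq_end_request(rq, status);
}
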
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	timer_delete_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head,
			struct request_queue, rcu_head);

	percpu_ref_exit(&q->q_usage_counter);
	kmem_cache_free(blk_requestq_cachep, q);
}

static void blk_free_queue(struct request_queue *q)
{
	blk_free_queue_stats(q->stats);
	if (queue_is_mq(q))
		blk_mq_release(q);

	ida_free(&blk_queue_ida, q->id);
	lockdep_unregister_key(&q->io_lock_cls_key);
	lockdep_unregister_key(&q->q_lock_cls_key);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue and frees it when the refcount
 * reaches 0.
 */
void blk_put_queue(struct request_queue *q)
{
	if (refcount_dec_and_test(&q->refs))
		blk_free_queue(q);
}
EXPORT_SYMBOL(blk_put_queue);

bool blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	bool freeze = __blk_freeze_queue_start(q, current);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);

	return freeze;
}

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EAGAIN;

		/*
		 * Pairs with the barrier in blk_freeze_queue_start(): we need
		 * to order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth or the
		 * queue dying flag, otherwise the following wait may never
		 * return if the two reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	rwsem_acquire_read(&q->q_lockdep_map, 0, 0, _RET_IP_);
	rwsem_release(&q->q_lockdep_map, _RET_IP_);
	return 0;
}

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EAGAIN;
		}

		/*
		 * Pairs with the barrier in blk_freeze_queue_start(): we need
		 * to order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth or the
		 * queue dying flag, otherwise the following wait may never
		 * return if the two reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
	rwsem_release(&q->io_lockdep_map, _RET_IP_);
	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

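/*
 * Illustrative sketch (not part of the original file): code that needs to
 * touch a queue outside the normal bio submission path pairs
 * blk_queue_enter() with blk_queue_exit().  The function below is an
 * assumed example, not an existing caller.
 */
static int example_with_queue_usage(struct request_queue *q)
{
	int ret;

	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ret;	/* could not get a usage reference right now */

	/* ... do work that requires holding q->q_usage_counter ... */

	blk_queue_exit(q);
	return 0;
}
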
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = timer_container_of(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
{
	struct request_queue *q;
	int error;

	q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
				  node_id);
	if (!q)
		return ERR_PTR(-ENOMEM);

	q->last_merge = NULL;

	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
	if (q->id < 0) {
		error = q->id;
		goto fail_q;
	}

	q->stats = blk_alloc_queue_stats();
	if (!q->stats) {
		error = -ENOMEM;
		goto fail_id;
	}

	error = blk_set_default_limits(lim);
	if (error)
		goto fail_stats;
	q->limits = *lim;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);

	refcount_set(&q->refs, 1);
	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->elevator_lock);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->limits_lock);
	mutex_init(&q->rq_qos_mutex);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	blkg_init_queue(q);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	error = percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
	if (error)
		goto fail_stats;
	lockdep_register_key(&q->io_lock_cls_key);
	lockdep_register_key(&q->q_lock_cls_key);
	lockdep_init_map(&q->io_lockdep_map, "&q->q_usage_counter(io)",
			 &q->io_lock_cls_key, 0);
	lockdep_init_map(&q->q_lockdep_map, "&q->q_usage_counter(queue)",
			 &q->q_lock_cls_key, 0);

	/* Teach lockdep about lock ordering (reclaim WRT queue freeze lock). */
	fs_reclaim_acquire(GFP_KERNEL);
	rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
	rwsem_release(&q->io_lockdep_map, _RET_IP_);
	fs_reclaim_release(GFP_KERNEL);

	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_stats:
	blk_free_queue_stats(q->stats);
fail_id:
	ida_free(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return ERR_PTR(error);
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dying(q)))
		return false;
	refcount_inc(&q->refs);
	return true;
}
EXPORT_SYMBOL(blk_get_queue);

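/*
 * Illustrative sketch (not part of the original file): a holder that wants to
 * keep a request_queue alive across a sleep takes a reference with
 * blk_get_queue() and later drops it with blk_put_queue().  The helper name
 * is an assumption.
 */
static struct request_queue *example_hold_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))		/* fails once the queue is dying */
		return NULL;
	return q;			/* caller must blk_put_queue() when done */
}
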
#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return bdev_test_flag(part, BD_MAKE_IT_FAIL) &&
		should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline void bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return;

		if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))
			return;

		bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);

		/*
		 * Setting the underlying disk of a raid/dm device to
		 * read-only via ioctl will trigger this.
		 */
		pr_warn("Trying to write to read-only block-device %pg\n",
			bio->bi_bdev);
	}
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
				    current->comm, bio->bi_bdev, bio->bi_opf,
				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check a zone append write to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!bdev_is_zoned(bio->bi_bdev))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static void __submit_bio(struct bio *bio)
{
	/* If plug is not used, add new plug here to cache nsecs time. */
	struct blk_plug plug;

	if (unlikely(!blk_crypto_bio_prep(&bio)))
		return;

	blk_start_plug(&plug);

	if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
		blk_mq_submit_bio(bio);
	} else if (likely(bio_queue_enter(bio) == 0)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if ((bio->bi_opf & REQ_POLLED) &&
		    !(disk->queue->limits.features & BLK_FEAT_POLL)) {
			bio->bi_status = BLK_STS_NOTSUPP;
			bio_endio(bio);
		} else {
			disk->fops->submit_bio(bio);
		}
		blk_queue_exit(disk->queue);
	}

	blk_finish_plug(&plug);
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

void submit_bio_noacct_nocheck(struct bio *bio)
{
	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/*
		 * Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}

static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
						 struct bio *bio)
{
	if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
		return BLK_STS_INVAL;

	if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
		return BLK_STS_INVAL;

	return BLK_STS_OK;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;

	might_sleep();

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	bio_check_ro(bio);
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev_is_partition(bdev) &&
		    unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bios early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf)) {
		if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
				 bio_op(bio) != REQ_OP_ZONE_APPEND))
			goto end_io;
		if (!bdev_write_cache(bdev)) {
			bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
			if (!bio_sectors(bio)) {
				status = BLK_STS_OK;
				goto end_io;
			}
		}
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		break;
	case REQ_OP_WRITE:
		if (bio->bi_opf & REQ_ATOMIC) {
			status = blk_validate_atomic_write_op_size(q, bio);
			if (status != BLK_STS_OK)
				goto end_io;
		}
		break;
	case REQ_OP_FLUSH:
		/*
		 * REQ_OP_FLUSH can't be submitted through bios, it is only
		 * synthesized in struct request by the flush state machine.
		 */
		goto not_supported;
	case REQ_OP_DISCARD:
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!bdev_max_secure_erase_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
	case REQ_OP_ZONE_RESET_ALL:
		if (!bdev_is_zoned(bio->bi_bdev))
			goto not_supported;
		break;
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/*
		 * Driver private operations are only used with passthrough
		 * requests.
		 */
		fallthrough;
	default:
		goto not_supported;
	}

	if (blk_throtl_bio(bio))
		return;
	submit_bio_noacct_nocheck(bio);
	return;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

static void bio_set_ioprio(struct bio *bio)
{
	/* Nobody set ioprio so far? Initialize it based on task's nice value */
	if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
		bio->bi_ioprio = get_current_ioprio();
	blkcg_set_ioprio(bio);
}

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed
 * a fully set up &struct bio that describes the I/O that needs to be done.
 * The bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_READ) {
		task_io_account_read(bio->bi_iter.bi_size);
		count_vm_events(PGPGIN, bio_sectors(bio));
	} else if (bio_op(bio) == REQ_OP_WRITE) {
		count_vm_events(PGPGOUT, bio_sectors(bio));
	}

	bio_set_ioprio(bio);
	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

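/*
 * Illustrative sketch (not part of the original file): a typical submit_bio()
 * caller allocates the bio, points it at a device and a completion handler,
 * adds pages, and submits.  example_end_io(), example_read_one_page() and the
 * page passed in are assumptions made for this example only.
 */
static void example_end_io(struct bio *bio)
{
	/* runs asynchronously once the I/O completes */
	pr_debug("read completed, errno %d\n",
		 blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}

static void example_read_one_page(struct block_device *bdev, sector_t sector,
				  struct page *page)
{
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);

	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = example_end_io;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
}
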
/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	struct block_device *bdev;
	struct request_queue *q;
	int ret = 0;

	bdev = READ_ONCE(bio->bi_bdev);
	if (!bdev)
		return 0;

	q = bdev_get_queue(bdev);
	if (cookie == BLK_QC_T_NONE)
		return 0;

	blk_flush_plug(current->plug, false);

	/*
	 * We need to be able to enter a frozen queue, similar to how
	 * timeouts also need to do that. If that is blocked, then we can
	 * have pending IO when a queue freeze is started, and then the
	 * wait for the freeze to finish will wait for polled requests to
	 * timeout as the poller is prevented from entering the queue and
	 * completing them. As long as we prevent new IO from being queued,
	 * that should be all that matters.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return 0;
	if (queue_is_mq(q)) {
		ret = blk_mq_poll(q, cookie, iob, flags);
	} else {
		struct gendisk *disk = q->disk;

		if ((q->limits.features & BLK_FEAT_POLL) && disk &&
		    disk->fops->poll_bio)
			ret = disk->fops->poll_bio(bio, iob, flags);
	}
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point.  If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL.  We can
	 *     simply do nothing in this case
	 *  2) the bio points to a not poll enabled device.  bio_poll will
	 *     catch this and return 0
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to.  In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid.  Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

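/*
 * Illustrative sketch (not part of the original file): a bio-based driver or
 * file system wires up ->iopoll by stashing the bio in kiocb->private before
 * submission and delegating the poll to iocb_bio_iopoll().  Both helpers
 * below are assumptions for the example, not existing symbols.
 */
static void example_submit_polled(struct kiocb *kiocb, struct bio *bio)
{
	kiocb->private = bio;	/* must be cleared before the bio is freed */
	bio->bi_opf |= REQ_POLLED;
	submit_bio(bio);
}

static int example_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			  unsigned int flags)
{
	return iocb_bio_iopoll(kiocb, iob, flags);
}
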
void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp)) &&
	    likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
	    (end || bdev_count_inflight(part)))
		__part_stat_add(part, io_ticks, now - stamp);

	if (bdev_is_partition(part)) {
		part = bdev_whole(part);
		goto again;
	}
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time)
{
	part_stat_lock();
	update_io_ticks(bdev, start_time, false);
	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}
EXPORT_SYMBOL(bdev_start_io_acct);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(bdev, now, true);
	part_stat_inc(bdev, ios[sgrp]);
	part_stat_add(bdev, sectors[sgrp], sectors);
	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(bdev_end_io_acct);

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

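/*
 * Illustrative sketch (not part of the original file): a bio-based driver
 * that remaps bios to another device brackets the I/O with the accounting
 * helpers above.  The remap step itself is elided and the function name is
 * an assumption.
 */
static void example_account_remapped(struct bio *bio,
				     struct block_device *orig_bdev)
{
	unsigned long start = bio_start_io_acct(bio);

	/* ... remap bio->bi_bdev and perform the I/O ... */

	bio_end_io_acct_remapped(bio, start, orig_bdev);
}
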
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->cur_ktime = 0;
	rq_list_init(&plug->mq_list);
	rq_list_init(&plug->cached_rqs);
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug().  This is important from a performance perspective, but
 *   also ensures that we don't deadlock.  For instance, if the task is
 *   blocking for a memory allocation, memory reclaim could end up wanting to
 *   free a page belonging to that request that is currently residing in our
 *   private plug.  By flushing the pending I/O when the process goes to sleep,
 *   we avoid this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

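/*
 * Illustrative sketch (not part of the original file): a submitter batches a
 * run of bios under one plug so the block layer can merge and dispatch them
 * together.  The function name and the bio array are assumptions for the
 * example.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);	/* flushes anything still held in the plug */
}
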
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

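/*
 * Illustrative sketch (not part of the original file): stacking drivers use
 * blk_check_plugged() to attach per-task unplug work to the current plug.
 * The example_cb structure and both example functions are assumptions.
 */
struct example_cb {
	struct blk_plug_cb cb;
	/* driver-private batching state would live here */
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	/* flush whatever was batched while the plug was active */
}

static void example_plug_io(void *driver_data)
{
	struct blk_plug_cb *cb = blk_check_plugged(example_unplug, driver_data,
						   sizeof(struct example_cb));

	if (!cb)
		return;		/* no plug active: submit immediately instead */
	/* otherwise, batch the I/O on the returned callback's state */
}
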
void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule.  Since we hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(&plug->cached_rqs)))
		blk_mq_free_plug_rqs(plug);

	plug->cur_ktime = 0;
	current->flags &= ~PF_BLOCK_TS;
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		__blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = KMEM_CACHE(request_queue, SLAB_PANIC);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}