// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;
struct kmem_cache *blk_requestq_srcu_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
        set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
        clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
        return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

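/*
 * Illustrative sketch (not part of this file): a driver that wants its queue
 * to advertise a property uses the helpers above, typically from its probe
 * path.  The exact flags chosen here are only examples:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_STABLE_WRITES, q))
 *		pr_debug("stable writes newly enabled\n");
 *
 * The helpers are plain set_bit()/clear_bit() wrappers on q->queue_flags, so
 * they are safe from any context and need no queue lock.
 */
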
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
        REQ_OP_NAME(READ),
        REQ_OP_NAME(WRITE),
        REQ_OP_NAME(FLUSH),
        REQ_OP_NAME(DISCARD),
        REQ_OP_NAME(SECURE_ERASE),
        REQ_OP_NAME(ZONE_RESET),
        REQ_OP_NAME(ZONE_RESET_ALL),
        REQ_OP_NAME(ZONE_OPEN),
        REQ_OP_NAME(ZONE_CLOSE),
        REQ_OP_NAME(ZONE_FINISH),
        REQ_OP_NAME(ZONE_APPEND),
        REQ_OP_NAME(WRITE_ZEROES),
        REQ_OP_NAME(DRV_IN),
        REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - return the string name of a REQ_OP_XXX operation
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer helper to convert a REQ_OP_XXX value
 * into string form. Useful for debugging and tracing bios or requests. For
 * an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
        const char *op_str = "UNKNOWN";

        if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
                op_str = blk_op_name[op];

        return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

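/*
 * Illustrative sketch (not part of this file): blk_op_str() is handy when
 * logging a bio or request, e.g.:
 *
 *	pr_debug("%s: op=%s sector=%llu\n",
 *		 bio->bi_bdev->bd_disk->disk_name,
 *		 blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */
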
static const struct {
        int             errno;
        const char      *name;
} blk_errors[] = {
        [BLK_STS_OK]            = { 0,          "" },
        [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
        [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
        [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
        [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
        [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
        [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
        [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
        [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
        [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
        [BLK_STS_DEV_RESOURCE]  = { -EBUSY,     "device resource" },
        [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
        [BLK_STS_OFFLINE]       = { -ENODEV,    "device offline" },

        /* device mapper special case, should not leak out: */
        [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },

        /* zone device specific errors */
        [BLK_STS_ZONE_OPEN_RESOURCE]    = { -ETOOMANYREFS, "open zones exceeded" },
        [BLK_STS_ZONE_ACTIVE_RESOURCE]  = { -EOVERFLOW, "active zones exceeded" },

        /* everything else not covered above: */
        [BLK_STS_IOERR]         = { -EIO,       "I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
                if (blk_errors[i].errno == errno)
                        return (__force blk_status_t)i;
        }

        return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
        int idx = (__force int)status;

        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
                return -EIO;
        return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

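/*
 * Illustrative sketch (not part of this file): drivers usually convert in
 * both directions at the boundary between errno-based code and the block
 * layer, e.g.:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);	// BLK_STS_NOSPC
 *	int err = blk_status_to_errno(sts);			// -ENOSPC again
 *
 * Note the mapping is not a perfect bijection: any errno without a dedicated
 * BLK_STS_* entry in the table above collapses to BLK_STS_IOERR / -EIO.
 */
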
const char *blk_status_to_str(blk_status_t status)
{
        int idx = (__force int)status;

        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
                return "<null>";
        return blk_errors[idx].name;
}

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
        del_timer_sync(&q->timeout);
        cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
        atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

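/**
 * blk_clear_pm_only - decrement pm_only counter
 * @q: request queue pointer
 *
 * When the counter drops back to zero, wake up everyone waiting in
 * blk_queue_enter() so that normal (non-PM) I/O can proceed again.
 */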
void blk_clear_pm_only(struct request_queue *q)
{
        int pm_only;

        pm_only = atomic_dec_return(&q->pm_only);
        WARN_ON_ONCE(pm_only < 0);
        if (pm_only == 0)
                wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *	    atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
        kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
        /*
         * When the queue DYING flag is set, we need to block new requests
         * from entering the queue, so we call blk_freeze_queue_start() to
         * prevent I/O from crossing blk_queue_enter().
         */
        blk_freeze_queue_start(q);
        if (queue_is_mq(q))
                blk_mq_wake_waiters(q);
        /* Make blk_queue_enter() reexamine the DYING flag. */
        wake_up_all(&q->mq_freeze_wq);
}

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
        /* cannot be called from atomic context */
        might_sleep();

        WARN_ON_ONCE(blk_queue_registered(q));

        /* mark @q DYING, no new request or merges will be allowed afterwards */
        blk_queue_flag_set(QUEUE_FLAG_DYING, q);
        blk_queue_start_drain(q);

        blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        /*
         * Drain all requests queued before the DYING marking. Set the DEAD
         * flag to prevent blk_mq_run_hw_queues() from accessing the hardware
         * queues after draining has finished.
         */
        blk_freeze_queue(q);

        /* cleanup rq qos structures for queue without disk */
        rq_qos_exit(q);

        blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

        blk_sync_queue(q);
        if (queue_is_mq(q)) {
                blk_mq_cancel_work_sync(q);
                blk_mq_exit_queue(q);
        }

        /*
         * In theory the request pool of sched_tags belongs to the request
         * queue.  However, the current implementation requires the tag_set
         * for freeing requests, so free the pool now.
         *
         * The queue has become frozen, so there can't be any in-queue
         * requests and it is safe to free them now.
         */
        mutex_lock(&q->sysfs_lock);
        if (q->elevator)
                blk_mq_sched_free_rqs(q);
        mutex_unlock(&q->sysfs_lock);

        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
        const bool pm = flags & BLK_MQ_REQ_PM;

        while (!blk_try_enter_queue(q, pm)) {
                if (flags & BLK_MQ_REQ_NOWAIT)
                        return -EBUSY;

                /*
                 * This pairs with the barrier in blk_freeze_queue_start():
                 * reading the __PERCPU_REF_DEAD flag of .q_usage_counter
                 * must be ordered against reading .mq_freeze_depth or the
                 * queue dying flag, otherwise the wait below may never
                 * return if the two reads are reordered.
                 */
                smp_rmb();
                wait_event(q->mq_freeze_wq,
                           (!q->mq_freeze_depth &&
                            blk_pm_resume_queue(pm, q)) ||
                           blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
        }

        return 0;
}

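/*
 * Illustrative sketch (not part of this file): code that needs to touch a
 * queue outside of the normal request path brackets the access with an
 * enter/exit pair so it cannot race with a queue freeze:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;		// frozen, dying or PM-suspended
 *	... safely use q ...
 *	blk_queue_exit(q);
 */
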
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
        while (!blk_try_enter_queue(q, false)) {
                struct gendisk *disk = bio->bi_bdev->bd_disk;

                if (bio->bi_opf & REQ_NOWAIT) {
                        if (test_bit(GD_DEAD, &disk->state))
                                goto dead;
                        bio_wouldblock_error(bio);
                        return -EBUSY;
                }

                /*
                 * This pairs with the barrier in blk_freeze_queue_start():
                 * reading the __PERCPU_REF_DEAD flag of .q_usage_counter
                 * must be ordered against reading .mq_freeze_depth or the
                 * queue dying flag, otherwise the wait below may never
                 * return if the two reads are reordered.
                 */
                smp_rmb();
                wait_event(q->mq_freeze_wq,
                           (!q->mq_freeze_depth &&
                            blk_pm_resume_queue(false, q)) ||
                           test_bit(GD_DEAD, &disk->state));
                if (test_bit(GD_DEAD, &disk->state))
                        goto dead;
        }

        return 0;
dead:
        bio_io_error(bio);
        return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
        percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
        struct request_queue *q =
                container_of(ref, struct request_queue, q_usage_counter);

        wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
        struct request_queue *q = from_timer(q, t, timeout);

        kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
        struct request_queue *q;
        int ret;

        q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
                        GFP_KERNEL | __GFP_ZERO, node_id);
        if (!q)
                return NULL;

        if (alloc_srcu) {
                blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
                if (init_srcu_struct(q->srcu) != 0)
                        goto fail_q;
        }

        q->last_merge = NULL;

        q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
        if (q->id < 0)
                goto fail_srcu;

        ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
        if (ret)
                goto fail_id;

        q->stats = blk_alloc_queue_stats();
        if (!q->stats)
                goto fail_split;

        q->node = node_id;

        atomic_set(&q->nr_active_requests_shared_tags, 0);

        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
        INIT_WORK(&q->timeout_work, blk_timeout_work);
        INIT_LIST_HEAD(&q->icq_list);

        kobject_init(&q->kobj, &blk_queue_ktype);

        mutex_init(&q->debugfs_mutex);
        mutex_init(&q->sysfs_lock);
        mutex_init(&q->sysfs_dir_lock);
        spin_lock_init(&q->queue_lock);

        init_waitqueue_head(&q->mq_freeze_wq);
        mutex_init(&q->mq_freeze_lock);

        /*
         * Init percpu_ref in atomic mode so that it's faster to shutdown.
         * See blk_register_queue() for details.
         */
        if (percpu_ref_init(&q->q_usage_counter,
                                blk_queue_usage_counter_release,
                                PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
                goto fail_stats;

        blk_queue_dma_alignment(q, 511);
        blk_set_default_limits(&q->limits);
        q->nr_requests = BLKDEV_DEFAULT_RQ;

        return q;

fail_stats:
        blk_free_queue_stats(q->stats);
fail_split:
        bioset_exit(&q->bio_split);
fail_id:
        ida_simple_remove(&blk_queue_ida, q->id);
fail_srcu:
        if (alloc_srcu)
                cleanup_srcu_struct(q->srcu);
fail_q:
        kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
        return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
        if (likely(!blk_queue_dying(q))) {
                __blk_get_queue(q);
                return true;
        }

        return false;
}
EXPORT_SYMBOL(blk_get_queue);

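/*
 * Illustrative sketch (not part of this file): blk_get_queue()/blk_put_queue()
 * form a plain refcount pair for a consumer that stashes a queue pointer
 * beyond the current call chain ("ctx" below is a hypothetical structure):
 *
 *	if (!blk_get_queue(q))
 *		return -ENODEV;		// queue already dying
 *	ctx->q = q;
 *	...
 *	blk_put_queue(ctx->q);		// drop the reference when done
 */
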
#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
        return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
        return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
        struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
                                                NULL, &fail_make_request);

        return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
        if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
                if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
                        return false;
                pr_warn("Trying to write to read-only block-device %pg\n",
                        bio->bi_bdev);
                /* Older lvm-tools actually trigger this */
                return false;
        }

        return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
        if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
                return -EIO;
        return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
        sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
        unsigned int nr_sectors = bio_sectors(bio);

        if (nr_sectors && maxsector &&
            (nr_sectors > maxsector ||
             bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
                pr_info_ratelimited("%s: attempt to access beyond end of device\n"
                                    "%pg: rw=%d, want=%llu, limit=%llu\n",
                                    current->comm,
                                    bio->bi_bdev, bio->bi_opf,
                                    bio_end_sector(bio), maxsector);
                return -EIO;
        }
        return 0;
}

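/*
 * Illustrative worked example (not part of this file): on a 1 GiB partition,
 * maxsector = 2097152 (512-byte sectors).  A bio with bi_sector = 2097000 and
 * 256 sectors would end at sector 2097256, and since
 * 2097000 > 2097152 - 256 = 2096896 the check above rejects it with -EIO.
 * A zero-length bio (nr_sectors == 0) is never rejected here.
 */
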
/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
        struct block_device *p = bio->bi_bdev;

        if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
                return -EIO;
        if (bio_sectors(bio)) {
                bio->bi_iter.bi_sector += p->bd_start_sect;
                trace_block_bio_remap(bio, p->bd_dev,
                                      bio->bi_iter.bi_sector -
                                      p->bd_start_sect);
        }
        bio_set_flag(bio, BIO_REMAPPED);
        return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
                                                 struct bio *bio)
{
        sector_t pos = bio->bi_iter.bi_sector;
        int nr_sectors = bio_sectors(bio);

        /* Only applicable to zoned block devices */
        if (!blk_queue_is_zoned(q))
                return BLK_STS_NOTSUPP;

        /* The bio sector must point to the start of a sequential zone */
        if (pos & (blk_queue_zone_sectors(q) - 1) ||
            !blk_queue_zone_is_seq(q, pos))
                return BLK_STS_IOERR;

        /*
         * Not allowed to cross zone boundaries. Otherwise, the BIO will be
         * split and could result in non-contiguous sectors being written in
         * different zones.
         */
        if (nr_sectors > q->limits.chunk_sectors)
                return BLK_STS_IOERR;

        /* Make sure the BIO is small enough and will not get split */
        if (nr_sectors > q->limits.max_zone_append_sectors)
                return BLK_STS_IOERR;

        bio->bi_opf |= REQ_NOMERGE;

        return BLK_STS_OK;
}

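/*
 * Illustrative worked example (not part of this file, zone size assumed): on a
 * zoned device with 256 MiB zones (blk_queue_zone_sectors() == 524288) a
 * REQ_OP_ZONE_APPEND bio must start exactly on a zone boundary, e.g.
 * bi_sector = 3 * 524288, must target a sequential-write-required zone, and
 * must be no larger than both chunk_sectors and max_zone_append_sectors so it
 * can never be split across zones.
 */
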
static void __submit_bio(struct bio *bio)
{
        struct gendisk *disk = bio->bi_bdev->bd_disk;

        if (unlikely(!blk_crypto_bio_prep(&bio)))
                return;

        if (!disk->fops->submit_bio) {
                blk_mq_submit_bio(bio);
        } else if (likely(bio_queue_enter(bio) == 0)) {
                disk->fops->submit_bio(bio);
                blk_queue_exit(disk->queue);
        }
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
        struct bio_list bio_list_on_stack[2];

        BUG_ON(bio->bi_next);

        bio_list_init(&bio_list_on_stack[0]);
        current->bio_list = bio_list_on_stack;

        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
                struct bio_list lower, same;

                /*
                 * Create a fresh bio_list for all subordinate requests.
                 */
                bio_list_on_stack[1] = bio_list_on_stack[0];
                bio_list_init(&bio_list_on_stack[0]);

                __submit_bio(bio);

                /*
                 * Sort new bios into those for a lower level and those for the
                 * same level.
                 */
                bio_list_init(&lower);
                bio_list_init(&same);
                while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
                        if (q == bdev_get_queue(bio->bi_bdev))
                                bio_list_add(&same, bio);
                        else
                                bio_list_add(&lower, bio);

                /*
                 * Now assemble so we handle the lowest level first.
                 */
                bio_list_merge(&bio_list_on_stack[0], &lower);
                bio_list_merge(&bio_list_on_stack[0], &same);
                bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
        } while ((bio = bio_list_pop(&bio_list_on_stack[0])));

        current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
        struct bio_list bio_list[2] = { };

        current->bio_list = bio_list;

        do {
                __submit_bio(bio);
        } while ((bio = bio_list_pop(&bio_list[0])));

        current->bio_list = NULL;
}

void submit_bio_noacct_nocheck(struct bio *bio)
{
        /*
         * We only want one ->submit_bio to be active at a time, else stack
         * usage with stacked devices could be a problem.  Use current->bio_list
         * to collect a list of requests submitted by a ->submit_bio method
         * while it is active, and then process them after it returned.
         */
        if (current->bio_list)
                bio_list_add(&current->bio_list[0], bio);
        else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
                __submit_bio_noacct_mq(bio);
        else
                __submit_bio_noacct(bio);
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
        struct block_device *bdev = bio->bi_bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        blk_status_t status = BLK_STS_IOERR;
        struct blk_plug *plug;

        might_sleep();

        plug = blk_mq_plug(q, bio);
        if (plug && plug->nowait)
                bio->bi_opf |= REQ_NOWAIT;

        /*
         * For a REQ_NOWAIT based request, return -EOPNOTSUPP
         * if queue does not support NOWAIT.
         */
        if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
                goto not_supported;

        if (should_fail_bio(bio))
                goto end_io;
        if (unlikely(bio_check_ro(bio)))
                goto end_io;
        if (!bio_flagged(bio, BIO_REMAPPED)) {
                if (unlikely(bio_check_eod(bio)))
                        goto end_io;
                if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
                        goto end_io;
        }

        /*
         * Filter flush bio's early so that bio based drivers without flush
         * support don't have to worry about them.
         */
        if (op_is_flush(bio->bi_opf) &&
            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
                bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
                if (!bio_sectors(bio)) {
                        status = BLK_STS_OK;
                        goto end_io;
                }
        }

        if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                bio_clear_polled(bio);

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
                if (!bdev_max_discard_sectors(bdev))
                        goto not_supported;
                break;
        case REQ_OP_SECURE_ERASE:
                if (!blk_queue_secure_erase(q))
                        goto not_supported;
                break;
        case REQ_OP_ZONE_APPEND:
                status = blk_check_zone_append(q, bio);
                if (status != BLK_STS_OK)
                        goto end_io;
                break;
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                if (!blk_queue_is_zoned(q))
                        goto not_supported;
                break;
        case REQ_OP_ZONE_RESET_ALL:
                if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
                        goto not_supported;
                break;
        case REQ_OP_WRITE_ZEROES:
                if (!q->limits.max_write_zeroes_sectors)
                        goto not_supported;
                break;
        default:
                break;
        }

        if (blk_throtl_bio(bio))
                return;

        blk_cgroup_bio_start(bio);
        blkcg_bio_issue_init(bio);

        if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
                trace_block_bio_queue(bio);
                /* Now that enqueuing has been traced, we need to trace
                 * completion as well.
                 */
                bio_set_flag(bio, BIO_TRACE_COMPLETION);
        }
        submit_bio_noacct_nocheck(bio);
        return;

not_supported:
        status = BLK_STS_NOTSUPP;
end_io:
        bio->bi_status = status;
        bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed
 * a fully set up &struct bio that describes the I/O that needs to be done.
 * The bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
        if (blkcg_punt_bio_submit(bio))
                return;

        /*
         * If it's a regular read/write or a barrier with data attached,
         * go through the normal accounting stuff before submission.
         */
        if (bio_has_data(bio)) {
                unsigned int count = bio_sectors(bio);

                if (op_is_write(bio_op(bio))) {
                        count_vm_events(PGPGOUT, count);
                } else {
                        task_io_account_read(bio->bi_iter.bi_size);
                        count_vm_events(PGPGIN, count);
                }
        }

        /*
         * If we're reading data that is part of the userspace workingset,
         * count submission time as memory stall.  When the device is
         * congested, or the submitting cgroup IO-throttled, submission can
         * be a significant part of overall IO time.
         */
        if (unlikely(bio_op(bio) == REQ_OP_READ &&
                     bio_flagged(bio, BIO_WORKINGSET))) {
                unsigned long pflags;

                psi_memstall_enter(&pflags);
                submit_bio_noacct(bio);
                psi_memstall_leave(&pflags);
                return;
        }

        submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

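/*
 * Illustrative sketch (not part of this file): a typical submit_bio() caller
 * allocates and fills a bio, sets a completion callback and fires it off.
 * "my_end_io" and "my_ctx" below are hypothetical names, and the bio_alloc()
 * signature assumed is the one of this kernel generation:
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		complete(&ctx->done);		// status is in bio->bi_status
 *		bio_put(bio);
 *	}
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_private = ctx;
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */
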
/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
        int ret = 0;

        if (cookie == BLK_QC_T_NONE ||
            !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return 0;

        blk_flush_plug(current->plug, false);

        if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
                return 0;
        if (queue_is_mq(q)) {
                ret = blk_mq_poll(q, cookie, iob, flags);
        } else {
                struct gendisk *disk = q->disk;

                if (disk && disk->fops->poll_bio)
                        ret = disk->fops->poll_bio(bio, iob, flags);
        }
        blk_queue_exit(q);
        return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
                    unsigned int flags)
{
        struct bio *bio;
        int ret = 0;

        /*
         * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
         * point to a freshly allocated bio at this point.  If that happens
         * we have a few cases to consider:
         *
         *  1) the bio is being initialized and bi_bdev is NULL.  We can
         *     simply do nothing in this case.
         *  2) the bio points to a not poll enabled device.  bio_poll will
         *     catch this and return 0.
         *  3) the bio points to a poll capable device, including but not
         *     limited to the one that the original bio pointed to.  In this
         *     case we will call into the actual poll method and poll for I/O,
         *     even if we don't need to, but it won't cause harm either.
         *
         * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
         * is still allocated. Because partitions hold a reference to the whole
         * device bdev and thus disk, the disk is also still valid.  Grabbing
         * a reference to the queue in bio_poll() ensures the hctxs and requests
         * are still valid as well.
         */
        rcu_read_lock();
        bio = READ_ONCE(kiocb->private);
        if (bio && bio->bi_bdev)
                ret = bio_poll(bio, iob, flags);
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
        unsigned long stamp;
again:
        stamp = READ_ONCE(part->bd_stamp);
        if (unlikely(time_after(now, stamp))) {
                if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
                        __part_stat_add(part, io_ticks, end ? now - stamp : 1);
        }
        if (part->bd_partno) {
                part = bdev_whole(part);
                goto again;
        }
}

static unsigned long __part_start_io_acct(struct block_device *part,
                                          unsigned int sectors, unsigned int op,
                                          unsigned long start_time)
{
        const int sgrp = op_stat_group(op);

        part_stat_lock();
        update_io_ticks(part, start_time, false);
        part_stat_inc(part, ios[sgrp]);
        part_stat_add(part, sectors[sgrp], sectors);
        part_stat_local_inc(part, in_flight[op_is_write(op)]);
        part_stat_unlock();

        return start_time;
}

/**
 * bio_start_io_acct_time - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 * @start_time: start time that should be passed back to bio_end_io_acct().
 */
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
{
        __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
                             bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct_time);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
        return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
                                    bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
                                 unsigned int op)
{
        return __part_start_io_acct(disk->part0, sectors, op, jiffies);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
                               unsigned long start_time)
{
        const int sgrp = op_stat_group(op);
        unsigned long now = READ_ONCE(jiffies);
        unsigned long duration = now - start_time;

        part_stat_lock();
        update_io_ticks(part, now, true);
        part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
        part_stat_local_dec(part, in_flight[op_is_write(op)]);
        part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
                              struct block_device *orig_bdev)
{
        __part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
                      unsigned long start_time)
{
        __part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);

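/*
 * Illustrative sketch (not part of this file): a bio based driver (dm, md,
 * loop, ...) pairs the start/end accounting helpers around the lifetime of a
 * bio so the numbers show up in /proc/diskstats and sysfs:
 *
 *	start = bio_start_io_acct(bio);
 *	... remap and submit the bio to a lower device ...
 *	// on completion, against the bdev the bio was originally issued to:
 *	bio_end_io_acct_remapped(bio, start, orig_bdev);
 */
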
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
        if (queue_is_mq(q) && q->mq_ops->busy)
                return q->mq_ops->busy(q);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
        return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
                                unsigned long delay)
{
        return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
        struct task_struct *tsk = current;

        /*
         * If this is a nested plug, don't actually assign it.
         */
        if (tsk->plug)
                return;

        plug->mq_list = NULL;
        plug->cached_rq = NULL;
        plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
        plug->rq_count = 0;
        plug->multiple_queues = false;
        plug->has_elevator = false;
        plug->nowait = false;
        INIT_LIST_HEAD(&plug->cb_list);

        /*
         * Store ordering should not be needed here, since a potential
         * preempt will imply a full memory barrier.
         */
        tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug().  This is important from a performance perspective, but
 *   also ensures that we don't deadlock.  For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug.  By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
        blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

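/*
 * Illustrative sketch (not part of this file): a submitter that knows it is
 * about to issue several bios back to back brackets them with a plug so the
 * block layer can batch and merge them:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);	// flushes anything still held in the plug
 */
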
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
        LIST_HEAD(callbacks);

        while (!list_empty(&plug->cb_list)) {
                list_splice_init(&plug->cb_list, &callbacks);

                while (!list_empty(&callbacks)) {
                        struct blk_plug_cb *cb = list_first_entry(&callbacks,
                                                         struct blk_plug_cb,
                                                         list);
                        list_del(&cb->list);
                        cb->callback(cb, from_schedule);
                }
        }
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
                                      int size)
{
        struct blk_plug *plug = current->plug;
        struct blk_plug_cb *cb;

        if (!plug)
                return NULL;

        list_for_each_entry(cb, &plug->cb_list, list)
                if (cb->callback == unplug && cb->data == data)
                        return cb;

        /* Not currently on the callback list */
        BUG_ON(size < sizeof(*cb));
        cb = kzalloc(size, GFP_ATOMIC);
        if (cb) {
                cb->data = data;
                cb->callback = unplug;
                list_add(&cb->list, &plug->cb_list);
        }
        return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
        if (!list_empty(&plug->cb_list))
                flush_plug_callbacks(plug, from_schedule);
        if (!rq_list_empty(plug->mq_list))
                blk_mq_flush_plug_list(plug, from_schedule);
        /*
         * Unconditionally flush out cached requests, even if the unplug
         * event came from schedule.  Since we hold references to the queue
         * for cached requests, we don't want a blocked task holding up a
         * queue freeze/quiesce event.
         */
        if (unlikely(!rq_list_empty(plug->cached_rq)))
                blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
        if (plug == current->plug) {
                __blk_flush_plug(plug, false);
                current->plug = NULL;
        }
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
        /* Prevent hang_check timer from firing at us during very long I/O */
        unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

        if (timeout)
                io_schedule_timeout(timeout);
        else
                io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
        BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
        BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
                        sizeof_field(struct request, cmd_flags));
        BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
                        sizeof_field(struct bio, bi_opf));
        BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
                           __alignof__(struct request_queue)) !=
                     sizeof(struct request_queue));

        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
        kblockd_workqueue = alloc_workqueue("kblockd",
                                            WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");

        blk_requestq_cachep = kmem_cache_create("request_queue",
                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

        blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
                        sizeof(struct request_queue) +
                        sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);

        blk_debugfs_root = debugfs_create_dir("block", NULL);

        return 0;
}