/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

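/*
 * Flush state machine data for a hardware queue.  Pending and running
 * flushes are double-buffered in flush_queue[], indexed by
 * flush_pending_idx and flush_running_idx, so a new flush can be staged
 * while a previous one is still in flight.
 */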
struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

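/*
 * Try to take a q_usage_counter reference without blocking.  This fails if
 * the queue is frozen or being torn down, and also while the queue is in
 * pm-only mode, unless the caller is a runtime-PM path and the device is
 * not fully suspended.
 */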
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

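/*
 * Fast path for taking a queue reference during bio submission; falls back
 * to __bio_queue_enter(), which may sleep until the queue is unfrozen.
 */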
static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

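/*
 * True if vec2 starts exactly where vec1 ends in physical memory and the
 * combined range does not cross the queue's segment boundary.  Xen and
 * KMSAN impose additional restrictions on merging.
 */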
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

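/*
 * Requests that must never be merged with a bio or another request:
 * passthrough commands, flushes, write-zeroes, zone appends, and anything
 * already carrying a no-merge flag.
 */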
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 * 1) If max_discard_segments > 1, the driver treats every bio as a range and
 *    sends the bios to the controller together. The ranges don't need to be
 *    contiguous.
 * 2) Otherwise, the bio is merged like a normal read/write request, so the
 *    ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

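/* Per-operation cap on the size of a single request, in 512-byte sectors. */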
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

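/*
 * Timeout handling: blk_rq_timeout() clamps an expiry to at most
 * BLK_MAX_TIMEOUT in the future, blk_add_timer() (re)arms the queue's
 * timeout timer for a request.
 */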
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits: a plugged task's request list is flushed once it holds
 * BLK_MAX_REQUEST_COUNT requests, or once the last queued request reaches
 * BLK_PLUG_FLUSH_SIZE bytes.
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

static inline bool bio_may_exceed_limits(struct bio *bio,
					 const struct queue_limits *lim)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the extra check
	 * doesn't matter anyway.
	 */
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

struct bio *__bio_split_to_limits(struct bio *bio,
				  const struct queue_limits *lim,
				  unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_set_default_limits(struct queue_limits *lim);
int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

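/*
 * Mark a request as no longer mergeable and forget it as the queue's
 * cached merge candidate.
 */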
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
		const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

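/*
 * Highmem bounce buffering is only needed for BLK_BOUNCE_HIGH queues on
 * configurations that actually have pages above the direct-mapped region,
 * i.e. when max_pfn exceeds max_low_pfn.
 */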
struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn < max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
void disk_clear_zone_settings(struct gendisk *disk);
#else
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline void disk_clear_zone_settings(struct gendisk *disk) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

struct request_queue *blk_alloc_queue(int node_id);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

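/*
 * The helpers below mirror the refcount_t API on top of a plain atomic_t.
 * The typical pattern is to drop the final reference with
 * req_ref_put_and_test() and only then free or complete the request.
 */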
static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

#endif /* BLK_INTERNAL_H */