/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;
struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}
static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}
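/*
 * Why half the interval (editor's note, not in the original comment):
 * khungtaskd only flags a task that stays in uninterruptible sleep for a
 * whole sysctl_hung_task_timeout_secs window without scheduling.  Waking
 * from wait_for_completion_io_timeout() every half window guarantees the
 * waiter schedules at least once per window, so a very long I/O is not
 * misreported as a hung task.
 */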
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page);
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
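/*
 * Worked example for the boundary check above (editor's illustration,
 * assuming a 4 KiB segment boundary, i.e. mask == 0xfff): two contiguous
 * 2 KiB vecs at physical addresses 0x1000 and 0x1800 give
 * (0x1000 | 0xfff) == 0x1fff and ((0x1800 + 0x800 - 1) | 0xfff) == 0x1fff,
 * so they may merge.  A 2 KiB vec at 0x1800 followed by one at 0x2000 ends
 * at 0x27ff, giving 0x1fff != 0x2fff: the merged segment would straddle the
 * 4 KiB boundary, so no merge.
 */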
static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
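/*
 * Illustration (editor's note, assuming an NVMe-style virt_boundary_mask of
 * 0xfff): a previous vec that ends mid-page (bv_offset + bv_len == 0x600) or
 * a new vec that starts at a non-zero page offset (offset == 0x200) makes
 * __bvec_gap_to_prev() return true, so the bio has to be split instead of
 * growing another SG element across the virtual boundary.
 */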
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the bios are handled like normal read/write requests, so the
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
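/*
 * Editor's sketch of how merge code might apply the distinction above; the
 * surrounding logic is hypothetical, only blk_discard_mergable(),
 * blk_rq_pos() and blk_rq_sectors() are existing helpers:
 *
 *	if (blk_discard_mergable(req)) {
 *		// multi-range discard: append 'next' as one more range,
 *		// bounded only by queue_max_discard_segments()
 *	} else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next)) {
 *		// single-range discard: merge only if the LBA ranges touch
 *	}
 */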
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}
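/*
 * Note on the discard clamp above (editor's note, not in the original
 * header): request payload sizes are carried in a 32-bit byte count
 * (blk_rq_bytes()), so the discard limit is capped at
 * UINT_MAX >> SECTOR_SHIFT sectors to keep sectors << SECTOR_SHIFT from
 * overflowing an unsigned int.
 */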
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);
static inline bool bio_may_exceed_limits(struct bio *bio,
					 const struct queue_limits *lim)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data. The check might
	 * lead to occasional false positives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the loop below
	 * doesn't matter anyway.
	 */
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
int blk_dev_init(void);
/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}
/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif
struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn < max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}
#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
static inline bool bio_is_zone_append(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_ZONE_APPEND ||
		bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
	/*
	 * For zone append requests, the request sector indicates the location
	 * at which the BIO data was written. Return this value to the BIO
	 * issuer through the BIO iter sector.
	 * For plugged zone writes, which include emulated zone append, we need
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * look up the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
		bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline bool bio_is_zone_append(struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it, or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}
struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */
/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
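/*
 * Editor's note (not from the original header): the macro above folds two
 * sanity conditions into one unsigned comparison.  Adding 127 wraps any
 * value that is 0 or, interpreted as signed, in [-127, -1] (i.e.
 * 0xffffff81..0xffffffff) into [0, 127], so req_ref_put_and_test() warns
 * both when the refcount is already zero and when it has gone negative or
 * is about to overflow.
 */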
static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)
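/*
 * Worked layout (editor's illustration, derived from the macros above):
 *
 *	bit  63      : BIO_ISSUE_THROTL_SKIP_LATENCY (the reserved bit)
 *	bits 62..51  : bio size, since BIO_ISSUE_SIZE_SHIFT == 51
 *	bits 50..0   : issue time in ns, BIO_ISSUE_TIME_MASK == (1ULL << 51) - 1
 *
 * 51 bits of nanoseconds wrap after roughly 2^51 ns, i.e. about 26 days,
 * which is plenty for comparing issue times of in-flight bios.
 */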
static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

#endif /* BLK_INTERNAL_H */