/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

#define BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)
#define BLK_MIN_SEGMENT_SIZE	4096

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
void bio_await_chain(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}

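/*
 * Note (editorial): the rwsem_acquire_read()/rwsem_release() pair in
 * bio_queue_enter() is a lockdep-only annotation. Entering the queue is
 * modelled as a read acquisition of q->io_lockdep_map so lockdep can flag
 * submission paths that could deadlock against a queue freeze; no lock is
 * actually held once the function returns.
 */
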
static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
void blkdev_put_no_open(struct block_device *bdev);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

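/*
 * Worked example (illustrative values): with a 64 KiB segment boundary
 * (mask = 0xffff), a 4 KiB bvec at physical address 0xfe000 followed by a
 * 4 KiB bvec at 0xff000 is contiguous and stays within the same 64 KiB
 * window, so the two merge into one segment. A bvec starting at 0x100000
 * could not be merged with the one at 0xff000 because the combined range
 * would straddle the boundary.
 */
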
static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

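/*
 * For example, with a 4 KiB virtual boundary (virt_boundary_mask = 0xfff,
 * as used for NVMe PRP lists), a previous vector that ends at offset 0x600
 * within its page leaves a gap to any following vector, and so does a new
 * vector whose offset is not 4 KiB aligned; either case prevents building
 * a single request without violating the device's SG constraints.
 */
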
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 * 1) If max_discard_segments > 1, the driver treats every bio as a range and
 *    sends the bios to the controller together. The ranges don't need to be
 *    contiguous.
 * 2) Otherwise, the request will be a normal read/write request. The ranges
 *    need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

void elv_update_nr_hw_queues(struct request_queue *q);
void elevator_set_default(struct request_queue *q);
void elevator_set_none(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data. The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	if (lim->chunk_sectors)
		return true;
	if (bio->bi_vcnt != 1)
		return true;
	return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
		lim->min_segment_size;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio: bio to be split
 * @lim: queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}

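/*
 * Typical caller pattern (illustrative sketch, not copied from a specific
 * caller): the submission path splits before allocating a request, e.g.
 *
 *	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 *	if (!bio)
 *		return;
 *
 * A NULL return means splitting failed and the bio has already been
 * completed with an error; otherwise the returned bio fits the limits and
 * nr_segs holds its segment count for request allocation.
 */
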
/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @paddr: address of the range to add
 * @len: maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}

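/*
 * Worked example (illustrative values): with seg_boundary_mask = 0xffff
 * (64 KiB boundary), max_segment_size = 64 KiB, paddr = 0x10f000 and
 * len = 8192, the distance to the next boundary is 0x1000, so only 4096 of
 * the 8192 bytes can be added to this segment; the remainder must start a
 * new segment.
 */
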
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
	/*
	 * For zone append requests, the request sector indicates the location
	 * at which the BIO data was written. Return this value to the BIO
	 * issuer through the BIO iter sector.
	 * For plugged zone writes, which include emulated zone append, we need
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * look up the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND ||
	    bio_flagged(bio, BIO_EMULATES_ZONE_APPEND))
		bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

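/*
 * Usage sketch (illustrative): the owner of a request drops the final
 * reference with req_ref_put_and_test() when completing it, while code
 * that needs to look at a request it does not own (e.g. timeout handling,
 * as noted above) first takes a temporary reference with
 * req_ref_inc_not_zero() and drops it the same way.
 */
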
static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}

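/*
 * Effectively, all timestamps taken while a task runs with a plug installed
 * share one cached ktime_get_ns() reading; PF_BLOCK_TS marks the task so the
 * cached value can be invalidated when the plug is flushed. Calls made
 * without a plug, or outside task context, always read the clock directly.
 */
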
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

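/*
 * Layout example: bit 63 is the reserved flag (BIO_ISSUE_THROTL_SKIP_LATENCY),
 * bits 62..51 hold the size value (a sector count, per bio_issue_size()
 * returning sector_t, capped at 4095 by bio_issue_init()), and bits 50..0
 * hold the low 51 bits of the issue time in nanoseconds, which wrap roughly
 * every 26 days.
 */
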
void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

#endif /* BLK_INTERNAL_H */