/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};
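
/*
 * Note on the layout above (descriptive, see blk-flush.c for the details):
 * flush requests are double-buffered.  One of the two flush_queue[] lists
 * collects newly pending flushes while the other holds the batch currently
 * in flight; flush_pending_idx and flush_running_idx select which list
 * plays which role and are toggled when a new flush sequence is kicked
 * off.  flush_pending_since records when the oldest pending flush arrived.
 */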

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
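
/*
 * Usage sketch (illustrative only, not a declaration in this header): a
 * successful bio_queue_enter() takes a reference on q->q_usage_counter
 * that must be dropped with blk_queue_exit() once the bio has either been
 * turned into a request or failed.  The submission path does roughly:
 *
 *	if (bio_queue_enter(bio))
 *		return;		(queue dying/draining; bio already ended)
 *	rq = ...allocate a request for the bio...
 *	if (!rq) {
 *		bio_wouldblock_error(bio);
 *		blk_queue_exit(q);
 *	}
 */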

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
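
/*
 * Worked example for the boundary test above (illustrative): with
 * queue_segment_boundary() == 0xffff (64K), a merge is allowed only if
 * both vectors lie in the same 64K window.  ORing an address with the
 * mask saturates the low bits, leaving just the bits that identify the
 * window.  For addr1 = 0x1f000 and a merged end of 0x20fff,
 * 0x1f000 | 0xffff == 0x1ffff but 0x20fff | 0xffff == 0x2ffff: the values
 * differ, the merged segment would straddle a boundary, and the vectors
 * are kept separate.
 */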

static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
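
/*
 * Illustrative note: virt_boundary_mask models devices such as NVMe whose
 * PRP lists require every data chunk after the first to start on a
 * (typically 4K) boundary and every chunk before the last to end on one.
 * With a 4K mask, a previous vector ending at in-page offset 0x800, or a
 * following vector starting at offset 0x200, each leave a "gap": the bio
 * cannot be merged and must be split instead.
 */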

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 * 1) If max_discard_segments > 1, the driver treats every bio as a range
 *    and sends the bios to the controller together. The ranges don't need
 *    to be contiguous.
 * 2) Otherwise, the request will be a normal read/write request. The
 *    ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
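
/*
 * Sketch (assumed driver-side code, not part of this header): a driver
 * opts in to case 1) above by raising the segment limit when it sets up
 * its queue, e.g.:
 *
 *	blk_queue_max_discard_segments(q, 256);
 *
 * Each discard bio then becomes one range of a multi-range command (as in
 * NVMe DSM deallocate); leaving the limit at its default of 1 keeps
 * discards as single contiguous requests.
 */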

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}
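
/*
 * Note on the clamp above (illustrative reasoning): a bio's size is a
 * 32-bit byte count (bi_iter.bi_size), so no single request can cover
 * more than UINT_MAX bytes.  UINT_MAX >> SECTOR_SHIFT converts that
 * ceiling to 512-byte sectors (about 8 million sectors, i.e. just under
 * 4 GiB per request), which is why max_discard_sectors is capped there
 * regardless of what the device reports.
 */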

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

static inline bool bio_may_exceed_limits(struct bio *bio,
		struct queue_limits *lim)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data. The check might
	 * lead to occasional false negatives when bios are cloned, but
	 * compared to the performance impact of cloned bios themselves the
	 * loop below doesn't matter anyway.
	 */
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 * a) it's attached to a gendisk, and
 * b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}
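
/*
 * Background note (illustrative): bounce buffering exists for legacy
 * drivers that can only DMA to lowmem.  When it applies,
 * __blk_queue_bounce() clones the bio and copies highmem page contents
 * into freshly allocated lowmem pages (copying back on completion for
 * reads), which is why callers must continue with the returned bio rather
 * than the original one.
 */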

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
int blk_iolatency_init(struct gendisk *disk);
#else
static inline int blk_iolatency_init(struct gendisk *disk) { return 0; }
#endif

#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
void disk_clear_zone_settings(struct gendisk *disk);
#else
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline void disk_clear_zone_settings(struct gendisk *disk) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);
130879f1 422
704b914f
ML
423static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
424{
425 if (srcu)
426 return blk_requestq_srcu_cachep;
427 return blk_requestq_cachep;
428}
429struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
430
e16e506c 431int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
da7ba729 432
92e7755e 433int disk_alloc_events(struct gendisk *disk);
d5870edf
CH
434void disk_add_events(struct gendisk *disk);
435void disk_del_events(struct gendisk *disk);
436void disk_release_events(struct gendisk *disk);
926597ff
CH
437void disk_block_events(struct gendisk *disk);
438void disk_unblock_events(struct gendisk *disk);
439void disk_flush_events(struct gendisk *disk, unsigned int mask);
2bc8cda5
CH
440extern struct device_attribute dev_attr_events;
441extern struct device_attribute dev_attr_events_async;
442extern struct device_attribute dev_attr_events_poll_msecs;
d5870edf 443
cc5c516d
CH
444extern struct attribute_group blk_trace_attr_group;
445
8a709512 446long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
84b8514b
CH
447long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
448
cd82cca7
CH
449extern const struct address_space_operations def_blk_aops;
450
22d0c408 451int disk_register_independent_access_ranges(struct gendisk *disk);
a2247f19
DLM
452void disk_unregister_independent_access_ranges(struct gendisk *disk);
453
06c8c691
CH
454#ifdef CONFIG_FAIL_MAKE_REQUEST
455bool should_fail_request(struct block_device *part, unsigned int bytes);
456#else /* CONFIG_FAIL_MAKE_REQUEST */
457static inline bool should_fail_request(struct block_device *part,
458 unsigned int bytes)
459{
460 return false;
461}
462#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
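
/*
 * How the check above works (illustrative): the atomic value is
 * reinterpreted as unsigned, so 0 stays 0 while small negative values
 * (underflow) wrap to huge numbers near UINT_MAX.  Adding 127u makes
 * exactly the values 0 and -1..-127 land in [0, 127]; a healthy count of
 * 1 gives 128 and fails the test.  The WARN_ON_ONCE in
 * req_ref_put_and_test() below therefore fires only when a reference is
 * dropped on a zero or underflowed count.
 */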

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

#endif /* BLK_INTERNAL_H */