/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern struct device_type part_type;
extern struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is not
 * inserted. Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE		= 1 << 0,
	GENHD_FL_HIDDEN			= 1 << 1,
	GENHD_FL_NO_PART		= 1 << 2,
};
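
/*
 * Example (a sketch, not part of this header): a driver for a device with
 * removable media that does not want partition scanning could combine the
 * flags above after allocating its gendisk (struct defined below):
 *
 *	disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
 */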

enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile	*profile;
	unsigned char				flags;
	unsigned char				tuple_size;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		max_open_zones;
	unsigned int		max_active_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}
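
/*
 * Example (a sketch): validating a block size supplied from userspace before
 * applying it with set_blocksize(), which is declared later in this header:
 *
 *	if (blk_validate_block_size(bsize))
 *		return -EINVAL;	 // not 512..PAGE_SIZE, or not a power of two
 *	return set_blocksize(bdev, bsize);
 */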

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce		bounce;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int		dma_alignment;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int bdev_nr_zones(struct block_device *bdev);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					 fmode_t mode, unsigned int cmd,
					 unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};
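
/*
 * Example: a sketch of walking the independent access ranges of a disk.
 * my_print_ia_ranges is hypothetical and assumes disk->ia_ranges was already
 * set up by the driver.
 */
static inline void my_print_ia_ranges(struct gendisk *disk)
{
	struct blk_independent_access_ranges *iars = disk->ia_ranges;
	unsigned int i;

	if (!iars)		/* single-actuator device: nothing to show */
		return;
	for (i = 0; i < iars->nr_ia_ranges; i++)
		pr_info("range %u: sector %llu, %llu sectors\n", i,
			(unsigned long long)iars->ia_range[i].sector,
			(unsigned long long)iars->ia_range[i].nr_sectors);
}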

struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct percpu_ref	q_usage_counter;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct xarray		hctx_table;
	unsigned int		nr_hw_queues;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int			id;

	spinlock_t		queue_lock;

	struct gendisk		*disk;

	refcount_t		refs;

	/*
	 * mq queue kobject
	 */
	struct kobject		*mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_pad_mask;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	unsigned int		rq_timeout;

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;

	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;
	struct list_head	flush_list;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	int			quiesce_depth;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;

	bool			mq_sysfs_init_done;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skip the queue */

#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
				 (1UL << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}
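
/*
 * Example: a sketch of distinguishing the zoned models above; the my_* name
 * is hypothetical. Host-managed (BLK_ZONED_HM) devices reject out-of-order
 * writes in sequential zones, while host-aware (BLK_ZONED_HA) devices merely
 * prefer sequential writes.
 */
static inline bool my_requires_seq_writes(struct request_queue *q)
{
	return blk_queue_zoned_model(q) == BLK_ZONED_HM;
}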

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
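
/*
 * Worked example (assuming the power-of-two zone size this code relies on):
 * with 256 MiB zones, chunk_sectors is 524288 and ilog2(524288) is 19, so
 * sector 1572864 maps to zone 1572864 >> 19 = 3.
 */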

static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return false;
	if (!disk->conv_zones_bitmap)
		return true;
	return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}

static inline void disk_set_max_open_zones(struct gendisk *disk,
		unsigned int max_open_zones)
{
	disk->max_open_zones = max_open_zones;
}

static inline void disk_set_max_active_zones(struct gendisk *disk,
		unsigned int max_active_zones)
{
	disk->max_active_zones = max_active_zones;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_active_zones;
}

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	return false;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})
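
/*
 * Example: a sketch of the usual lifecycle for a BIO-based driver; the my_*
 * name is hypothetical and error unwinding is shortened for brevity.
 */
static inline int my_create_disk(void)
{
	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);
	int ret;

	if (!disk)
		return -ENOMEM;
	/* the driver fills in fops, disk_name and capacity here */
	ret = add_disk(disk);
	if (ret)
		put_disk(disk);		/* drops the allocation reference */
	return ret;
}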

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool bdev_check_media_change(struct block_device *bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

/*
 * Return how much of the chunk is left to be used for I/O at a given offset.
 */
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
		unsigned int chunk_sectors)
{
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}
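
/*
 * Worked example: with chunk_sectors = 256 and offset = 300, the offset
 * within the chunk is 300 & 255 = 44, so 256 - 44 = 212 sectors remain
 * before the next chunk boundary.
 */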

/*
 * Access functions for manipulating queue properties
 */
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

/*
 * Elevator features for blk_queue_required_elevator_features:
 */
/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list; /* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;
	bool nowait;

	struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
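
/*
 * Example: a sketch of batching several bios under one plug so the block
 * layer can merge them; my_submit_batch and the bios array are hypothetical.
 */
static inline void my_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* held back and merged if possible */
	blk_finish_plug(&plug);		/* flushes the plugged requests */
}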

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define BLK_DEF_MAX_SECTORS 2560u

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{
	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_STABLE_WRITES,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	return blk_queue_zoned_model(bdev_get_queue(bdev));
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}

/* Whether write serialization is required for @op on zoned devices. */
static inline bool op_needs_zoned_write_locking(enum req_op op)
{
	return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
}

static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
					  enum req_op op)
{
	return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}
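
/*
 * Worked example: with bdev_zone_sectors() == 524288, sector 1048576 is a
 * zone start (1048576 & 524287 == 0) while sector 1048577 is offset 1 into
 * its zone. This relies on the power-of-two zone size enforced by the block
 * layer.
 */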

static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->limits.dma_alignment : 511;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}
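
/*
 * Worked example: blksize_bits(4096) is order_base_2(4096 >> 9) + 9 =
 * order_base_2(8) + 9 = 3 + 9 = 12, i.e. 1 << 12 == 4096.
 */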

static inline unsigned int block_size(struct block_device *bdev)
{
	return 1 << bdev->bd_inode->i_blkbits;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

#define NFL4_UFLG_MASK			0x0000003F

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
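
/*
 * Example: a sketch of I/O accounting in a bio-based driver's submit path;
 * my_submit_bio and the inline completion are hypothetical (real drivers
 * normally end accounting from their bio completion handler).
 */
static inline void my_submit_bio(struct bio *bio)
{
	unsigned long start = bio_start_io_acct(bio);

	/* ... hand the bio to the hardware; once it completes: ... */
	bio_end_io_acct(bio, start);	/* accounts sectors and latency */
}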

int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
		void *holder);
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
int bd_prepare_to_claim(struct block_device *bdev, void *holder);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, fmode_t mode);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
		loff_t lend);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
void printk_all_partitions(void);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
}
static inline void printk_all_partitions(void)
{
}
#endif /* CONFIG_BLOCK */

int fsync_bdev(struct block_device *bdev);

int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
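
/*
 * Example: a sketch of busy-waiting for a polled bio, loosely following the
 * pattern used by direct I/O; "done" would be set from the bio's end_io
 * handler. The my_* name is hypothetical.
 */
static inline void my_poll_bio(struct bio *bio, bool *done)
{
	DEFINE_IO_COMP_BATCH(iob);

	/* BLK_POLL_ONESHOT: touch the hardware only once per call */
	while (!READ_ONCE(*done))
		bio_poll(bio, &iob, BLK_POLL_ONESHOT);
}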

#endif /* _LINUX_BLKDEV_H */