/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	atomic_t		__bd_flags;	// partition number + flags
#define BD_PARTNO		255	// lower 8 bits; assign-once
#define BD_READ_ONLY		(1u<<8)	// read-only policy
#define BD_WRITE_HOLDER		(1u<<9)
#define BD_HAS_SUBMIT_BIO	(1u<<10)
#define BD_RO_WARNED		(1u<<11)
#ifdef CONFIG_FAIL_MAKE_REQUEST
#define BD_MAKE_IT_FAIL		(1u<<12)
#endif
	dev_t			bd_dev;
	struct address_space	*bd_mapping;	/* page cache */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	void *			bd_claiming;
	void *			bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
	int			bd_writers;
#ifdef CONFIG_SECURITY
	void			*bd_security;
#endif
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

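/*
 * Illustrative sketch, not part of the upstream header: __bd_flags packs the
 * assign-once partition number (low 8 bits, BD_PARTNO mask) and the boolean
 * state bits above it into one atomic_t, so both can be fetched with a
 * single atomic_read(). The helper below is hypothetical.
 */
static inline unsigned int example_bdev_partno(struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}
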
#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#define BLK_STS_OK		0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

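/*
 * Illustrative sketch, not part of the upstream header: choosing between the
 * two resource statuses in a driver's submission path. The helper and its
 * parameter are hypothetical; what matters is the scope of the exhausted
 * resource.
 */
static inline blk_status_t example_resource_status(bool device_scoped)
{
	/*
	 * Device-scoped shortage (e.g. hardware tags): in-flight IO is
	 * guaranteed to rerun the queue, so BLK_STS_DEV_RESOURCE is safe.
	 * System-wide shortage (memory, DMA mappings): no such guarantee,
	 * so BLK_STS_RESOURCE must be used instead.
	 */
	return device_scoped ? BLK_STS_DEV_RESOURCE : BLK_STS_RESOURCE;
}
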
/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is
 * offline or is being taken offline. This could help differentiate the case
 * where a device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)16)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)17)

/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL		((__force blk_status_t)19)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

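/*
 * Illustrative usage, not part of the upstream header: a multipath driver's
 * completion handler could use blk_path_error() to decide whether retrying
 * on another path can help. The helper name is hypothetical.
 */
static inline bool example_should_failover(blk_status_t status)
{
	/* only path-related errors are worth retrying on a failover path */
	return status != BLK_STS_OK && blk_path_error(status);
}
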
struct bio_issue {
	u64 value;
};

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	enum rw_hint		bi_write_hint;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	union {
		/* for polled bios: */
		blk_qc_t		bi_cookie;
		/* for plugged zoned writes only: */
		unsigned int		__bi_nr_segments;
	};
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

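/*
 * Illustrative usage, not part of the upstream header: a typical submitter
 * allocates a bio against a block device, fills in the iterator and the
 * completion callback, then submits it. bio_alloc(), bio_add_page() and
 * submit_bio() are declared in <linux/bio.h>, not here; my_end_io is a
 * hypothetical bio_end_io_t:
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */
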
/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
	BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
	BIO_FLAG_LAST
};

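/*
 * Illustrative usage, not part of the upstream header: these values are bit
 * numbers into bio->bi_flags; accessors such as bio_flagged() live in
 * <linux/bio.h>, e.g. a completion path that skips error logging:
 *
 *	if (bio_flagged(bio, BIO_QUIET))
 *		return;
 */
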
typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)13,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)15,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

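/*
 * Illustrative note, not part of the upstream header: the direction rule
 * above can be checked against the opcodes, e.g. REQ_OP_READ (0) and
 * REQ_OP_SECURE_ERASE (5):
 *
 *	op_is_write(REQ_OP_READ)	 == false  (transfer FROM the device)
 *	op_is_write(REQ_OP_SECURE_ERASE) == true   (transfer TO the device)
 *
 * op_is_write() is defined later in this file.
 */
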
/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */
	__REQ_ATOMIC,		/* for atomic write operations */
	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC	(__force blk_opf_t)(1ULL << __REQ_ATOMIC)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

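/*
 * Illustrative sketch, not part of the upstream header: a blk_opf_t carries
 * exactly one REQ_OP_* value in its low REQ_OP_BITS bits plus any REQ_*
 * flags above them. The helper name is hypothetical.
 */
static inline blk_opf_t example_sync_write_opf(void)
{
	/* a synchronous write with forced unit access */
	return REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
}
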
enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	/* odd op numbers transfer data to the device (see enum req_op) */
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	/* relies on STAT_READ == 0 and STAT_WRITE == 1 in enum stat_group */
	return op_is_write(op);
}

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */