/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

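/*
 * Worked example (illustrative, not from the original header): on an
 * architecture with 4 KiB pages, PAGE_SHIFT is 12, so
 * PAGE_SECTORS_SHIFT = 12 - 9 = 3, PAGE_SECTORS = 8 (eight 512-byte
 * sectors per page) and SECTOR_MASK = 0x7.
 */
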
struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	bool			bd_read_only;	/* read-only policy */
	dev_t			bd_dev;
	atomic_t		bd_openers;
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	void *			bd_claiming;
	struct device		bd_device;
	void *			bd_holder;
	int			bd_holders;
	bool			bd_write_holder;
	struct kobject		*bd_holder_dir;
	u8			bd_partno;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
	struct super_block	*bd_fsfreeze_sb;

	struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
	bool			bd_make_it_fail;
#endif
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

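/*
 * Illustrative sketch (not part of this header): dev_to_bdev() recovers the
 * containing block_device from its embedded struct device, e.g. inside a
 * device callback that is handed the bd_device:
 *
 *	struct block_device *bdev = dev_to_bdev(dev);
 *	pr_info("%llu sectors\n", (unsigned long long)bdev->bd_nr_sectors);
 */
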
/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. Examples of that are zones that are write-locked, but a read
 * to the same zone could be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is
 * offline or is being taken offline. This could help differentiate the case
 * where a device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)17)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

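/*
 * Illustrative use (a sketch, not part of this header): a multipath driver's
 * completion handler can consult blk_path_error() to decide between failing
 * a request outright and retrying it on another path:
 *
 *	if (blk_path_error(status))
 *		retry_on_alternate_path(rq);		// hypothetical helper
 *	else
 *		complete_with_error(rq, status);	// hypothetical helper
 */
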
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

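/*
 * Worked example (derived from the macros above): the shifts resolve to
 * BIO_ISSUE_RES_SHIFT = 63 and BIO_ISSUE_SIZE_SHIFT = 51, so a bio_issue
 * value packs as
 *
 *	bit  63     : reserved (BIO_ISSUE_THROTL_SKIP_LATENCY)
 *	bits 62..51 : bio size, truncated to 12 bits
 *	bits 50..0  : issue time from ktime_get_ns(), truncated to 51 bits
 *
 * 2^51 ns is roughly 26 days, after which the stored time wraps.
 */
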
typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	blk_qc_t		bi_cookie;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

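/*
 * Note (derived from the struct layout above): bio_reset() clears only the
 * first BIO_RESET_BYTES of a bio, i.e. everything before bi_max_vecs, so
 * bi_max_vecs, __bi_cnt, bi_io_vec, bi_pool and the inline vecs survive a
 * reset.
 */
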
/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put vec pages on release */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
	BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

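/*
 * Worked example (derived from the encoding above): bit 0 of the opcode
 * alone tells the direction. REQ_OP_WRITE (1), REQ_OP_WRITE_ZEROES (9) and
 * REQ_OP_ZONE_APPEND (13) all have bit 0 set, so data flows TO the device,
 * while REQ_OP_READ (0) has it clear. op_is_write() below relies on exactly
 * this property.
 */
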
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */

	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT	(__force blk_opf_t)(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)

#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

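/*
 * Illustrative composition (a sketch): a blk_opf_t combines one REQ_OP_*
 * value in the low REQ_OP_BITS with any number of REQ_* flags above it,
 * e.g. a synchronous write with forced unit access:
 *
 *	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 */
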
enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

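/*
 * Note (derived from the definitions above): the fall-through return works
 * because op_is_write() yields 0 or 1 and STAT_READ/STAT_WRITE are the first
 * two enum stat_group values, so e.g. op_stat_group(REQ_OP_READ) == STAT_READ
 * and op_stat_group(REQ_OP_WRITE) == STAT_WRITE.
 */
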
struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */