scsi: block: Introduce ioprio hints
[linux-block.git] / include/linux/blk_types.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

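/*
 * Illustrative sketch, not part of the original header: the constants above
 * make byte/sector conversion a pair of shifts. Both helpers below are
 * hypothetical, shown only to demonstrate the arithmetic, e.g.
 * 4096 bytes >> SECTOR_SHIFT == 8 sectors (== PAGE_SECTORS on 4K pages).
 */
static inline sector_t example_bytes_to_sectors(u64 bytes)
{
	return bytes >> SECTOR_SHIFT;
}

static inline u64 example_sectors_to_bytes(sector_t sects)
{
	return (u64)sects << SECTOR_SHIFT;
}
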
struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	bool			bd_read_only;	/* read-only policy */
	u8			bd_partno;
	bool			bd_write_holder;
	bool			bd_has_submit_bio;
	dev_t			bd_dev;
	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	void *			bd_claiming;
	void *			bd_holder;
	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
	struct super_block	*bd_fsfreeze_sb;

	struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
	bool			bd_make_it_fail;
#endif
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

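/*
 * Illustrative sketch, not part of the original header: dev_to_bdev()
 * recovers the containing block_device from the embedded bd_device via
 * container_of(), the usual pattern for driver-core callbacks that only
 * receive a struct device. example_bd_release() is hypothetical.
 */
static void example_bd_release(struct device *dev)
{
	struct block_device *bdev = dev_to_bdev(dev);

	/* @bdev now points at the block_device that embeds @dev */
	(void)bdev;
}
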
/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif
#define	BLK_STS_OK		0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. Examples of that are zones that are write-locked, but a read
 * to the same zone could be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is
 * offline or is being taken offline. This could help differentiate the case
 * where a device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)17)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

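/*
 * Illustrative sketch, not part of the original header: a multipath-style
 * completion handler can use blk_path_error() to decide whether retrying a
 * failed bio on another path can help. example_requeue_on_other_path() and
 * example_endio() are hypothetical; bio_endio() is the real completion entry
 * point declared in <linux/bio.h>.
 */
static void example_requeue_on_other_path(struct bio *bio);

static void example_endio(struct bio *bio)
{
	if (bio->bi_status != BLK_STS_OK && blk_path_error(bio->bi_status))
		example_requeue_on_other_path(bio); /* may succeed elsewhere */
	else
		bio_endio(bio);		/* success, or a retry cannot help */
}
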
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

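/*
 * Illustrative sketch, not part of the original header: the 64-bit layout is
 * [63] reserved, [62:51] size, [50:0] time. bio_issue_init() with size 8 at
 * time t stores (t & BIO_ISSUE_TIME_MASK) | (8ULL << BIO_ISSUE_SIZE_SHIFT),
 * and the accessors recover each field by masking and shifting.
 * example_bio_issue_roundtrip() is hypothetical.
 */
static inline void example_bio_issue_roundtrip(void)
{
	struct bio_issue issue = { .value = 0 };

	bio_issue_init(&issue, 8);
	/* bio_issue_size(&issue) == 8 */
	/* bio_issue_time(&issue) == low 51 bits of ktime_get_ns() at init */
}
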
typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	blk_qc_t		bi_cookie;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

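/*
 * Illustrative sketch, not part of the original header: bio_reset()
 * conceptually zeroes only the first BIO_RESET_BYTES of the bio, which is
 * why every field from bi_max_vecs onward survives a reset. A hypothetical
 * reduction of that step:
 */
static inline void example_clear_reset_prefix(struct bio *bio)
{
	memset(bio, 0, BIO_RESET_BYTES); /* fields before bi_max_vecs only */
}
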
/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put release vec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
	BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

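/*
 * Illustrative sketch, not part of the original header: compile-time checks
 * of the direction encoding described above. example_direction_checks() is
 * hypothetical; BUILD_BUG_ON() comes from <linux/build_bug.h>.
 */
static inline void example_direction_checks(void)
{
	BUILD_BUG_ON(!((__force u32)REQ_OP_WRITE & 1));	 /* writes: LSB set */
	BUILD_BUG_ON((__force u32)REQ_OP_READ & 1);	 /* reads: LSB clear */
	BUILD_BUG_ON(!((__force u32)REQ_OP_ZONE_APPEND & 1)); /* appends write */
}
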
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */

	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

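/*
 * Illustrative sketch, not part of the original header: a blk_opf_t carries
 * one REQ_OP_* value in its low REQ_OP_BITS bits and REQ_* flags above them,
 * so the two halves can be separated with REQ_OP_MASK. example_split_opf()
 * is hypothetical.
 */
static inline void example_split_opf(blk_opf_t opf)
{
	enum req_op op = opf & REQ_OP_MASK;	/* low 8 bits: the operation */
	blk_opf_t flags = opf & ~REQ_OP_MASK;	/* upper 24 bits: the flags */

	(void)op;
	(void)flags;
}
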
enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

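/*
 * Illustrative sketch, not part of the original header: how the helpers
 * above classify one composed value. example_classify() is hypothetical;
 * WARN_ON() comes from <linux/bug.h>.
 */
static inline void example_classify(void)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

	WARN_ON(!op_is_write(opf));	/* REQ_OP_WRITE has the LSB set */
	WARN_ON(!op_is_sync(opf));	/* REQ_SYNC (and REQ_FUA) make it sync */
	WARN_ON(!op_is_flush(opf));	/* REQ_FUA engages the flush machinery */
	WARN_ON(op_is_discard(opf));	/* the op is not REQ_OP_DISCARD */
}
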
/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

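/*
 * Illustrative sketch, not part of the original header: op_stat_group()
 * doubles as an array index because STAT_READ == 0 and STAT_WRITE == 1 line
 * up with op_is_write()'s 0/1 result. example_account_io() is hypothetical.
 */
static inline void example_account_io(unsigned long counts[NR_STAT_GROUPS],
				      enum req_op op)
{
	counts[op_stat_group(op)]++;	/* READ -> 0, WRITE -> 1, DISCARD -> 2 */
}
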
struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */