4 #include "compiler/compiler.h"
20 IO_U_F_FLIGHT = 1 << 1,
21 IO_U_F_NO_FILE_PUT = 1 << 2,
22 IO_U_F_IN_CUR_DEPTH = 1 << 3,
23 IO_U_F_BUSY_OK = 1 << 4,
24 IO_U_F_TRIMMED = 1 << 5,
25 IO_U_F_BARRIER = 1 << 6,
26 IO_U_F_VER_LIST = 1 << 7,
33 struct timespec start_time;
34 struct timespec issue_time;
36 struct fio_file *file;
41 * For replay workloads, we may want to account as a different
42 * IO type than what is being submitted.
44 enum fio_ddir acct_ddir;
49 unsigned short numberio;
52 * Allocated/set buffer and length
54 unsigned long long buflen;
55 unsigned long long offset;
59 * Initial seed for generating the buffer contents
64 * IO engine state, may be different from above when we get
65 * partial transfers / residual data counts
68 unsigned long long xfer_buflen;
71 * Parameter related to pre-filled buffers and
72 * their size to handle variable block sizes.
74 unsigned long long buf_filled_len;
78 unsigned long long resid;
82 * io engine private data
91 struct flist_head verify_list;
92 struct workqueue_work work;
95 #ifdef CONFIG_LINUX_BLKZONED
97 * ZBD mode zbd_queue_io callback: called after engine->queue operation
98 * to advance a zone write pointer and eventually unlock the I/O zone.
99 * @q indicates the I/O queue status (busy, queued or completed).
100 * @success == true means that the I/O operation has been queued or
101 * completed successfully.
103 void (*zbd_queue_io)(struct io_u *, int q, bool success);
106 * ZBD mode zbd_put_io callback: called in after completion of an I/O
107 * or commit of an async I/O to unlock the I/O target zone.
109 void (*zbd_put_io)(const struct io_u *);
113 * Callback for io completion
115 int (*end_io)(struct thread_data *, struct io_u **);
121 #ifdef CONFIG_POSIXAIO
125 struct sg_io_hdr hdr;
130 #ifdef CONFIG_SOLARISAIO
131 aio_result_t resultp;
143 extern struct io_u *__get_io_u(struct thread_data *);
144 extern struct io_u *get_io_u(struct thread_data *);
145 extern void put_io_u(struct thread_data *, struct io_u *);
146 extern void clear_io_u(struct thread_data *, struct io_u *);
147 extern void requeue_io_u(struct thread_data *, struct io_u **);
148 extern int __must_check io_u_sync_complete(struct thread_data *, struct io_u *);
149 extern int __must_check io_u_queued_complete(struct thread_data *, int);
150 extern void io_u_queued(struct thread_data *, struct io_u *);
151 extern int io_u_quiesce(struct thread_data *);
152 extern void io_u_log_error(struct thread_data *, struct io_u *);
153 extern void io_u_mark_depth(struct thread_data *, unsigned int);
154 extern void fill_io_buffer(struct thread_data *, void *, unsigned long long, unsigned long long);
155 extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned long long, unsigned long long);
156 void io_u_mark_complete(struct thread_data *, unsigned int);
157 void io_u_mark_submit(struct thread_data *, unsigned int);
158 bool queue_full(const struct thread_data *);
160 int do_io_u_sync(const struct thread_data *, struct io_u *);
161 int do_io_u_trim(const struct thread_data *, struct io_u *);
164 static inline void dprint_io_u(struct io_u *io_u, const char *p)
166 struct fio_file *f = io_u->file;
169 dprint(FD_IO, "%s: io_u %p: off=0x%llx,len=0x%llx,ddir=%d,file=%s\n",
171 (unsigned long long) io_u->offset,
172 io_u->buflen, io_u->ddir,
175 dprint(FD_IO, "%s: io_u %p: off=0x%llx,len=0x%llx,ddir=%d\n",
177 (unsigned long long) io_u->offset,
178 io_u->buflen, io_u->ddir);
181 #define dprint_io_u(io_u, p)
184 static inline enum fio_ddir acct_ddir(struct io_u *io_u)
186 if (io_u->acct_ddir != -1)
187 return io_u->acct_ddir;
/*
 * Flag manipulation wrappers: route through td_flags_clear()/td_flags_set()
 * so flag updates can honor the thread's synchronization requirements.
 *
 * Note: the io_u argument is parenthesized *before* the -> dereference in
 * both macros. The previous io_u_clear expansion, &(io_u->flags), would
 * mis-parse for argument expressions such as "p + 1"; io_u_set already had
 * the correct form, so this also makes the two macros consistent.
 */
#define io_u_clear(td, io_u, val)	\
	td_flags_clear((td), &(io_u)->flags, (val))
#define io_u_set(td, io_u, val)	\
	td_flags_set((td), &(io_u)->flags, (val))