4 #include "compiler/compiler.h"
/*
 * NOTE(review): fragment of the io_u flags enum — the enum opener and
 * bit 0 are outside this view. Flag meanings below are inferred from
 * the names only; confirm against io_u.c before relying on them.
 */
20 IO_U_F_FLIGHT = 1 << 1,	/* presumably: io_u has been issued and is in flight — confirm */
21 IO_U_F_NO_FILE_PUT = 1 << 2,	/* presumably: skip the file put on completion — confirm */
22 IO_U_F_IN_CUR_DEPTH = 1 << 3,	/* presumably: counted in the current queue depth — confirm */
23 IO_U_F_BUSY_OK = 1 << 4,	/* presumably: a busy return from the engine is acceptable — confirm */
24 IO_U_F_TRIMMED = 1 << 5,	/* presumably: the covered range has been trimmed — confirm */
25 IO_U_F_BARRIER = 1 << 6,	/* presumably: barrier I/O — confirm */
26 IO_U_F_VER_LIST = 1 << 7,	/* presumably: linked on verify_list (see struct below) — confirm */
27 IO_U_F_PRIORITY = 1 << 8,	/* high-priority I/O; tested by io_u_is_prio() */
/*
 * NOTE(review): fragment of struct io_u — the struct opener, several
 * members, and the engine-private union header are outside this view.
 * Comments below are hedged where the visible code alone does not
 * establish the member's semantics.
 */
34 struct timespec start_time;	/* presumably: time the io_u was set up — confirm */
35 struct timespec issue_time;	/* presumably: time the io_u was issued to the engine — confirm */
37 struct fio_file *file;	/* file this I/O targets (may be NULL, see dprint_io_u below) */
42 * For replay workloads, we may want to account as a different
43 * IO type than what is being submitted.
45 enum fio_ddir acct_ddir;	/* accounting override; -1 means "use the real ddir" (see acct_ddir()) */
50 unsigned short numberio;	/* presumably: per-file I/O sequence number — confirm */
53 * Allocated/set buffer and length
55 unsigned long long buflen;
56 unsigned long long offset;	/* presumably: byte offset within 'file' — confirm */
60 * Initial seed for generating the buffer contents
65 * IO engine state, may be different from above when we get
66 * partial transfers / residual data counts
69 unsigned long long xfer_buflen;
72 * Parameter related to pre-filled buffers and
73 * their size to handle variable block sizes.
75 unsigned long long buf_filled_len;
79 unsigned long long resid;	/* presumably: residual bytes on a short transfer — confirm */
83 * io engine private data
92 struct flist_head verify_list;	/* presumably: linkage for deferred verification — confirm */
93 struct workqueue_work work;
97 * ZBD mode zbd_queue_io callback: called after engine->queue operation
98 * to advance a zone write pointer and eventually unlock the I/O zone.
99 * @q indicates the I/O queue status (busy, queued or completed).
100 * @success == true means that the I/O operation has been queued or
101 * completed successfully.
103 void (*zbd_queue_io)(struct io_u *, int q, bool success);
106 * ZBD mode zbd_put_io callback: called in after completion of an I/O
107 * or commit of an async I/O to unlock the I/O target zone.
109 void (*zbd_put_io)(const struct io_u *);
112 * Callback for io completion
114 int (*end_io)(struct thread_data *, struct io_u **);
/*
 * NOTE(review): engine-private members below sit under configuration
 * guards; the matching #endif lines (and some guarded members) are
 * outside this view, so the guard-to-member pairing shown here may be
 * incomplete — confirm against the full header.
 */
120 #ifdef CONFIG_POSIXAIO
124 struct sg_io_hdr hdr;	/* presumably: SG_IO request header — confirm which guard covers it */
129 #ifdef CONFIG_SOLARISAIO
130 aio_result_t resultp;	/* Solaris AIO result slot */
142 extern struct io_u *__get_io_u(struct thread_data *);
143 extern struct io_u *get_io_u(struct thread_data *);
144 extern void put_io_u(struct thread_data *, struct io_u *);
145 extern void clear_io_u(struct thread_data *, struct io_u *);
146 extern void requeue_io_u(struct thread_data *, struct io_u **);
147 extern int __must_check io_u_sync_complete(struct thread_data *, struct io_u *);
148 extern int __must_check io_u_queued_complete(struct thread_data *, int);
149 extern void io_u_queued(struct thread_data *, struct io_u *);
150 extern int io_u_quiesce(struct thread_data *);
151 extern void io_u_log_error(struct thread_data *, struct io_u *);
152 extern void io_u_mark_depth(struct thread_data *, unsigned int);
153 extern void fill_io_buffer(struct thread_data *, void *, unsigned long long, unsigned long long);
154 extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned long long, unsigned long long);
155 void io_u_mark_complete(struct thread_data *, unsigned int);
156 void io_u_mark_submit(struct thread_data *, unsigned int);
157 bool queue_full(const struct thread_data *);
159 int do_io_u_sync(const struct thread_data *, struct io_u *);
160 int do_io_u_trim(const struct thread_data *, struct io_u *);
/*
 * Debug helper: log an io_u's offset, length, and data direction to the
 * FD_IO debug channel, including the file name when a file is attached.
 *
 * NOTE(review): fragment — the surrounding #ifdef, the if/else branch
 * selection on `f`, and the closing braces are outside this view. The
 * empty macro at the bottom is the no-debug stub that compiles the
 * call sites away.
 */
163 static inline void dprint_io_u(struct io_u *io_u, const char *p)
165 struct fio_file *f = io_u->file;
168 dprint(FD_IO, "%s: io_u %p: off=0x%llx,len=0x%llx,ddir=%d,file=%s\n",
170 (unsigned long long) io_u->offset,
171 io_u->buflen, io_u->ddir,
174 dprint(FD_IO, "%s: io_u %p: off=0x%llx,len=0x%llx,ddir=%d\n",
176 (unsigned long long) io_u->offset,
177 io_u->buflen, io_u->ddir);
180 #define dprint_io_u(io_u, p)
/*
 * Return the data direction this io_u should be *accounted* as: replay
 * workloads may account an I/O as a different type than what is
 * submitted (see the acct_ddir member comment above). Returns the
 * acct_ddir override when set (!= -1); the fallback return — outside
 * this view — presumably returns io_u->ddir, confirm in the full header.
 */
183 static inline enum fio_ddir acct_ddir(struct io_u *io_u)
185 if (io_u->acct_ddir != -1)
186 return io_u->acct_ddir;
/*
 * Flag manipulation helpers for io_u->flags.
 *
 * All macro arguments are now fully parenthesized and each expansion is
 * wrapped in parentheses so the macros compose safely in any expression
 * context: the original io_u_clear dereferenced `io_u` inside the
 * argument parentheses (`&(io_u->flags)`, inconsistent with io_u_set
 * and unsafe for expression arguments), and io_u_is_prio leaked an
 * unparenthesized `!= 0` into the caller's expression, so e.g.
 * `!io_u_is_prio(x)` parsed as `(!(flags & mask)) != 0`.
 */
#define io_u_clear(td, io_u, val)	\
	td_flags_clear((td), &(io_u)->flags, (val))
#define io_u_set(td, io_u, val)	\
	td_flags_set((td), &(io_u)->flags, (val))
#define io_u_is_prio(io_u)	\
	(((io_u)->flags & (unsigned int) IO_U_F_PRIORITY) != 0)