/*
 * posixaio engine
 *
 * IO engine that uses the POSIX-defined aio interface.
 */
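
/*
 * Example: a minimal fio job file exercising this engine might look like
 * the following (illustrative only; the job name and option values are
 * arbitrary):
 *
 *      [posixaio-test]
 *      ioengine=posixaio
 *      rw=randread
 *      bs=4k
 *      size=128m
 *      iodepth=8
 */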

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>

#include "../fio.h"

struct posixaio_data {
        struct io_u **aio_events;
        unsigned int queued;
};
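
/*
 * Grab the current time into 'ts'. Returns 0 on success, non-zero if the
 * time could not be read.
 */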
static int fill_timespec(struct timespec *ts)
{
        if (!clock_gettime(CLOCK_MONOTONIC, ts))
                return 0;

        perror("clock_gettime");
        return 1;
}
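
/*
 * Microseconds elapsed since the timestamp 't' was taken.
 */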
static unsigned long long ts_utime_since_now(struct timespec *t)
{
        long long sec, nsec;
        struct timespec now;

        if (fill_timespec(&now))
                return 0;

        sec = now.tv_sec - t->tv_sec;
        nsec = now.tv_nsec - t->tv_nsec;
        if (sec > 0 && nsec < 0) {
                sec--;
                nsec += 1000000000;
        }

        sec *= 1000000;
        nsec /= 1000;
        return sec + nsec;
}
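
/*
 * Ask the kernel to cancel an in-flight request. Returns 0 if the request
 * was cancelled (or had already completed), 1 otherwise.
 */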
static int fio_posixaio_cancel(struct thread_data fio_unused *td,
                               struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        int r = aio_cancel(f->fd, &io_u->aiocb);

        if (r == AIO_ALLDONE || r == AIO_CANCELED)
                return 0;

        return 1;
}
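
/*
 * Fill in the aiocb embedded in the io_u before it is queued: file
 * descriptor, buffer, length, offset, and no signal-based completion
 * notification (completions are reaped with aio_error()/aio_return()).
 */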
static int fio_posixaio_prep(struct thread_data fio_unused *td,
                             struct io_u *io_u)
{
        os_aiocb_t *aiocb = &io_u->aiocb;
        struct fio_file *f = io_u->file;

        aiocb->aio_fildes = f->fd;
        aiocb->aio_buf = io_u->xfer_buf;
        aiocb->aio_nbytes = io_u->xfer_buflen;
        aiocb->aio_offset = io_u->offset;
        aiocb->aio_sigevent.sigev_notify = SIGEV_NONE;

        io_u->seen = 0;
        return 0;
}
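
/*
 * Maximum number of still-in-flight aiocbs handed to aio_suspend() in one
 * call when we have to wait for completions.
 */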
#define SUSPEND_ENTRIES 8
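
/*
 * Reap completed requests by walking the busy list and polling each aiocb
 * with aio_error(). If fewer than 'min' events are found, wait with
 * aio_suspend() (bounded by the timeout, if one was given) and scan again.
 */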
static int fio_posixaio_getevents(struct thread_data *td, unsigned int min,
                                  unsigned int max, struct timespec *t)
{
        struct posixaio_data *pd = td->io_ops->data;
        os_aiocb_t *suspend_list[SUSPEND_ENTRIES];
        struct flist_head *entry;
        struct timespec start;
        int have_timeout = 0;
        int suspend_entries = 0;
        unsigned int r = 0;

        if (t && !fill_timespec(&start))
                have_timeout = 1;

        memset(suspend_list, 0, sizeof(suspend_list));
restart:
        flist_for_each(entry, &td->io_u_busylist) {
                struct io_u *io_u = flist_entry(entry, struct io_u, list);
                int err;

                if (io_u->seen)
                        continue;

                err = aio_error(&io_u->aiocb);
                if (err == EINPROGRESS) {
                        if (suspend_entries < SUSPEND_ENTRIES) {
                                suspend_list[suspend_entries] = &io_u->aiocb;
                                suspend_entries++;
                        }
                        continue;
                }

                io_u->seen = 1;
                pd->queued--;
                pd->aio_events[r++] = io_u;

                if (err == ECANCELED)
                        io_u->resid = io_u->xfer_buflen;
                else if (!err) {
                        ssize_t retval = aio_return(&io_u->aiocb);

                        io_u->resid = io_u->xfer_buflen - retval;
                } else
                        io_u->error = err;

                if (r >= max)
                        break;
        }

        if (r >= min)
                return r;

        if (have_timeout) {
                unsigned long long usec;

                usec = (t->tv_sec * 1000000) + (t->tv_nsec / 1000);
                if (ts_utime_since_now(&start) > usec)
                        return r;
        }

        /*
         * must have some in-flight, wait for at least one
         */
        aio_suspend((const os_aiocb_t * const *)suspend_list,
                        suspend_entries, t);
        goto restart;
}
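
/*
 * Return the nth completed io_u collected by the last getevents call.
 */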
static struct io_u *fio_posixaio_event(struct thread_data *td, int event)
{
        struct posixaio_data *pd = td->io_ops->data;

        return pd->aio_events[event];
}
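
/*
 * Queue a request: reads and writes are submitted asynchronously via
 * aio_read()/aio_write(); trims and syncs (when aio_fsync() is not
 * available) are completed inline.
 */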
static int fio_posixaio_queue(struct thread_data *td,
                              struct io_u *io_u)
{
        struct posixaio_data *pd = td->io_ops->data;
        os_aiocb_t *aiocb = &io_u->aiocb;
        int ret;

        fio_ro_check(td, io_u);

        if (io_u->ddir == DDIR_READ)
                ret = aio_read(aiocb);
        else if (io_u->ddir == DDIR_WRITE)
                ret = aio_write(aiocb);
        else if (io_u->ddir == DDIR_TRIM) {
                if (pd->queued)
                        return FIO_Q_BUSY;

                do_io_u_trim(td, io_u);
                return FIO_Q_COMPLETED;
        } else {
#ifdef CONFIG_POSIXAIO_FSYNC
                ret = aio_fsync(O_SYNC, aiocb);
#else
                if (pd->queued)
                        return FIO_Q_BUSY;

                do_io_u_sync(td, io_u);
                return FIO_Q_COMPLETED;
#endif
        }

        if (ret) {
                int aio_err = errno;

                /*
                 * At least OSX has a very low limit on the number of pending
                 * IOs, so if it returns EAGAIN, we are out of resources
                 * to queue more. Just return FIO_Q_BUSY to naturally
                 * drop off at this depth.
                 */
                if (aio_err == EAGAIN)
                        return FIO_Q_BUSY;

                io_u->error = aio_err;
                td_verror(td, io_u->error, "xfer");
                return FIO_Q_COMPLETED;
        }

        pd->queued++;
        return FIO_Q_QUEUED;
}
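
/*
 * Free the per-thread engine data allocated in init.
 */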
static void fio_posixaio_cleanup(struct thread_data *td)
{
        struct posixaio_data *pd = td->io_ops->data;

        if (pd) {
                free(pd->aio_events);
                free(pd);
        }
}
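
/*
 * Allocate the per-thread engine data: one event slot per queued io_u,
 * sized by the configured iodepth.
 */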
static int fio_posixaio_init(struct thread_data *td)
{
        struct posixaio_data *pd = malloc(sizeof(*pd));

        memset(pd, 0, sizeof(*pd));
        pd->aio_events = malloc(td->o.iodepth * sizeof(struct io_u *));
        memset(pd->aio_events, 0, td->o.iodepth * sizeof(struct io_u *));

        td->io_ops->data = pd;
        return 0;
}
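
/*
 * Engine callbacks registered with fio; generic helpers are used for
 * opening/closing files and probing file size.
 */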
static struct ioengine_ops ioengine = {
        .name           = "posixaio",
        .version        = FIO_IOOPS_VERSION,
        .init           = fio_posixaio_init,
        .prep           = fio_posixaio_prep,
        .queue          = fio_posixaio_queue,
        .cancel         = fio_posixaio_cancel,
        .getevents      = fio_posixaio_getevents,
        .event          = fio_posixaio_event,
        .cleanup        = fio_posixaio_cleanup,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .get_file_size  = generic_get_file_size,
};
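
/*
 * fio_init/fio_exit mark these as constructor/destructor functions, so the
 * engine registers itself when fio is loaded and unregisters on exit.
 */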
static void fio_init fio_posixaio_register(void)
{
        register_ioengine(&ioengine);
}

static void fio_exit fio_posixaio_unregister(void)
{
        unregister_ioengine(&ioengine);
}