4 * IO engine using Glusterfs's gfapi async interface
/*
 * ->event() hook: return the completed io_u stored at slot 'event' of the
 * per-thread aio_events array (filled in by fio_gf_getevents() below).
 */
14 static struct io_u *fio_gf_event(struct thread_data *td, int event)
16 struct gf_data *gf_data = td->io_ops_data;
18 dprint(FD_IO, "%s\n", __FUNCTION__);
19 return gf_data->aio_events[event];
/*
 * ->getevents() hook: scan every io_u of this thread and collect those
 * whose async completion callback has marked them done (io->io_complete)
 * into g->aio_events[], to be handed out later via fio_gf_event().
 * NOTE(review): tail of the loop, min/max handling and the return
 * statement are not visible in this view — confirm against full source.
 */
22 static int fio_gf_getevents(struct thread_data *td, unsigned int min,
23 unsigned int max, const struct timespec *t)
25 struct gf_data *g = td->io_ops_data;
26 unsigned int events = 0;
30 dprint(FD_IO, "%s\n", __FUNCTION__);
32 io_u_qiter(&td->io_u_all, io_u, i) {
33 struct fio_gf_iou *io;
	/* only io_us currently in flight can complete */
35 if (!(io_u->flags & IO_U_F_FLIGHT))
38 io = io_u->engine_data;
39 if (io->io_complete) {
	/* completed: expose it through the events array */
41 g->aio_events[events] = io_u;
/*
 * ->io_u_free() hook: release the engine-private fio_gf_iou attached to
 * this io_u, complaining if it is torn down while still incomplete.
 */
59 static void fio_gf_io_u_free(struct thread_data *td, struct io_u *io_u)
61 struct fio_gf_iou *io = io_u->engine_data;
	/* freeing an io_u whose async callback never fired is a bug */
65 log_err("incomplete IO found.\n");
	/* clear the back-pointer so a stale io is never dereferenced */
66 io_u->engine_data = NULL;
/*
 * ->io_u_init() hook: allocate the engine-private per-io_u state
 * (struct fio_gf_iou) and attach it to io_u->engine_data.
 * On malloc failure the error is reported via td_verror().
 */
71 static int fio_gf_io_u_init(struct thread_data *td, struct io_u *io_u)
73 struct fio_gf_iou *io;
74 dprint(FD_FILE, "%s\n", __FUNCTION__);
76 io = malloc(sizeof(struct fio_gf_iou));
78 td_verror(td, errno, "malloc");
83 io_u->engine_data = io;
/*
 * Completion callback invoked by gfapi when an async request finishes.
 * Newer gfapi (CONFIG_GF_NEW_API) passes pre/post glfs_stat structures in
 * addition to the transfer result; the older API passes only the result.
 * 'data' is the io_u handed to the glfs_*_async() call in
 * fio_gf_async_queue(); 'ret' is the byte count or negative error.
 */
87 #if defined(CONFIG_GF_NEW_API)
88 static void gf_async_cb(glfs_fd_t * fd, ssize_t ret, struct glfs_stat *prestat,
89 struct glfs_stat *poststat, void *data)
91 static void gf_async_cb(glfs_fd_t * fd, ssize_t ret, void *data)
94 struct io_u *io_u = data;
95 struct fio_gf_iou *iou = io_u->engine_data;
97 dprint(FD_IO, "%s ret %zd\n", __FUNCTION__, ret);
/*
 * ->queue() hook: dispatch one io_u to the matching glfs_*_async() call
 * based on its data direction. gf_async_cb() fires on completion with the
 * io_u as its opaque argument. Returns FIO_Q_COMPLETED on submission
 * failure (error recorded via td_verror); the success return path is not
 * visible in this view — presumably FIO_Q_QUEUED, confirm in full source.
 */
101 static enum fio_q_status fio_gf_async_queue(struct thread_data fio_unused * td,
104 struct gf_data *g = td->io_ops_data;
107 dprint(FD_IO, "%s op %s\n", __FUNCTION__, io_ddir_name(io_u->ddir));
109 fio_ro_check(td, io_u);
111 if (io_u->ddir == DDIR_READ)
112 r = glfs_pread_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
113 io_u->offset, 0, gf_async_cb, io_u);
114 else if (io_u->ddir == DDIR_WRITE)
115 r = glfs_pwrite_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
116 io_u->offset, 0, gf_async_cb, io_u);
	/* TRIM maps to a discard; only available with newer gfapi */
117 #if defined(CONFIG_GF_TRIM)
118 else if (io_u->ddir == DDIR_TRIM)
119 r = glfs_discard_async(g->fd, io_u->offset, io_u->xfer_buflen,
122 else if (io_u->ddir == DDIR_DATASYNC)
123 r = glfs_fdatasync_async(g->fd, gf_async_cb, io_u);
124 else if (io_u->ddir == DDIR_SYNC)
125 r = glfs_fsync_async(g->fd, gf_async_cb, io_u);
	/* submission failed: report and complete the io_u immediately */
130 log_err("glfs queue failed.\n");
138 td_verror(td, io_u->error, "xfer");
139 return FIO_Q_COMPLETED;
/*
 * ->init() hook: run the common gfapi setup, then allocate the
 * per-thread completion array sized to the configured iodepth.
 * Forces thread mode (use_thread = 1) — the async callbacks and fio's
 * process model apparently don't mix; confirm rationale in full source.
 */
142 static int fio_gf_async_setup(struct thread_data *td)
148 log_err("the async interface is still very experimental...\n");
150 r = fio_gf_setup(td);
154 td->o.use_thread = 1;
156 g->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
157 if (!g->aio_events) {
/*
 * Engine registration table: wires the gfapi_async hooks into fio.
 * FIO_DISKLESSIO: no local block device backs this engine, so fio skips
 * disk-utilization accounting.
 */
166 static struct ioengine_ops ioengine = {
167 .name = "gfapi_async",
168 .version = FIO_IOOPS_VERSION,
169 .init = fio_gf_async_setup,
170 .cleanup = fio_gf_cleanup,
171 .queue = fio_gf_async_queue,
172 .open_file = fio_gf_open_file,
173 .close_file = fio_gf_close_file,
174 .unlink_file = fio_gf_unlink_file,
175 .get_file_size = fio_gf_get_file_size,
176 .getevents = fio_gf_getevents,
177 .event = fio_gf_event,
178 .io_u_init = fio_gf_io_u_init,
179 .io_u_free = fio_gf_io_u_free,
180 .options = gfapi_options,
181 .option_struct_size = sizeof(struct gf_options),
182 .flags = FIO_DISKLESSIO,
/* Constructor (fio_init attribute): register the engine at load time. */
185 static void fio_init fio_gf_register(void)
187 register_ioengine(&ioengine);
/* Destructor (fio_exit attribute): unregister the engine at unload time. */
190 static void fio_exit fio_gf_unregister(void)
192 unregister_ioengine(&ioengine);