 * IO engine using GlusterFS's gfapi async interface
13 static ulong cb_count = 0, issued = 0;
15 static struct io_u *fio_gf_event(struct thread_data *td, int event)
17 struct gf_data *gf_data = td->io_ops->data;
18 dprint(FD_IO, "%s\n", __FUNCTION__);
19 return gf_data->aio_events[event];
22 static int fio_gf_getevents(struct thread_data *td, unsigned int min,
23 unsigned int max, const struct timespec *t)
25 struct gf_data *g = td->io_ops->data;
26 unsigned int events = 0;
29 struct fio_gf_iou *io = NULL;
31 dprint(FD_IO, "%s\n", __FUNCTION__);
33 io_u_qiter(&td->io_u_all, io_u, i) {
34 if (!(io_u->flags & IO_U_F_FLIGHT))
37 io = (struct fio_gf_iou *)io_u->engine_data;
39 if (io && io->io_complete) {
41 g->aio_events[events] = io_u;
59 static void fio_gf_io_u_free(struct thread_data *td, struct io_u *io_u)
61 struct fio_gf_iou *io = io_u->engine_data;
64 if (io->io_complete) {
65 log_err("incomplete IO found.\n");
67 io_u->engine_data = NULL;
70 log_err("issued %lu finished %lu\n", issued, cb_count);
73 static int fio_gf_io_u_init(struct thread_data *td, struct io_u *io_u)
75 struct fio_gf_iou *io = NULL;
77 dprint(FD_FILE, "%s\n", __FUNCTION__);
79 if (!io_u->engine_data) {
80 io = malloc(sizeof(struct fio_gf_iou));
82 td_verror(td, errno, "malloc");
87 io_u->engine_data = io;
92 static void gf_async_cb(glfs_fd_t * fd, ssize_t ret, void *data)
94 struct io_u *io_u = (struct io_u *)data;
95 struct fio_gf_iou *iou = (struct fio_gf_iou *)io_u->engine_data;
97 dprint(FD_IO, "%s ret %lu\n", __FUNCTION__, ret);
102 static int fio_gf_async_queue(struct thread_data fio_unused * td,
105 struct gf_data *g = td->io_ops->data;
108 dprint(FD_IO, "%s op %s\n", __FUNCTION__,
109 io_u->ddir == DDIR_READ ? "read" : io_u->ddir ==
110 DDIR_WRITE ? "write" : io_u->ddir ==
111 DDIR_SYNC ? "sync" : "unknown");
113 fio_ro_check(td, io_u);
115 if (io_u->ddir == DDIR_READ)
116 r = glfs_pread_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
117 io_u->offset, 0, gf_async_cb,
119 else if (io_u->ddir == DDIR_WRITE)
120 r = glfs_pwrite_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
121 io_u->offset, 0, gf_async_cb,
123 else if (io_u->ddir == DDIR_SYNC) {
124 r = glfs_fsync_async(g->fd, gf_async_cb, (void *)io_u);
126 log_err("unsupported operation.\n");
127 io_u->error = -EINVAL;
131 log_err("glfs failed.\n");
140 td_verror(td, io_u->error, "xfer");
141 return FIO_Q_COMPLETED;
144 int fio_gf_async_setup(struct thread_data *td)
147 struct gf_data *g = NULL;
150 log_err("the async interface is still very experimental...\n");
152 r = fio_gf_setup(td);
156 td->o.use_thread = 1;
157 g = td->io_ops->data;
158 g->aio_events = malloc(td->o.iodepth * sizeof(struct io_u *));
159 if (!g->aio_events) {
165 memset(g->aio_events, 0, td->o.iodepth * sizeof(struct io_u *));
171 static int fio_gf_async_prep(struct thread_data *td, struct io_u *io_u)
173 dprint(FD_FILE, "%s\n", __FUNCTION__);
175 if (!ddir_rw(io_u->ddir))
181 static struct ioengine_ops ioengine = {
182 .name = "gfapi_async",
183 .version = FIO_IOOPS_VERSION,
184 .init = fio_gf_async_setup,
185 .cleanup = fio_gf_cleanup,
186 .prep = fio_gf_async_prep,
187 .queue = fio_gf_async_queue,
188 .open_file = fio_gf_open_file,
189 .close_file = fio_gf_close_file,
190 .unlink_file = fio_gf_unlink_file,
191 .get_file_size = fio_gf_get_file_size,
192 .getevents = fio_gf_getevents,
193 .event = fio_gf_event,
194 .io_u_init = fio_gf_io_u_init,
195 .io_u_free = fio_gf_io_u_free,
196 .options = gfapi_options,
197 .option_struct_size = sizeof(struct gf_options),
198 .flags = FIO_DISKLESSIO,
201 static void fio_init fio_gf_register(void)
203 register_ioengine(&ioengine);
206 static void fio_exit fio_gf_unregister(void)
208 unregister_ioengine(&ioengine);