4 * IO engine using Glusterfs's gfapi async interface
14 static struct io_u *fio_gf_event(struct thread_data *td, int event)
16 struct gf_data *gf_data = td->io_ops->data;
17 dprint(FD_IO, "%s\n", __FUNCTION__);
18 return gf_data->aio_events[event];
/*
 * Reap completed asynchronous io_us.
 *
 * Walks every io_u owned by this thread and collects those that are
 * in flight and whose per-io_u context (struct fio_gf_iou, hung off
 * io_u->engine_data) has io_complete set by gf_async_cb().  Collected
 * io_us are stored in g->aio_events, where fio_gf_event() later
 * retrieves them by index.
 *
 * NOTE(review): this chunk is a fragment — the surrounding loop and the
 * code honouring the min/max/t reaping contract fall in the elided
 * lines and are not visible here.
 */
21 static int fio_gf_getevents(struct thread_data *td, unsigned int min,
22 unsigned int max, struct timespec *t)
24 struct gf_data *g = td->io_ops->data;
25 unsigned int events = 0;
28 struct fio_gf_iou *io = NULL;
30 dprint(FD_IO, "%s\n", __FUNCTION__);
32 io_u_qiter(&td->io_u_all, io_u, i) {
/* Only io_us currently submitted to gfapi can complete. */
33 if (!(io_u->flags & IO_U_F_FLIGHT))
36 io = (struct fio_gf_iou *)io_u->engine_data;
/* gf_async_cb() flags completion; hand the io_u to the event array. */
38 if (io && io->io_complete) {
40 g->aio_events[events] = io_u;
55 #define LAST_POS(f) ((f)->engine_data)
56 static int fio_gf_prep(struct thread_data *td, struct io_u *io_u)
58 struct fio_file *f = io_u->file;
59 struct gf_data *g = td->io_ops->data;
60 struct fio_gf_iou *io = NULL;
62 dprint(FD_FILE, "fio prep\n");
64 io = malloc(sizeof(struct fio_gf_iou));
66 td_verror(td, errno, "malloc");
71 io_u->engine_data = io;
73 g->aio_events = malloc(td->o.iodepth * sizeof(struct io_u *));
75 td_verror(td, errno, "malloc");
80 memset(g->aio_events, 0, td->o.iodepth * sizeof(struct io_u *));
82 if (!ddir_rw(io_u->ddir))
85 if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
88 if (glfs_lseek(g->fd, io_u->offset, SEEK_SET) < 0) {
89 td_verror(td, errno, "lseek");
92 io = malloc(sizeof(struct fio_gf_iou));
94 td_verror(td, errno, "malloc");
101 static void gf_async_cb(glfs_fd_t *fd, ssize_t ret, void *data)
103 struct io_u *io_u = (struct io_u *)data;
104 struct fio_gf_iou *iou =
105 (struct fio_gf_iou *)io_u->engine_data;
107 dprint(FD_IO, "%s ret %lu\n", __FUNCTION__, ret);
108 iou->io_complete = 1;
111 static int fio_gf_async_queue(struct thread_data fio_unused *td, struct io_u *io_u)
113 struct gf_data *g = td->io_ops->data;
116 fio_ro_check(td, io_u);
118 if (io_u->ddir == DDIR_READ)
119 r = glfs_pread_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset,
120 0, gf_async_cb, (void *)io_u);
121 else if (io_u->ddir == DDIR_WRITE)
122 r = glfs_pread_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset,
123 0, gf_async_cb, (void *)io_u);
124 else if (io_u->ddir == DDIR_SYNC) {
125 r = glfs_fsync_async(g->fd, gf_async_cb, (void *)io_u);
127 log_err("unsupported operation.\n");
128 io_u->error = -EINVAL;
132 log_err("glfs failed.\n");
141 td_verror(td, io_u->error, "xfer");
142 return FIO_Q_COMPLETED;
/*
 * fio ioengine descriptor for the async gfapi engine.  FIO_DISKLESSIO
 * tells fio there is no local block device behind this engine.
 * NOTE(review): fragment — the closing "};" and any elided members fall
 * outside the visible lines.
 */
146 static struct ioengine_ops ioengine = {
147 .name = "gfapi_async",
148 .version = FIO_IOOPS_VERSION,
149 .init = fio_gf_setup,
150 .cleanup = fio_gf_cleanup,
152 .queue = fio_gf_async_queue,
153 .open_file = fio_gf_open_file,
154 .close_file = fio_gf_close_file,
155 .get_file_size = fio_gf_get_file_size,
156 .getevents = fio_gf_getevents,
157 .event = fio_gf_event,
158 .options = gfapi_options,
159 .option_struct_size = sizeof(struct gf_options),
160 .flags = FIO_DISKLESSIO,
/* Constructor: register this engine with fio at program start. */
163 static void fio_init fio_gf_register(void)
165 register_ioengine(&ioengine);
/* Destructor: unregister the engine at program exit. */
168 static void fio_exit fio_gf_unregister(void)
170 unregister_ioengine(&ioengine);