engines/io_uring: ensure sqe stores are ordered SQ ring tail update
[fio.git] / engines / glusterfs_async.c
CommitLineData
cc47f094 1/*
2 * glusterfs engine
3 *
4 * IO engine using Glusterfs's gfapi async interface
5 *
6 */
7#include "gfapi.h"
88db2717 8#define NOT_YET 1
/*
 * Per-io_u engine-private state, allocated in fio_gf_io_u_init and
 * attached via io_u->engine_data.
 */
struct fio_gf_iou {
	struct io_u *io_u;	/* back-pointer to the owning io_u */
	int io_complete;	/* set to 1 by gf_async_cb when the async op finishes */
};
13
14static struct io_u *fio_gf_event(struct thread_data *td, int event)
15{
565e784d 16 struct gf_data *gf_data = td->io_ops_data;
8859391b 17
cc47f094 18 dprint(FD_IO, "%s\n", __FUNCTION__);
19 return gf_data->aio_events[event];
20}
21
22static int fio_gf_getevents(struct thread_data *td, unsigned int min,
1f440ece 23 unsigned int max, const struct timespec *t)
cc47f094 24{
565e784d 25 struct gf_data *g = td->io_ops_data;
cc47f094 26 unsigned int events = 0;
27 struct io_u *io_u;
8859391b 28 int i;
cc47f094 29
30 dprint(FD_IO, "%s\n", __FUNCTION__);
31 do {
32 io_u_qiter(&td->io_u_all, io_u, i) {
8859391b
JA
33 struct fio_gf_iou *io;
34
cc47f094 35 if (!(io_u->flags & IO_U_F_FLIGHT))
36 continue;
37
8859391b
JA
38 io = io_u->engine_data;
39 if (io->io_complete) {
cc47f094 40 io->io_complete = 0;
41 g->aio_events[events] = io_u;
42 events++;
88db2717 43
b29c813f
JA
44 if (events >= max)
45 break;
cc47f094 46 }
47
48 }
49 if (events < min)
6fa14b99 50 usleep(100);
cc47f094 51 else
52 break;
53
54 } while (1);
55
56 return events;
57}
58
88db2717 59static void fio_gf_io_u_free(struct thread_data *td, struct io_u *io_u)
cc47f094 60{
88db2717 61 struct fio_gf_iou *io = io_u->engine_data;
62
63 if (io) {
8859391b 64 if (io->io_complete)
b29c813f 65 log_err("incomplete IO found.\n");
88db2717 66 io_u->engine_data = NULL;
67 free(io);
68 }
69}
cc47f094 70
88db2717 71static int fio_gf_io_u_init(struct thread_data *td, struct io_u *io_u)
72{
04e9eb82 73 struct fio_gf_iou *io;
88db2717 74 dprint(FD_FILE, "%s\n", __FUNCTION__);
04e9eb82 75
76 io = malloc(sizeof(struct fio_gf_iou));
77 if (!io) {
78 td_verror(td, errno, "malloc");
79 return 1;
80 }
81 io->io_complete = 0;
82 io->io_u = io_u;
83 io_u->engine_data = io;
cc47f094 84 return 0;
85}
86
b29c813f 87static void gf_async_cb(glfs_fd_t * fd, ssize_t ret, void *data)
cc47f094 88{
8859391b
JA
89 struct io_u *io_u = data;
90 struct fio_gf_iou *iou = io_u->engine_data;
cc47f094 91
f6149216 92 dprint(FD_IO, "%s ret %zd\n", __FUNCTION__, ret);
b29c813f 93 iou->io_complete = 1;
cc47f094 94}
95
d3b07186
BVA
96static enum fio_q_status fio_gf_async_queue(struct thread_data fio_unused * td,
97 struct io_u *io_u)
cc47f094 98{
565e784d 99 struct gf_data *g = td->io_ops_data;
8859391b 100 int r;
cc47f094 101
8859391b 102 dprint(FD_IO, "%s op %s\n", __FUNCTION__, io_ddir_name(io_u->ddir));
88db2717 103
cc47f094 104 fio_ro_check(td, io_u);
105
106 if (io_u->ddir == DDIR_READ)
b29c813f 107 r = glfs_pread_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
8859391b 108 io_u->offset, 0, gf_async_cb, io_u);
cc47f094 109 else if (io_u->ddir == DDIR_WRITE)
b29c813f 110 r = glfs_pwrite_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
8859391b 111 io_u->offset, 0, gf_async_cb, io_u);
6876c98c
JA
112#if defined(CONFIG_GF_TRIM)
113 else if (io_u->ddir == DDIR_TRIM)
114 r = glfs_discard_async(g->fd, io_u->offset, io_u->xfer_buflen,
115 gf_async_cb, io_u);
116#endif
656955eb
JA
117 else if (io_u->ddir == DDIR_DATASYNC)
118 r = glfs_fdatasync_async(g->fd, gf_async_cb, io_u);
8859391b
JA
119 else if (io_u->ddir == DDIR_SYNC)
120 r = glfs_fsync_async(g->fd, gf_async_cb, io_u);
121 else
97068f80 122 r = EINVAL;
8859391b 123
b29c813f 124 if (r) {
8859391b 125 log_err("glfs queue failed.\n");
b29c813f
JA
126 io_u->error = r;
127 goto failed;
128 }
cc47f094 129 return FIO_Q_QUEUED;
130
131failed:
132 io_u->error = r;
133 td_verror(td, io_u->error, "xfer");
134 return FIO_Q_COMPLETED;
135}
136
a89ba4b1 137static int fio_gf_async_setup(struct thread_data *td)
88db2717 138{
8859391b
JA
139 struct gf_data *g;
140 int r;
7d4a8e7e 141
88db2717 142#if defined(NOT_YET)
7d4a8e7e 143 log_err("the async interface is still very experimental...\n");
88db2717 144#endif
b29c813f 145 r = fio_gf_setup(td);
8859391b 146 if (r)
b29c813f 147 return r;
8859391b 148
6fa14b99 149 td->o.use_thread = 1;
565e784d 150 g = td->io_ops_data;
8859391b 151 g->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
b29c813f
JA
152 if (!g->aio_events) {
153 r = -ENOMEM;
154 fio_gf_cleanup(td);
155 return r;
156 }
88db2717 157
b29c813f 158 return r;
88db2717 159}
160
/*
 * Async gfapi ioengine: submits via glfs_*_async and reaps completions
 * by polling the per-io_u flag set in gf_async_cb. FIO_DISKLESSIO —
 * no local files are opened.
 */
static struct ioengine_ops ioengine = {
	.name = "gfapi_async",
	.version = FIO_IOOPS_VERSION,
	.init = fio_gf_async_setup,
	.cleanup = fio_gf_cleanup,
	.queue = fio_gf_async_queue,
	.open_file = fio_gf_open_file,
	.close_file = fio_gf_close_file,
	.unlink_file = fio_gf_unlink_file,
	.get_file_size = fio_gf_get_file_size,
	.getevents = fio_gf_getevents,
	.event = fio_gf_event,
	.io_u_init = fio_gf_io_u_init,
	.io_u_free = fio_gf_io_u_free,
	.options = gfapi_options,
	.option_struct_size = sizeof(struct gf_options),
	.flags = FIO_DISKLESSIO,
};
179
/* Constructor hook: register this engine with fio at load time. */
static void fio_init fio_gf_register(void)
{
	register_ioengine(&ioengine);
}
184
/* Destructor hook: unregister the engine when fio unloads. */
static void fio_exit fio_gf_unregister(void)
{
	unregister_ioengine(&ioengine);
}