2 * read/write() engine that uses syslet to be async
14 #ifdef FIO_HAVE_SYSLET
/*
 * Per-thread state for the syslet engine (struct header elided in this view).
 * NOTE(review): presumably there is an io_u event array field nearby that
 * nr_events counts into — confirm against the full file.
 */
18 unsigned int nr_events;
/* User-space async head handed to the kernel via async_register(). */
20 struct async_head_user *ahu;
/* Completion ring shared with the kernel; ring_index is the next slot to reap. */
21 struct syslet_uatom **ring;
22 unsigned int ring_index;
26 * Inspect the ring to see if we have completed events
/*
 * Reap atoms from the completion ring into sd->events so getevents can
 * hand them back. Each reaped slot is cleared and the ring index advances,
 * wrapping at the queue depth. (Several lines are elided in this view; the
 * surrounding loop and the io_u/ret extraction are not fully visible.)
 */
28 static void fio_syslet_complete(struct thread_data *td)
30 struct syslet_data *sd = td->io_ops->data;
33 struct syslet_uatom *atom;
/* Peek the current ring slot for a finished atom. */
37 atom = sd->ring[sd->ring_index];
/* Consume the slot and advance, wrapping at iodepth (ring size == iodepth). */
41 sd->ring[sd->ring_index] = NULL;
42 if (++sd->ring_index == td->iodepth)
/* Short transfer: record the residual byte count on the io_u. */
48 io_u->resid = io_u->xfer_buflen - ret;
/* Stash the completed io_u for fio_syslet_event() to return. */
52 sd->events[sd->nr_events++] = io_u;
/*
 * Gather at least 'min' completed events. First reap whatever is already
 * in the completion ring; if that is not enough, block in async_wait()
 * for the remainder. The timeout argument is unused.
 * NOTE(review): the return statement and the post-wait re-reap are elided
 * in this view — confirm the function returns sd->nr_events.
 */
56 static int fio_syslet_getevents(struct thread_data *td, int min,
58 struct timespec fio_unused *t)
60 struct syslet_data *sd = td->io_ops->data;
/* Pull in anything that has already completed. */
65 fio_syslet_complete(td);
68 * do we have enough immediate completions?
70 if (sd->nr_events >= (unsigned int) min)
74 * OK, we need to wait for some events...
/* Wait only for the shortfall, not for 'min' events from scratch. */
76 get_events = min - sd->nr_events;
77 ret = async_wait(get_events);
/*
 * Return the event'th completed io_u collected by fio_syslet_complete().
 */
87 static struct io_u *fio_syslet_event(struct thread_data *td, int event)
89 struct syslet_data *sd = td->io_ops->data;
91 return sd->events[event];
/*
 * Fill in a syslet atom: the syscall number 'nr', up to four argument
 * pointers, where the return value should be stored, flags, and a private
 * cookie (here always the owning io_u).
 * NOTE(review): the assignments of 'nr' and 'flags' into the atom are
 * elided in this view — confirm they are stored before the atom is used.
 */
94 static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
95 void *arg1, void *arg2, void *arg3, void *ret_ptr,
96 unsigned long flags, void *priv)
100 atom->ret_ptr = ret_ptr;
102 atom->arg_ptr[0] = arg0;
103 atom->arg_ptr[1] = arg1;
104 atom->arg_ptr[2] = arg2;
105 atom->arg_ptr[3] = arg3;
/* Unused argument slots must be NULL for the kernel side. */
106 atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
107 atom->private = priv;
111 * Use an fsync atom for sync (the syscall issued is __NR_fsync on f->fd)
113 static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
115 init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
116 &io_u->req.ret, 0, io_u);
/*
 * Build a pread/pwrite-style atom for a data-direction io_u: fd, buffer,
 * length and offset, with the result landing in io_u->req.ret.
 * NOTE(review): the selection of the syscall number 'nr' for the write
 * branch is elided in this view — presumably __NR_pread/__NR_pwrite.
 */
119 static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
126 if (io_u->ddir == DDIR_READ)
131 init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
132 &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
/*
 * ->prep hook: build the syslet atom for this io_u, dispatching on the
 * data direction (sync flush vs. read/write).
 */
135 static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
137 struct fio_file *f = io_u->file;
139 if (io_u->ddir == DDIR_SYNC)
140 fio_syslet_prep_sync(io_u, f);
142 fio_syslet_prep_rw(io_u, f);
/*
 * ->queue hook: fire the prepared atom via async_exec(). A NULL return
 * means the kernel queued it asynchronously; a non-NULL return means it
 * completed synchronously and we report the result immediately.
 * (Parts of the error/queued paths are elided in this view.)
 */
147 static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
149 struct syslet_data *sd = td->io_ops->data;
153 * On sync completion, the atom is returned. So on NULL return
154 * it's queued asynchronously.
156 if (!async_exec(&io_u->req.atom))
/* Synchronous completion: short transfer => record residual bytes. */
163 if (ret != (long) io_u->xfer_buflen) {
165 io_u->resid = io_u->xfer_buflen - ret;
167 return FIO_Q_COMPLETED;
/* Hand the completed io_u straight to the event list. */
173 sd->events[sd->nr_events++] = io_u;
/* Record the error on the thread before reporting completion. */
175 td_verror(td, io_u->error);
177 return FIO_Q_COMPLETED;
/*
 * Allocate the async head and the completion ring (one slot per queued
 * atom), then register them with the kernel. Returns non-zero on
 * registration failure (the kernel lacks syslet support).
 * NOTE(review): the malloc() results are used unchecked — acceptable for
 * a test tool at init time, but worth confirming against project policy.
 */
180 static int async_head_init(struct syslet_data *sd, unsigned int depth)
182 unsigned long ring_size;
184 sd->ahu = malloc(sizeof(struct async_head_user));
185 memset(sd->ahu, 0, sizeof(struct async_head_user));
/* Ring holds 'depth' atom pointers, zeroed so empty slots read as NULL. */
187 ring_size = sizeof(struct syslet_uatom *) * depth;
188 sd->ring = malloc(ring_size);
189 memset(sd->ring, 0, ring_size);
191 sd->ahu->completion_ring = sd->ring;
192 sd->ahu->ring_size_bytes = ring_size;
/* -1: let the kernel pick how many async threads to use. */
193 sd->ahu->max_nr_threads = -1;
195 if (async_register(sd->ahu, sizeof(*sd->ahu)) < 0) {
196 perror("async_register");
197 fprintf(stderr, "fio: syslet likely not supported\n");
/*
 * Tear down the async head registered in async_head_init(), reporting
 * (but not acting on) an unregister failure.
 */
206 static void async_head_exit(struct syslet_data *sd)
208 if (async_unregister(sd->ahu, sizeof(*sd->ahu)) < 0)
/* Fixed: report the call that actually failed (was "async_register"). */
209 perror("async_unregister");
/*
 * ->cleanup hook: unregister the async head and drop the per-thread
 * state, clearing the engine data pointer so it cannot be reused.
 * (The frees of sd and its members are elided in this view.)
 */
215 static void fio_syslet_cleanup(struct thread_data *td)
217 struct syslet_data *sd = td->io_ops->data;
223 td->io_ops->data = NULL;
/*
 * ->init hook: allocate the engine state and the per-depth event array,
 * then register the async head. Failure of async_head_init() doubles as
 * runtime detection of missing syslet support.
 * NOTE(review): malloc() results are used unchecked here, as elsewhere
 * in this engine.
 */
227 static int fio_syslet_init(struct thread_data *td)
229 struct syslet_data *sd;
232 sd = malloc(sizeof(*sd));
233 memset(sd, 0, sizeof(*sd));
/* One completed-io_u slot per possible in-flight request. */
234 sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
235 memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);
238 * This will handily fail for kernels where syslet isn't available
240 if (async_head_init(sd, td->iodepth)) {
246 td->io_ops->data = sd;
/*
 * Engine ops table wiring the syslet callbacks into fio.
 * (The .name member is elided in this view.)
 */
250 static struct ioengine_ops ioengine = {
252 .version = FIO_IOOPS_VERSION,
253 .init = fio_syslet_init,
254 .prep = fio_syslet_prep,
255 .queue = fio_syslet_queue,
256 .getevents = fio_syslet_getevents,
257 .event = fio_syslet_event,
258 .cleanup = fio_syslet_cleanup,
261 #else /* FIO_HAVE_SYSLET */
264 * When we have a proper configure system in place, we simply won't build
265 * and install this io engine. For now install a crippled version that
266 * just complains and fails to load.
/*
 * Stub ->init for builds without syslet support: complain and fail,
 * so selecting this engine aborts the job cleanly.
 */
268 static int fio_syslet_init(struct thread_data fio_unused *td)
270 fprintf(stderr, "fio: syslet not available\n");
/*
 * Crippled ops table: only ->init is set, and it always fails.
 * (The .name member is elided in this view.)
 */
274 static struct ioengine_ops ioengine = {
276 .version = FIO_IOOPS_VERSION,
277 .init = fio_syslet_init,
280 #endif /* FIO_HAVE_SYSLET */
/* Constructor: make the engine selectable by name at fio startup. */
282 static void fio_init fio_syslet_register(void)
284 register_ioengine(&ioengine);
/* Destructor: remove the engine from the registry at fio exit. */
287 static void fio_exit fio_syslet_unregister(void)
289 unregister_ioengine(&ioengine);