2 * read/write() engine that uses syslet to be async
14 #ifdef FIO_HAVE_SYSLET
18 unsigned int nr_events;
20 struct syslet_uatom **ring;
21 unsigned int ring_index;
25 * Inspect the ring to see if we have completed events
/*
 * Drain completions that the kernel has posted to the user-visible
 * completion ring: each consumed ring slot is cleared, the ring index
 * advanced (wrapping at td->iodepth), and the finished io_u queued into
 * sd->events[] for a later ->event() call.
 * NOTE(review): interior lines of this listing are missing, so the loop
 * and brace structure is not visible here — comments describe only the
 * visible statements.
 */
27 static void fio_syslet_complete(struct thread_data *td)
29 struct syslet_data *sd = td->io_ops->data;
32 struct syslet_uatom *atom;
/* Consume the current ring slot... */
36 atom = sd->ring[sd->ring_index];
/* ...clear it so the kernel can reuse it, and wrap the index at iodepth. */
40 sd->ring[sd->ring_index] = NULL;
41 if (++sd->ring_index == td->iodepth)
/* Short transfer: record the residual byte count on the io_u.
 * NOTE(review): 'ret' is defined on lines missing from this excerpt —
 * presumably the atom's return value; confirm against the full source. */
47 io_u->resid = io_u->xfer_buflen - ret;
/* Hand the completed io_u to the event array for fio to reap. */
51 sd->events[sd->nr_events++] = io_u;
/*
 * ->getevents() hook: reap whatever has already completed, and if that
 * yields fewer than 'min' events, block in async_wait() for the rest.
 * NOTE(review): the function's tail (including its return statement and
 * the handling of async_wait()'s result) is missing from this excerpt.
 */
55 static int fio_syslet_getevents(struct thread_data *td, int min,
57 struct timespec fio_unused *t)
59 struct syslet_data *sd = td->io_ops->data;
/* First pull in any completions already sitting in the ring. */
64 fio_syslet_complete(td);
67 * do we have enough immediate completions?
69 if (sd->nr_events >= (unsigned int) min)
73 * OK, we need to wait for some events...
/* Only wait for the shortfall, not 'min' events from scratch. */
75 get_events = min - sd->nr_events;
76 ret = async_wait(get_events);
/*
 * ->event() hook: return the io_u stored at 'event' by a previous
 * fio_syslet_complete()/queue pass.  No bounds checking — fio only asks
 * for indices it was told exist via ->getevents().
 */
86 static struct io_u *fio_syslet_event(struct thread_data *td, int event)
88 struct syslet_data *sd = td->io_ops->data;
90 return sd->events[event];
/*
 * Fill in a syslet_uatom: syscall number 'nr', up to three argument
 * pointers (the remaining three slots are NULLed), a location for the
 * syscall's return value, behavior flags, a private cookie, and a link
 * to the next atom in the chain.
 * NOTE(review): lines assigning atom->flags, atom->nr and atom->next are
 * missing from this excerpt — presumably set from 'flags', 'nr' and
 * 'next'; confirm against the full source.
 */
93 static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
94 void *arg1, void *arg2, void *ret_ptr,
95 unsigned long flags, void *priv,struct syslet_uatom *next)
99 atom->ret_ptr = ret_ptr;
101 atom->arg_ptr[0] = arg0;
102 atom->arg_ptr[1] = arg1;
103 atom->arg_ptr[2] = arg2;
/* Unused argument slots must be NULL for the kernel's atom decoder. */
104 atom->arg_ptr[3] = atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
/* Stash the caller's cookie (here: the owning io_u, or NULL). */
105 atom->private = priv;
109 * Use seek atom for sync
/*
 * Build a single-atom chain that issues fsync(f->fd).  Reuses the io_u's
 * seek_atom storage; 'io_u' is passed as the atom's private cookie so the
 * completion path can find it.  No 'next' atom — the chain ends here.
 */
111 static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
113 init_atom(&io_u->seek_atom.atom, __NR_fsync, &f->fd, NULL, NULL,
114 &io_u->seek_atom.ret, SYSLET_STOP_ON_NEGATIVE, io_u, NULL);
/*
 * Build a two-atom chain for a read or write: an lseek(fd, offset,
 * SEEK_SET) atom linked to the read/write atom proper.  The seek atom is
 * marked NO_COMPLETE (no completion-ring entry) and SKIP_TO_NEXT_ON_STOP
 * so only the final r/w atom reports completion.
 * NOTE(review): lines selecting 'nr' (__NR_read vs a presumed __NR_write
 * else-branch) and the tail of the second init_atom() call are missing
 * from this excerpt.
 */
117 static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
124 io_u->seek_atom.cmd = SEEK_SET;
125 init_atom(&io_u->seek_atom.atom, __NR_lseek, &f->fd, &io_u->offset,
126 &io_u->seek_atom.cmd, &io_u->seek_atom.ret,
127 SYSLET_STOP_ON_NEGATIVE | SYSLET_NO_COMPLETE |
128 SYSLET_SKIP_TO_NEXT_ON_STOP,
/* Seek atom carries no private cookie; it chains into the r/w atom. */
129 NULL, &io_u->rw_atom.atom);
134 if (io_u->ddir == DDIR_READ)
139 init_atom(&io_u->rw_atom.atom, nr, &f->fd, &io_u->xfer_buf,
140 &io_u->xfer_buflen, &io_u->rw_atom.ret,
141 SYSLET_STOP_ON_NEGATIVE | SYSLET_SKIP_TO_NEXT_ON_STOP,
/*
 * ->prep() hook: build the appropriate atom chain on the io_u — an fsync
 * atom for DDIR_SYNC, otherwise the lseek+read/write chain.
 * NOTE(review): the 'else' keyword and return statement fall on lines
 * missing from this excerpt.
 */
145 static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
147 struct fio_file *f = io_u->file;
149 if (io_u->ddir == DDIR_SYNC)
150 fio_syslet_prep_sync(io_u, f);
152 fio_syslet_prep_rw(io_u, f);
/*
 * ->queue() hook: kick off the io_u's atom chain with async_exec().
 * If the kernel ran it synchronously ('done' non-NULL, per the syslet
 * API), harvest the result inline: check for a short transfer, queue the
 * io_u as an immediate event, or report the error via td_verror().
 * NOTE(review): the branch structure, 'ret' declaration and both return
 * paths fall on lines missing from this excerpt.
 */
157 static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
159 struct syslet_data *sd = td->io_ops->data;
160 struct syslet_uatom *done;
163 done = async_exec(&io_u->seek_atom.atom);
/* Synchronous completion: pull the r/w syscall's return value. */
170 ret = io_u->rw_atom.ret;
171 if (ret != (long) io_u->xfer_buflen) {
/* Short transfer — record the residual for fio's accounting. */
173 io_u->resid = io_u->xfer_buflen - ret;
181 sd->events[sd->nr_events++] = io_u;
183 td_verror(td, io_u->error);
/*
 * Allocate the completion ring (one uatom pointer per queued I/O) and
 * register it with the kernel via async_register().  Returns non-zero on
 * failure (per the caller's check in fio_syslet_init); the success-path
 * return falls on lines missing from this excerpt.
 * NOTE(review): malloc() result is used unchecked — worth guarding, but
 * the surrounding error-path lines are not visible here.
 */
188 static int async_head_init(struct syslet_data *sd, unsigned int depth)
190 struct async_head_user ahu;
191 unsigned long ring_size;
193 ring_size = sizeof(struct syslet_uatom *) * depth;
194 sd->ring = malloc(ring_size);
/* Kernel expects an all-NULL ring at registration time. */
195 memset(sd->ring, 0, ring_size);
197 memset(&ahu, 0, sizeof(ahu));
198 ahu.completion_ring = sd->ring;
199 ahu.ring_size_bytes = ring_size;
/* -1: let the kernel pick how many async threads to use. */
200 ahu.max_nr_threads = -1;
202 if (async_register(&ahu, sizeof(ahu)) < 0) {
203 perror("async_register");
/* Registration failing is the expected symptom on non-syslet kernels. */
204 fprintf(stderr, "fio: syslet likely not supported\n");
/*
 * Tear down the kernel-side syslet registration created by
 * async_head_init(): rebuild the async_head_user descriptor for the same
 * ring and hand it to async_unregister().
 * Fix: the failure path previously printed perror("async_register") — a
 * copy-paste from async_head_init() that misattributed the failing call;
 * it now names async_unregister.
 * NOTE(review): freeing of sd->ring presumably happens on lines missing
 * from this excerpt — confirm against the full source.
 */
212 static void async_head_exit(struct syslet_data *sd, unsigned int depth)
214 struct async_head_user ahu;
216 memset(&ahu, 0, sizeof(ahu));
/* Must describe the exact ring that was registered. */
217 ahu.completion_ring = sd->ring;
218 ahu.ring_size_bytes = sizeof(struct syslet_uatom *) * depth;
220 if (async_unregister(&ahu, sizeof(ahu)) < 0)
221 perror("async_unregister");
/*
 * ->cleanup() hook: unregister the completion ring from the kernel and
 * detach the per-thread engine data.
 * NOTE(review): the NULL-guard on 'sd' and the free()s of sd->events /
 * sd->ring / sd presumably sit on lines missing from this excerpt —
 * confirm against the full source.
 */
224 static void fio_syslet_cleanup(struct thread_data *td)
226 struct syslet_data *sd = td->io_ops->data;
229 async_head_exit(sd, td->iodepth);
/* Prevent a double cleanup from touching freed state. */
232 td->io_ops->data = NULL;
/*
 * ->init() hook: allocate the per-thread syslet_data, its events array
 * (one io_u pointer per queued I/O), and register the completion ring.
 * NOTE(review): both malloc() results are used unchecked, and the
 * failure-path body after async_head_init() (presumably freeing and
 * returning non-zero) plus the success return fall on lines missing
 * from this excerpt.
 */
236 static int fio_syslet_init(struct thread_data *td)
238 struct syslet_data *sd;
241 sd = malloc(sizeof(*sd));
242 memset(sd, 0, sizeof(*sd));
243 sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
244 memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);
247 * This will handily fail for kernels where syslet isn't available
249 if (async_head_init(sd, td->iodepth)) {
/* Success: publish the engine data for the other hooks. */
255 td->io_ops->data = sd;
/*
 * Engine descriptor wired into fio's ioengine registry: the full async
 * hook set (init/prep/queue/getevents/event/cleanup).
 * NOTE(review): the .name initializer (and any .flags) fall on lines
 * missing from this excerpt — confirm against the full source.
 */
259 static struct ioengine_ops ioengine = {
261 .version = FIO_IOOPS_VERSION,
262 .init = fio_syslet_init,
263 .prep = fio_syslet_prep,
264 .queue = fio_syslet_queue,
265 .getevents = fio_syslet_getevents,
266 .event = fio_syslet_event,
267 .cleanup = fio_syslet_cleanup,
270 #else /* FIO_HAVE_SYSLET */
273 * When we have a proper configure system in place, we simply won't build
274 * and install this io engine. For now install a crippled version that
275 * just complains and fails to load.
/*
 * Stub ->init() for builds without FIO_HAVE_SYSLET: complain on stderr
 * and (per the comment above) fail to load.  The return statement falls
 * on a line missing from this excerpt.
 */
277 static int fio_syslet_init(struct thread_data fio_unused *td)
279 fprintf(stderr, "fio: syslet not available\n");
/*
 * Crippled engine descriptor for non-syslet builds: only .init is wired
 * up, and it always fails, so the engine never loads.
 * NOTE(review): the .name initializer falls on a line missing from this
 * excerpt.
 */
283 static struct ioengine_ops ioengine = {
285 .version = FIO_IOOPS_VERSION,
286 .init = fio_syslet_init,
289 #endif /* FIO_HAVE_SYSLET */
/* Constructor (fio_init): register this engine with fio at load time. */
291 static void fio_init fio_syslet_register(void)
293 register_ioengine(&ioengine);
/* Destructor (fio_exit): remove the engine from fio's registry on unload. */
296 static void fio_exit fio_syslet_unregister(void)
298 unregister_ioengine(&ioengine);