2 * read/write() engine that uses syslet to be async
14 #ifdef FIO_HAVE_SYSLET
	/* number of completed io_us parked in events[], reaped by ->event() */
	unsigned int nr_events;

	/* user-side async head shared with the kernel syslet code */
	struct async_head_user ahu;
	/* completion ring (one slot per iodepth), drained in fio_syslet_complete() */
	struct syslet_uatom **ring;

	/* chain of atoms built by ->queue(), submitted by ->commit() */
	struct syslet_uatom *head, *tail;
26 static void fio_syslet_complete_atom(struct thread_data *td,
27 struct syslet_uatom *atom)
29 struct syslet_data *sd = td->io_ops->data;
30 struct syslet_uatom *last;
34 * complete from the beginning of the sequence up to (and
35 * including) this atom
39 atom = io_u->req.head;
42 * now complete in right order
50 io_u->resid = io_u->xfer_buflen - ret;
54 assert(sd->nr_events < td->iodepth);
55 sd->events[sd->nr_events++] = io_u;
67 * Inspect the ring to see if we have completed events
69 static void fio_syslet_complete(struct thread_data *td)
71 struct syslet_data *sd = td->io_ops->data;
74 struct syslet_uatom *atom;
76 atom = sd->ring[sd->ahu.user_ring_idx];
80 sd->ring[sd->ahu.user_ring_idx] = NULL;
81 if (++sd->ahu.user_ring_idx == td->iodepth)
82 sd->ahu.user_ring_idx = 0;
84 fio_syslet_complete_atom(td, atom);
88 static int fio_syslet_getevents(struct thread_data *td, int min,
90 struct timespec fio_unused *t)
92 struct syslet_data *sd = td->io_ops->data;
96 fio_syslet_complete(td);
99 * do we have enough immediate completions?
101 if (sd->nr_events >= (unsigned int) min)
105 * OK, we need to wait for some events...
107 ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
117 static struct io_u *fio_syslet_event(struct thread_data *td, int event)
119 struct syslet_data *sd = td->io_ops->data;
121 return sd->events[event];
124 static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
125 void *arg1, void *arg2, void *arg3, void *ret_ptr,
126 unsigned long flags, void *priv)
130 atom->ret_ptr = ret_ptr;
132 atom->arg_ptr[0] = arg0;
133 atom->arg_ptr[1] = arg1;
134 atom->arg_ptr[2] = arg2;
135 atom->arg_ptr[3] = arg3;
136 atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
137 atom->private = priv;
141 * Use seek atom for sync
143 static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
145 init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
146 &io_u->req.ret, 0, io_u);
149 static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
156 if (io_u->ddir == DDIR_READ)
161 init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
162 &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
165 static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
167 struct fio_file *f = io_u->file;
169 if (io_u->ddir == DDIR_SYNC)
170 fio_syslet_prep_sync(io_u, f);
172 fio_syslet_prep_rw(io_u, f);
/*
 * Entry point handed to the kernel for async (cachemiss) threads;
 * it must never return, so keep re-entering the async thread handler.
 */
static void cachemiss_thread_start(void)
{
	while (1)
		async_thread(NULL, NULL);
}
#define THREAD_STACK_SIZE (16384)

/*
 * Allocate a stack for an async thread and return its top address
 * (stacks grow downward). Returns 0 if the allocation fails.
 *
 * Fix: the original did arithmetic on the raw malloc() result, so an
 * allocation failure produced pointer arithmetic on NULL (undefined
 * behavior) and a garbage non-zero "stack". These stacks are never
 * freed; they live for the duration of the job.
 */
static unsigned long thread_stack_alloc(void)
{
	void *stack = malloc(THREAD_STACK_SIZE);

	if (!stack)
		return 0;

	return (unsigned long) stack + THREAD_STACK_SIZE;
}
190 static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
192 struct syslet_uatom *atom;
195 fio_gettime(&now, NULL);
199 struct io_u *io_u = atom->private;
201 memcpy(&io_u->issue_time, &now, sizeof(now));
202 io_u_queued(td, io_u);
207 static int fio_syslet_commit(struct thread_data *td)
209 struct syslet_data *sd = td->io_ops->data;
210 struct syslet_uatom *done;
215 assert(!sd->tail->next);
217 if (!sd->ahu.new_thread_stack)
218 sd->ahu.new_thread_stack = thread_stack_alloc();
220 fio_syslet_queued(td, sd);
223 * On sync completion, the atom is returned. So on NULL return
224 * it's queued asynchronously.
226 done = async_exec(sd->head, &sd->ahu);
228 sd->head = sd->tail = NULL;
231 fio_syslet_complete_atom(td, done);
236 static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
238 struct syslet_data *sd = td->io_ops->data;
241 sd->tail->next = &io_u->req.atom;
242 sd->tail = &io_u->req.atom;
244 sd->head = sd->tail = &io_u->req.atom;
246 io_u->req.head = sd->head;
250 static int async_head_init(struct syslet_data *sd, unsigned int depth)
252 unsigned long ring_size;
254 memset(&sd->ahu, 0, sizeof(struct async_head_user));
256 ring_size = sizeof(struct syslet_uatom *) * depth;
257 sd->ring = malloc(ring_size);
258 memset(sd->ring, 0, ring_size);
260 sd->ahu.user_ring_idx = 0;
261 sd->ahu.completion_ring = sd->ring;
262 sd->ahu.ring_size_bytes = ring_size;
263 sd->ahu.head_stack = thread_stack_alloc();
264 sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
265 sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;
270 static void async_head_exit(struct syslet_data *sd)
275 static void fio_syslet_cleanup(struct thread_data *td)
277 struct syslet_data *sd = td->io_ops->data;
283 td->io_ops->data = NULL;
287 static int fio_syslet_init(struct thread_data *td)
289 struct syslet_data *sd;
292 sd = malloc(sizeof(*sd));
293 memset(sd, 0, sizeof(*sd));
294 sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
295 memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);
298 * This will handily fail for kernels where syslet isn't available
300 if (async_head_init(sd, td->iodepth)) {
306 td->io_ops->data = sd;
310 static struct ioengine_ops ioengine = {
312 .version = FIO_IOOPS_VERSION,
313 .init = fio_syslet_init,
314 .prep = fio_syslet_prep,
315 .queue = fio_syslet_queue,
316 .commit = fio_syslet_commit,
317 .getevents = fio_syslet_getevents,
318 .event = fio_syslet_event,
319 .cleanup = fio_syslet_cleanup,
320 .open_file = generic_open_file,
321 .close_file = generic_close_file,
324 #else /* FIO_HAVE_SYSLET */
 * When we have a proper configure system in place, we simply won't build
328 * and install this io engine. For now install a crippled version that
329 * just complains and fails to load.
331 static int fio_syslet_init(struct thread_data fio_unused *td)
333 fprintf(stderr, "fio: syslet not available\n");
337 static struct ioengine_ops ioengine = {
339 .version = FIO_IOOPS_VERSION,
340 .init = fio_syslet_init,
343 #endif /* FIO_HAVE_SYSLET */
345 static void fio_init fio_syslet_register(void)
347 register_ioengine(&ioengine);
350 static void fio_exit fio_syslet_unregister(void)
352 unregister_ioengine(&ioengine);