4 * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
5 * with syslets to make the execution async.
13 #include <asm/unistd.h>
18 #ifdef FIO_HAVE_SYSLET
/*
 * Map fio's generic pread/pwrite syscall names onto the arch's variants:
 * pread64/pwrite64 where they exist, plain pread/pwrite otherwise.
 * NOTE(review): the #ifdef/#else lines separating the two pairs are on
 * elided lines in this view.
 */
21 #define __NR_fio_pread __NR_pread64
22 #define __NR_fio_pwrite __NR_pwrite64
24 #define __NR_fio_pread __NR_pread
25 #define __NR_fio_pwrite __NR_pwrite
/*
 * Engine-private per-thread state (members of struct syslet_data; the
 * struct header and any remaining members are on elided lines).
 */
/* number of reaped io_us waiting in events[] for fio_syslet_event() */
30 unsigned int nr_events;
/* async head shared between this process and the kernel syslet code */
32 struct async_head_user ahu;
/* kernel->user completion ring, one slot per queued atom */
33 struct syslet_uatom **ring;
/* chain of atoms queued but not yet committed via async_exec() */
35 struct syslet_uatom *head, *tail;
/*
 * Completion handler for one finished atom.  Submitted atoms form a
 * linked sequence per request; completion walks from the sequence head
 * up to (and including) the reported atom, recording short transfers
 * and stashing each finished io_u into sd->events[] for later pickup
 * by fio_syslet_event().
 * NOTE(review): several interior lines of this function are elided in
 * this view; comments below describe only what is visible.
 */
38 static void fio_syslet_complete_atom(struct thread_data *td,
39 struct syslet_uatom *atom)
41 struct syslet_data *sd = td->io_ops->data;
42 struct syslet_uatom *last;
46 * complete from the beginning of the sequence up to (and
47 * including) this atom
/* rewind to the first atom of the chain this io_u belongs to */
51 atom = io_u->req.head;
54 * now complete in right order
/* short transfer: resid = bytes that were NOT transferred */
62 io_u->resid = io_u->xfer_buflen - ret;
/* hand the io_u to the event array; must never exceed queue depth */
66 assert(sd->nr_events < td->iodepth);
67 sd->events[sd->nr_events++] = io_u;
79 * Inspect the ring to see if we have completed events
/*
 * Drain the completion ring: each non-NULL slot holds a finished atom.
 * Slots are consumed in submission order, cleared for reuse, and the
 * user index wraps at iodepth.  NOTE(review): the loop header around
 * these lines is elided in this view.
 */
81 static void fio_syslet_complete(struct thread_data *td)
83 struct syslet_data *sd = td->io_ops->data;
86 struct syslet_uatom *atom;
88 atom = sd->ring[sd->ahu.user_ring_idx];
/* clear the slot so the kernel can refill it, then advance and wrap */
92 sd->ring[sd->ahu.user_ring_idx] = NULL;
93 if (++sd->ahu.user_ring_idx == td->iodepth)
94 sd->ahu.user_ring_idx = 0;
96 fio_syslet_complete_atom(td, atom);
/*
 * Event-reap hook: gather at least 'min' completed events.  First reap
 * whatever already sits in the completion ring; only if that is not
 * enough, block in async_wait() for more.  NOTE(review): the retry
 * loop and return value handling are on elided lines.
 */
100 static int fio_syslet_getevents(struct thread_data *td, int min,
102 struct timespec fio_unused *t)
104 struct syslet_data *sd = td->io_ops->data;
108 fio_syslet_complete(td);
111 * do we have enough immediate completions?
113 if (sd->nr_events >= (unsigned int) min)
117 * OK, we need to wait for some events...
/* block until at least one more completion lands in the ring */
119 ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
/* Event hook: return the io_u stored for a previously reaped event slot. */
129 static struct io_u *fio_syslet_event(struct thread_data *td, int event)
131 struct syslet_data *sd = td->io_ops->data;
133 return sd->events[event];
/*
 * Fill in a syslet atom: syscall number, up to four argument pointers,
 * where the kernel should store the return value, flags, and a private
 * cookie (the owning io_u).  The unused trailing argument slots are
 * cleared.  NOTE(review): assignments of nr/flags are on elided lines.
 */
136 static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
137 void *arg1, void *arg2, void *arg3, void *ret_ptr,
138 unsigned long flags, void *priv)
142 atom->ret_ptr = ret_ptr;
144 atom->arg_ptr[0] = arg0;
145 atom->arg_ptr[1] = arg1;
146 atom->arg_ptr[2] = arg2;
147 atom->arg_ptr[3] = arg3;
148 atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
149 atom->private = priv;
153 * Use fsync atom for sync
155 static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
157 init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
158 &io_u->req.ret, 0, io_u);
/*
 * Build a pread/pwrite atom for a data-direction io_u: the syscall
 * number is chosen by direction (the DDIR_READ assignment is on an
 * elided line; the write case is visible below), with fd, buffer,
 * length and offset passed by pointer per the atom calling convention.
 */
161 static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
168 if (io_u->ddir == DDIR_READ)
171 nr = __NR_fio_pwrite;
173 init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
174 &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
/* Prep hook: dispatch to the fsync or read/write atom builder. */
177 static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
179 struct fio_file *f = io_u->file;
181 if (io_u->ddir == DDIR_SYNC)
182 fio_syslet_prep_sync(io_u, f);
184 fio_syslet_prep_rw(io_u, f);
/* Entry point for kernel-spawned async (cachemiss) threads: park the
 * thread in the syslet engine via async_thread(). */
189 static void cachemiss_thread_start(void)
192 async_thread(NULL, NULL);
195 #define THREAD_STACK_SIZE (16384)
/*
 * Allocate a stack for an async thread and return its top-of-stack
 * address (assumes a downward-growing stack).
 * NOTE(review): malloc() is unchecked — on failure this returns
 * THREAD_STACK_SIZE computed from a NULL base; the allocation is also
 * never freed.  Confirm against the engine's lifetime expectations.
 */
197 static unsigned long thread_stack_alloc()
199 return (unsigned long) malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
/*
 * Stamp issue_time on every atom about to be handed to the kernel and
 * account each as queued.  NOTE(review): the loop header walking the
 * atom chain is on elided lines in this view.
 */
202 static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
204 struct syslet_uatom *atom;
207 fio_gettime(&now, NULL);
211 struct io_u *io_u = atom->private;
/* one timestamp taken above covers the whole batch */
213 memcpy(&io_u->issue_time, &now, sizeof(now));
214 io_u_queued(td, io_u);
/*
 * Commit hook: submit the whole pending atom chain (head..tail) to the
 * kernel in a single async_exec() call.  A fresh thread stack is
 * lazily provided for the kernel to spawn a new async thread on a
 * cache miss.  NOTE(review): early-exit and return paths are on
 * elided lines.
 */
219 static int fio_syslet_commit(struct thread_data *td)
221 struct syslet_data *sd = td->io_ops->data;
222 struct syslet_uatom *done;
/* tail must terminate the chain before submission */
227 assert(!sd->tail->next);
229 if (!sd->ahu.new_thread_stack)
230 sd->ahu.new_thread_stack = thread_stack_alloc();
/* mark everything in the chain as issued before handing it over */
232 fio_syslet_queued(td, sd);
235 * On sync completion, the atom is returned. So on NULL return
236 * it's queued asynchronously.
238 done = async_exec(sd->head, &sd->ahu);
/* chain handed off; the next queue() starts a new one */
240 sd->head = sd->tail = NULL;
/* synchronous completion: reap it immediately */
243 fio_syslet_complete_atom(td, done);
/*
 * Queue hook: link this io_u's atom onto the pending chain (or start a
 * new chain when none exists) and remember the chain head so completion
 * can walk the sequence in order.  NOTE(review): the if/else around the
 * two linking cases is partially elided in this view.
 */
248 static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
250 struct syslet_data *sd = td->io_ops->data;
/* append to an existing chain... */
253 sd->tail->next = &io_u->req.atom;
254 sd->tail = &io_u->req.atom;
/* ...or start a fresh one */
256 sd->head = sd->tail = &io_u->req.atom;
258 io_u->req.head = sd->head;
/*
 * Set up the async head shared with the kernel: a zeroed completion
 * ring with one slot per queued io, the head thread's stack, and the
 * instruction pointer used for newly spawned cachemiss threads.
 * NOTE(review): malloc()/thread_stack_alloc() results are unchecked
 * here; the return statement is on elided lines.
 */
262 static int async_head_init(struct syslet_data *sd, unsigned int depth)
264 unsigned long ring_size;
266 memset(&sd->ahu, 0, sizeof(struct async_head_user));
268 ring_size = sizeof(struct syslet_uatom *) * depth;
269 sd->ring = malloc(ring_size);
270 memset(sd->ring, 0, ring_size);
272 sd->ahu.user_ring_idx = 0;
273 sd->ahu.completion_ring = sd->ring;
274 sd->ahu.ring_size_bytes = ring_size;
275 sd->ahu.head_stack = thread_stack_alloc();
276 sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
277 sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;
/* Tear down state created by async_head_init() (body elided in this view). */
282 static void async_head_exit(struct syslet_data *sd)
/*
 * Cleanup hook: release the engine state and detach it from the thread.
 * NOTE(review): the freeing of sd->events/sd itself is on elided lines.
 */
287 static void fio_syslet_cleanup(struct thread_data *td)
289 struct syslet_data *sd = td->io_ops->data;
295 td->io_ops->data = NULL;
/*
 * Init hook: allocate the per-thread engine state plus an event array
 * sized by the queue depth, then bring up the async head.  A failing
 * async_head_init() doubles as the "no syslet support in this kernel"
 * probe.  NOTE(review): malloc() results are unchecked; the failure
 * branch and return are on elided lines.
 */
299 static int fio_syslet_init(struct thread_data *td)
301 struct syslet_data *sd;
304 sd = malloc(sizeof(*sd));
305 memset(sd, 0, sizeof(*sd));
306 sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
307 memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);
310 * This will handily fail for kernels where syslet isn't available
312 if (async_head_init(sd, td->iodepth)) {
318 td->io_ops->data = sd;
/* Hook table registered with fio when syslet support is compiled in. */
322 static struct ioengine_ops ioengine = {
324 .version = FIO_IOOPS_VERSION,
325 .init = fio_syslet_init,
326 .prep = fio_syslet_prep,
327 .queue = fio_syslet_queue,
328 .commit = fio_syslet_commit,
329 .getevents = fio_syslet_getevents,
330 .event = fio_syslet_event,
331 .cleanup = fio_syslet_cleanup,
332 .open_file = generic_open_file,
333 .close_file = generic_close_file,
336 #else /* FIO_HAVE_SYSLET */
339 * When we have a proper configure system in place, we simply won't build
340 * and install this io engine. For now install a crippled version that
341 * just complains and fails to load.
/* Stub used when FIO_HAVE_SYSLET is not defined: loading the engine
 * only reports that syslets are unavailable (and fails, per the
 * comment above; the return is on an elided line). */
343 static int fio_syslet_init(struct thread_data fio_unused *td)
345 fprintf(stderr, "fio: syslet not available\n");
/* Crippled hook table for kernels without syslet support: only init is
 * set, and it always fails to load the engine. */
349 static struct ioengine_ops ioengine = {
351 .version = FIO_IOOPS_VERSION,
352 .init = fio_syslet_init,
355 #endif /* FIO_HAVE_SYSLET */
/* Constructor (fio_init): register this engine with fio at load time. */
357 static void fio_init fio_syslet_register(void)
359 register_ioengine(&ioengine);
/* Destructor (fio_exit): unregister the engine on unload. */
362 static void fio_exit fio_syslet_unregister(void)
364 unregister_ioengine(&ioengine)