2 * read/write() engine that uses syslet to be async
14 #ifdef FIO_HAVE_SYSLET
/* Number of completed io_us staged in ->events, waiting for ->event() */
18 unsigned int nr_events;
/* Kernel/user shared async head state (completion ring index, stacks, ...) */
20 struct async_head_user ahu;
/* User-side completion ring of atom pointers; sized to td->iodepth entries */
21 struct syslet_uatom **ring;
/* Chain of atoms queued via ->queue() but not yet submitted by ->commit() */
23 struct syslet_uatom *head, *tail;
/* Atoms in original submission order, so completions can be retired in order */
24 struct syslet_uatom **event_map;
25 unsigned int event_map_idx;
/*
 * Retire the sequence of atoms ending at 'atom': locate it in event_map,
 * then complete every earlier atom up to and including it, in the order
 * the io_us were originally submitted.
 */
28 static void fio_syslet_complete_atom(struct thread_data *td,
29 struct syslet_uatom *atom)
31 struct syslet_data *sd = td->io_ops->data;
/* Nothing outstanding at all -> nothing to retire */
35 if (!sd->event_map_idx)
39 * Find the start of the string of atoms for this sequence
/* Scan backwards for the matching atom; 'end' marks the last one to retire */
41 for (end = sd->event_map_idx - 1; end >= 0; end--)
42 if (atom == sd->event_map[end])
/* NOTE(review): diagnostic goes to stdout via printf; fprintf(stderr, ...)
 * or the fio log helpers would be more appropriate — confirm project style */
45 if (end < 0 || atom != sd->event_map[end]) {
46 printf("didn't find atom\n");
50 //printf("end=%d, total %d\n", end, sd->event_map_idx);
53 * now complete in right order
55 for (i = 0; i <= end; i++) {
58 atom = sd->event_map[i];
/* Short transfer: record the residual byte count on the io_u */
62 io_u->resid = io_u->xfer_buflen - ret;
/* Stage the completed io_u for ->event(); must fit within iodepth */
66 assert(sd->nr_events < td->iodepth);
67 sd->events[sd->nr_events++] = io_u;
71 * Move later completions to the front, if we didn't complete all
73 if (end == (int) sd->event_map_idx - 1)
74 sd->event_map_idx = 0;
/* Compact: shift the uncompleted tail of event_map down to index 0 */
76 int nr = sd->event_map_idx - end - 1;
78 memmove(sd->event_map, &sd->event_map[end + 1], nr * sizeof(struct syslet_uatom *));
79 sd->event_map_idx = nr;
84 * Inspect the ring to see if we have completed events
86 static void fio_syslet_complete(struct thread_data *td)
88 struct syslet_data *sd = td->io_ops->data;
91 struct syslet_uatom *atom;
/* Current ring slot; a NULL entry means no further completions pending */
93 atom = sd->ring[sd->ahu.user_ring_idx];
/* Consume the slot and advance the ring index, wrapping at iodepth */
97 sd->ring[sd->ahu.user_ring_idx] = NULL;
98 if (++sd->ahu.user_ring_idx == td->iodepth)
99 sd->ahu.user_ring_idx = 0;
/* Retire this atom (and any earlier ones in its sequence) in order */
101 fio_syslet_complete_atom(td, atom);
/*
 * Reap at least 'min' completed events: first drain the completion ring,
 * and only block in the kernel if that did not yield enough.
 */
105 static int fio_syslet_getevents(struct thread_data *td, int min,
107 struct timespec fio_unused *t)
109 struct syslet_data *sd = td->io_ops->data;
/* Harvest whatever has already completed without blocking */
113 fio_syslet_complete(td);
116 * do we have enough immediate completions?
118 if (sd->nr_events >= (unsigned int) min)
122 * OK, we need to wait for some events...
/* NOTE(review): waits for 1 event regardless of 'min' — presumably the
 * surrounding (not shown) loop re-checks; confirm against full source */
124 ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
134 static struct io_u *fio_syslet_event(struct thread_data *td, int event)
136 struct syslet_data *sd = td->io_ops->data;
138 return sd->events[event];
/*
 * Fill in a syslet atom: syscall number 'nr', up to four argument
 * pointers, a return-value destination, flags, and a private cookie
 * (here the owning io_u).  Arguments are passed by pointer because the
 * kernel dereferences them at execution time.
 */
141 static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
142 void *arg1, void *arg2, void *arg3, void *ret_ptr,
143 unsigned long flags, void *priv)
147 atom->ret_ptr = ret_ptr;
149 atom->arg_ptr[0] = arg0;
150 atom->arg_ptr[1] = arg1;
151 atom->arg_ptr[2] = arg2;
152 atom->arg_ptr[3] = arg3;
/* Unused argument slots are cleared */
153 atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
154 atom->private = priv;
158 * Use an fsync atom for DDIR_SYNC requests
160 static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
/* Single-atom sequence: fsync(f->fd), result stored in io_u->req.ret */
162 init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
163 &io_u->req.ret, 0, io_u);
/*
 * Build a pread/pwrite atom for this io_u.  'nr' is picked from the
 * data direction (read branch visible below; the write branch is
 * presumably the symmetric case — not shown in this view).
 */
166 static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
173 if (io_u->ddir == DDIR_READ)
/* fd, buffer, length, offset all passed by pointer per the atom ABI */
178 init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
179 &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
/*
 * ->prep() hook: encode the io_u as a syslet atom, dispatching on the
 * data direction (sync vs read/write).
 */
182 static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
184 struct fio_file *f = io_u->file;
186 if (io_u->ddir == DDIR_SYNC)
187 fio_syslet_prep_sync(io_u, f)
189 fio_syslet_prep_rw(io_u, f);
/*
 * Entry point handed to the kernel (via head_eip/new_thread_eip) for
 * syslet cachemiss threads: drop straight into the async thread loop.
 */
194 static void cachemiss_thread_start(void)
197 async_thread(NULL, NULL);
#define THREAD_STACK_SIZE (16384)

/*
 * Allocate a stack for a syslet thread and return its top-of-stack
 * address (the end of the allocation — stacks grow downwards).
 *
 * Returns 0 on allocation failure.  The original returned
 * THREAD_STACK_SIZE in that case (NULL + size), which callers would
 * have used as a bogus stack pointer; returning 0 lets the caller's
 * "!new_thread_stack" check (see commit path) retry instead.
 */
static unsigned long thread_stack_alloc(void)
{
	unsigned long base = (unsigned long) malloc(THREAD_STACK_SIZE);

	if (!base)
		return 0;

	return base + THREAD_STACK_SIZE;
}
/*
 * ->commit() hook: submit the chain of queued atoms to the kernel.
 */
207 static int fio_syslet_commit(struct thread_data *td)
209 struct syslet_data *sd = td->io_ops->data;
210 struct syslet_uatom *done;
/* Ensure a spare stack exists for the kernel to spawn a new thread on */
215 if (!sd->ahu.new_thread_stack)
216 sd->ahu.new_thread_stack = thread_stack_alloc();
219 * On sync completion, the atom is returned. So on NULL return
220 * it's queued asynchronously.
222 done = async_exec(sd->head, &sd->ahu);
/* Chain handed off to the kernel; reset for the next batch */
224 sd->head = sd->tail = NULL;
/* Synchronous completion: retire the returned atom immediately */
227 fio_syslet_complete_atom(td, done);
/*
 * ->queue() hook: link the io_u's atom onto the pending chain (submitted
 * later by ->commit()) and remember it in event_map so completions can
 * be retired in submission order.
 */
232 static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
234 struct syslet_data *sd = td->io_ops->data;
/* Append to an existing chain... */
237 sd->tail->next = &io_u->req.atom;
238 sd->tail = &io_u->req.atom;
/* ...or start a new one */
240 sd->head = sd->tail = &io_u->req.atom;
/* Record submission order for fio_syslet_complete_atom() */
242 sd->event_map[sd->event_map_idx++] = sd->tail;
/*
 * Set up the async head shared with the kernel: a zeroed completion
 * ring of 'depth' atom pointers, a head stack, and the thread entry
 * points for cachemiss threads.
 */
246 static int async_head_init(struct syslet_data *sd, unsigned int depth)
248 unsigned long ring_size;
250 memset(&sd->ahu, 0, sizeof(struct async_head_user));
252 ring_size = sizeof(struct syslet_uatom *) * depth;
/* NOTE(review): malloc result is not checked; calloc() would also drop
 * the explicit memset — confirm OOM policy with the rest of fio */
253 sd->ring = malloc(ring_size);
254 memset(sd->ring, 0, ring_size);
256 sd->ahu.user_ring_idx = 0;
257 sd->ahu.completion_ring = sd->ring;
258 sd->ahu.ring_size_bytes = ring_size;
259 sd->ahu.head_stack = thread_stack_alloc();
/* Both the head and any new cachemiss threads start in the same loop */
260 sd->ahu.head_eip = (unsigned long)cachemiss_thread_start;
261 sd->ahu.new_thread_eip = (unsigned long)cachemiss_thread_start;
/*
 * Tear down state created by async_head_init() (body not visible in
 * this view).
 */
266 static void async_head_exit(struct syslet_data *sd)
/*
 * ->cleanup() hook: release per-thread syslet state and detach it from
 * the io_ops.
 */
271 static void fio_syslet_cleanup(struct thread_data *td)
273 struct syslet_data *sd = td->io_ops->data;
/* Clear the back-pointer so stale state is never reused */
280 td->io_ops->data = NULL;
/*
 * ->init() hook: allocate per-thread syslet state sized to the job's
 * iodepth and register the async head with the kernel.
 */
284 static int fio_syslet_init(struct thread_data *td)
286 struct syslet_data *sd;
/* NOTE(review): none of these allocations are checked; calloc() would
 * both check the multiplication and drop the memsets — confirm fio's
 * OOM policy before changing */
289 sd = malloc(sizeof(*sd));
290 memset(sd, 0, sizeof(*sd));
291 sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
292 memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);
293 sd->event_map = malloc(sizeof(struct syslet_uatom *) * td->iodepth);
294 memset(sd->event_map, 0, sizeof(struct syslet_uatom *) * td->iodepth);
297 * This will handily fail for kernels where syslet isn't available
299 if (async_head_init(sd, td->iodepth)) {
305 td->io_ops->data = sd;
/*
 * Engine ops for the real syslet implementation; registered below by
 * fio_syslet_register().
 */
309 static struct ioengine_ops ioengine = {
311 .version = FIO_IOOPS_VERSION,
312 .init = fio_syslet_init,
313 .prep = fio_syslet_prep,
314 .queue = fio_syslet_queue,
315 .commit = fio_syslet_commit,
316 .getevents = fio_syslet_getevents,
317 .event = fio_syslet_event,
318 .cleanup = fio_syslet_cleanup,
321 #else /* FIO_HAVE_SYSLET */
324 * When we have a proper configure system in place, we simply won't build
325 * and install this io engine. For now install a crippled version that
326 * just complains and fails to load.
/* Stub ->init(): report that syslet support was not compiled in */
328 static int fio_syslet_init(struct thread_data fio_unused *td)
330 fprintf(stderr, "fio: syslet not available\n");
/*
 * Crippled fallback ops: only ->init() is wired up, and it always
 * complains and fails, so the engine refuses to load.
 */
334 static struct ioengine_ops ioengine = {
336 .version = FIO_IOOPS_VERSION,
337 .init = fio_syslet_init,
340 #endif /* FIO_HAVE_SYSLET */
/* Constructor: register this engine with fio at load time */
342 static void fio_init fio_syslet_register(void)
344 register_ioengine(&ioengine);
/* Destructor: remove the engine again at unload time */
347 static void fio_exit fio_syslet_unregister(void)
349 unregister_ioengine(&ioengine);