4 * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
5 * with syslets to make the execution async.
14 #include <asm/unistd.h>
17 #include "../indirect.h"
18 #include "../syslet.h"
20 #ifdef FIO_HAVE_SYSLET
23 #define __NR_fio_pread __NR_pread64
24 #define __NR_fio_pwrite __NR_pwrite64
26 #define __NR_fio_pread __NR_pread
27 #define __NR_fio_pwrite __NR_pwrite
32 unsigned int nr_events;
34 struct syslet_ring *ring;
38 static void fio_syslet_complete(struct thread_data *td, struct io_u *io_u)
40 struct syslet_data *sd = td->io_ops->data;
42 assert(sd->nr_events < td->o.iodepth);
43 sd->events[sd->nr_events++] = io_u;
46 static void syslet_complete_nr(struct thread_data *td, unsigned int nr)
48 struct syslet_data *sd = td->io_ops->data;
51 for (i = 0; i < nr; i++) {
52 unsigned int idx = (i + sd->ring->user_tail) % td->o.iodepth;
53 struct syslet_completion *comp = &sd->ring->comp[idx];
54 struct io_u *io_u = (struct io_u *) (long) comp->caller_data;
56 io_u->resid = io_u->xfer_buflen - comp->status;
57 fio_syslet_complete(td, io_u);
62 static void fio_syslet_wait_for_events(struct thread_data *td)
64 struct syslet_data *sd = td->io_ops->data;
65 struct syslet_ring *ring = sd->ring;
70 unsigned int kh = ring->kernel_head;
74 * first reap events that are already completed
76 if (ring->user_tail != kh) {
77 unsigned int nr = kh - ring->user_tail;
79 syslet_complete_nr(td, nr);
86 * block waiting for at least one event
88 ret = syscall(__NR_syslet_ring_wait, ring, ring->user_tail);
93 static int fio_syslet_getevents(struct thread_data *td, int min,
95 struct timespec fio_unused *t)
97 struct syslet_data *sd = td->io_ops->data;
102 * do we have enough immediate completions?
104 if (sd->nr_events >= (unsigned int) min)
107 fio_syslet_wait_for_events(td);
115 static struct io_u *fio_syslet_event(struct thread_data *td, int event)
117 struct syslet_data *sd = td->io_ops->data;
119 return sd->events[event];
122 static void fio_syslet_prep_sync(struct fio_file *f,
123 struct indirect_registers *regs)
125 FILL_IN(*regs, __NR_fsync, (long) f->fd);
128 static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f,
129 struct indirect_registers *regs)
136 if (io_u->ddir == DDIR_READ)
139 nr = __NR_fio_pwrite;
141 FILL_IN(*regs, nr, (long) f->fd, (long) io_u->xfer_buf,
142 (long) io_u->xfer_buflen, (long) io_u->offset);
145 static void fio_syslet_prep(struct io_u *io_u, struct indirect_registers *regs)
147 struct fio_file *f = io_u->file;
149 if (io_u->ddir == DDIR_SYNC)
150 fio_syslet_prep_sync(f, regs);
152 fio_syslet_prep_rw(io_u, f, regs);
155 static void ret_func(void)
160 static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
162 struct syslet_data *sd = td->io_ops->data;
163 union indirect_params params;
164 struct indirect_registers regs;
167 fio_ro_check(td, io_u);
169 memset(¶ms, 0, sizeof(params));
170 fill_syslet_args(¶ms.syslet, sd->ring, (long)io_u, ret_func, sd->stack);
172 fio_syslet_prep(io_u, ®s);
174 ret = syscall(__NR_indirect, ®s, ¶ms, sizeof(params), 0);
175 if (ret == (int) io_u->xfer_buflen) {
177 * completed sync, account. this also catches fsync().
179 return FIO_Q_COMPLETED;
180 } else if (ret < 0) {
182 * queued for async execution
184 if (errno == ESYSLETPENDING)
189 td_verror(td, io_u->error, "xfer");
190 return FIO_Q_COMPLETED;
193 static int check_syslet_support(struct syslet_data *sd)
195 union indirect_params params;
196 struct indirect_registers regs;
197 pid_t pid, my_pid = getpid();
199 memset(¶ms, 0, sizeof(params));
200 fill_syslet_args(¶ms.syslet, sd->ring, 0, ret_func, sd->stack);
202 FILL_IN(regs, __NR_getpid);
204 pid = syscall(__NR_indirect, ®s, ¶ms, sizeof(params), 0);
211 static void fio_syslet_cleanup(struct thread_data *td)
213 struct syslet_data *sd = td->io_ops->data;
220 td->io_ops->data = NULL;
224 static int fio_syslet_init(struct thread_data *td)
226 struct syslet_data *sd;
227 void *ring = NULL, *stack = NULL;
229 sd = malloc(sizeof(*sd));
230 memset(sd, 0, sizeof(*sd));
232 sd->events = malloc(sizeof(struct io_u *) * td->o.iodepth);
233 memset(sd->events, 0, sizeof(struct io_u *) * td->o.iodepth);
234 if (posix_memalign(&ring, sizeof(uint64_t), sizeof(struct syslet_ring)))
236 if (posix_memalign(&stack, page_size, page_size))
242 memset(sd->ring, 0, sizeof(*sd->ring));
243 sd->ring->elements = td->o.iodepth;
245 if (!check_syslet_support(sd)) {
246 td->io_ops->data = sd;
250 log_err("fio: syslets do not appear to work\n");
261 static struct ioengine_ops ioengine = {
263 .version = FIO_IOOPS_VERSION,
264 .init = fio_syslet_init,
265 .queue = fio_syslet_queue,
266 .getevents = fio_syslet_getevents,
267 .event = fio_syslet_event,
268 .cleanup = fio_syslet_cleanup,
269 .open_file = generic_open_file,
270 .close_file = generic_close_file,
273 #else /* FIO_HAVE_SYSLET */
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
280 static int fio_syslet_init(struct thread_data fio_unused *td)
282 fprintf(stderr, "fio: syslet not available\n");
286 static struct ioengine_ops ioengine = {
288 .version = FIO_IOOPS_VERSION,
289 .init = fio_syslet_init,
292 #endif /* FIO_HAVE_SYSLET */
294 static void fio_init fio_syslet_register(void)
296 register_ioengine(&ioengine);
299 static void fio_exit fio_syslet_unregister(void)
301 unregister_ioengine(&ioengine);