4 * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
5 * with syslets to make the execution async.
14 #include <asm/unistd.h>
17 #include "../indirect.h"
18 #include "../syslet.h"
21 #ifdef FIO_HAVE_SYSLET
24 #define __NR_fio_pread __NR_pread64
25 #define __NR_fio_pwrite __NR_pwrite64
27 #define __NR_fio_pread __NR_pread
28 #define __NR_fio_pwrite __NR_pwrite
/*
 * Per-thread state for the syslet engine.
 */
struct syslet_data {
	struct io_u **events;		/* completed io_u's, reaped from the ring */
	unsigned int nr_events;		/* number of entries in events[] */

	struct syslet_ring *ring;	/* kernel/user shared completion ring */
	unsigned int ring_mask;		/* ring size - 1 (ring is power-of-2) */
	void *stack;			/* per-thread syslet execution stack */
};
40 static void fio_syslet_add_event(struct thread_data *td, struct io_u *io_u)
42 struct syslet_data *sd = td->io_ops->data;
44 assert(sd->nr_events < td->o.iodepth);
45 sd->events[sd->nr_events++] = io_u;
48 static void fio_syslet_add_events(struct thread_data *td, unsigned int nr)
50 struct syslet_data *sd = td->io_ops->data;
53 uidx = sd->ring->user_tail;
56 for (i = 0; i < nr; i++) {
57 unsigned int idx = (i + uidx) & sd->ring_mask;
58 struct syslet_completion *comp = &sd->ring->comp[idx];
59 struct io_u *io_u = (struct io_u *) (long) comp->caller_data;
64 io_u->resid = io_u->xfer_buflen;
67 io_u->resid = io_u->xfer_buflen - ret;
71 fio_syslet_add_event(td, io_u);
75 static void fio_syslet_wait_for_events(struct thread_data *td)
77 struct syslet_data *sd = td->io_ops->data;
78 struct syslet_ring *ring = sd->ring;
81 unsigned int kh = ring->kernel_head;
85 * first reap events that are already completed
87 if (ring->user_tail != kh) {
88 unsigned int nr = kh - ring->user_tail;
90 fio_syslet_add_events(td, nr);
96 * block waiting for at least one event
98 ret = syscall(__NR_syslet_ring_wait, ring, ring->user_tail);
103 static int fio_syslet_getevents(struct thread_data *td, unsigned int min,
104 unsigned int fio_unused max,
105 struct timespec fio_unused *t)
107 struct syslet_data *sd = td->io_ops->data;
111 * While we have less events than requested, block waiting for them
112 * (if we have to, there may already be more completed events ready
113 * for us - see fio_syslet_wait_for_events()
115 while (sd->nr_events < min)
116 fio_syslet_wait_for_events(td);
123 static struct io_u *fio_syslet_event(struct thread_data *td, int event)
125 struct syslet_data *sd = td->io_ops->data;
127 return sd->events[event];
130 static void fio_syslet_prep_sync(struct fio_file *f,
131 struct indirect_registers *regs)
133 FILL_IN(*regs, __NR_fsync, (long) f->fd);
136 static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f,
137 struct indirect_registers *regs)
144 if (io_u->ddir == DDIR_READ)
147 nr = __NR_fio_pwrite;
149 FILL_IN(*regs, nr, (long) f->fd, (long) io_u->xfer_buf,
150 (long) io_u->xfer_buflen, (long) io_u->offset);
153 static void fio_syslet_prep(struct io_u *io_u, struct indirect_registers *regs)
155 struct fio_file *f = io_u->file;
157 if (io_u->ddir == DDIR_SYNC)
158 fio_syslet_prep_sync(f, regs);
160 fio_syslet_prep_rw(io_u, f, regs);
163 static void ret_func(void)
168 static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
170 struct syslet_data *sd = td->io_ops->data;
171 union indirect_params params;
172 struct indirect_registers regs;
175 fio_ro_check(td, io_u);
177 memset(¶ms, 0, sizeof(params));
178 fill_syslet_args(¶ms.syslet, sd->ring, (long)io_u, ret_func, sd->stack);
180 fio_syslet_prep(io_u, ®s);
182 ret = syscall(__NR_indirect, ®s, ¶ms, sizeof(params), 0);
183 if (ret == (int) io_u->xfer_buflen) {
185 * completed sync, account. this also catches fsync().
187 return FIO_Q_COMPLETED;
188 } else if (ret < 0) {
190 * queued for async execution
192 if (errno == ESYSLETPENDING)
197 td_verror(td, io_u->error, "xfer");
198 return FIO_Q_COMPLETED;
201 static int check_syslet_support(struct syslet_data *sd)
203 union indirect_params params;
204 struct indirect_registers regs;
205 pid_t pid, my_pid = getpid();
207 memset(¶ms, 0, sizeof(params));
208 fill_syslet_args(¶ms.syslet, sd->ring, 0, ret_func, sd->stack);
210 FILL_IN(regs, __NR_getpid);
212 pid = syscall(__NR_indirect, ®s, ¶ms, sizeof(params), 0);
219 static void fio_syslet_cleanup(struct thread_data *td)
221 struct syslet_data *sd = td->io_ops->data;
227 td->io_ops->data = NULL;
231 static int fio_syslet_init(struct thread_data *td)
233 struct syslet_data *sd;
234 void *ring = NULL, *stack = NULL;
235 unsigned int ring_size, ring_nr;
237 sd = malloc(sizeof(*sd));
238 memset(sd, 0, sizeof(*sd));
240 sd->events = malloc(sizeof(struct io_u *) * td->o.iodepth);
241 memset(sd->events, 0, sizeof(struct io_u *) * td->o.iodepth);
244 * The ring needs to be a power-of-2, so round it up if we have to
246 ring_nr = td->o.iodepth;
247 if (ring_nr & (ring_nr - 1))
248 ring_nr = 1 << fls(ring_nr);
250 ring_size = sizeof(struct syslet_ring) +
251 ring_nr * sizeof(struct syslet_completion);
252 if (posix_memalign(&ring, sizeof(uint64_t), ring_size))
254 if (posix_memalign(&stack, page_size, page_size))
258 sd->ring_mask = ring_nr - 1;
261 memset(sd->ring, 0, ring_size);
262 sd->ring->elements = ring_nr;
264 if (!check_syslet_support(sd)) {
265 td->io_ops->data = sd;
269 log_err("fio: syslets do not appear to work\n");
280 static struct ioengine_ops ioengine = {
282 .version = FIO_IOOPS_VERSION,
283 .init = fio_syslet_init,
284 .queue = fio_syslet_queue,
285 .getevents = fio_syslet_getevents,
286 .event = fio_syslet_event,
287 .cleanup = fio_syslet_cleanup,
288 .open_file = generic_open_file,
289 .close_file = generic_close_file,
#else /* FIO_HAVE_SYSLET */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
299 static int fio_syslet_init(struct thread_data fio_unused *td)
301 fprintf(stderr, "fio: syslet not available\n");
305 static struct ioengine_ops ioengine = {
307 .version = FIO_IOOPS_VERSION,
308 .init = fio_syslet_init,
311 #endif /* FIO_HAVE_SYSLET */
313 static void fio_init fio_syslet_register(void)
315 register_ioengine(&ioengine);
318 static void fio_exit fio_syslet_unregister(void)
320 unregister_ioengine(&ioengine);