/*
- * read/write() engine that uses syslet to be async
+ * syslet engine
+ *
+ * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
+ * with syslets to make the execution async.
*
*/
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
+#include <asm/unistd.h>
#include "../fio.h"
#include "../os.h"
#ifdef FIO_HAVE_SYSLET
+#ifdef __NR_pread64
+#define __NR_fio_pread __NR_pread64
+#define __NR_fio_pwrite __NR_pwrite64
+#else
+#define __NR_fio_pread __NR_pread
+#define __NR_fio_pwrite __NR_pwrite
+#endif
+
/*
 * Per-thread engine state. NOTE(review): this is a patch hunk — the '-'
 * lines are fields the patch removes; the old event_map bookkeeping is
 * replaced by walking the per-io_u atom chain at completion time.
 */
struct syslet_data {
struct io_u **events;	/* completed io_u's, handed out via ->event() */
unsigned int nr_events;	/* current fill level of ->events */
struct syslet_uatom **ring;	/* completion ring shared with the kernel */
struct syslet_uatom *head, *tail;	/* atoms queued but not yet committed */
- struct syslet_uatom **event_map;
- unsigned int event_map_idx;
};
/*
 * NOTE(review): patch hunk. New completion scheme: each io_u records the
 * head of its atom chain (io_u->req.head, set at queue time), so on
 * completion of 'atom' we walk from the chain head up to and including
 * 'atom', reaping every io_u in submission order — this replaces the old
 * event_map search/memmove bookkeeping removed below.
 */
static void fio_syslet_complete_atom(struct thread_data *td,
struct syslet_uatom *atom)
{
struct syslet_data *sd = td->io_ops->data;
+ struct syslet_uatom *last;	/* final atom of the completed sequence */
struct io_u *io_u;
- int i, end;
-
- if (!sd->event_map_idx)
- return;
/*
- * Find the start of the string of atoms for this sequence
+ * complete from the beginning of the sequence up to (and
+ * including) this atom
*/
- for (end = sd->event_map_idx - 1; end >= 0; end--)
- if (atom == sd->event_map[end])
- break;
-
- if (end < 0 || atom != sd->event_map[end]) {
- printf("didn't find atom\n");
- return;
- }
-
- //printf("end=%d, total %d\n", end, sd->event_map_idx);
+ last = atom;
+ io_u = atom->private;
+ atom = io_u->req.head;	/* rewind to the start of this io_u's chain */
/*
* now complete in right order
*/
- for (i = 0; i <= end; i++) {
+ do {
long ret;
- atom = sd->event_map[i];
io_u = atom->private;
ret = *atom->ret_ptr;	/* syscall return: bytes done, or -errno */
- if (ret > 0)
+ if (ret >= 0)	/* NOTE(review): was 'ret > 0' — ret == 0 now sets resid = xfer_buflen, and makes the 'else if' below unreachable (harmless) */
io_u->resid = io_u->xfer_buflen - ret;
else if (ret < 0)
io_u->error = ret;
- assert(sd->nr_events < td->iodepth);
+ assert(sd->nr_events < td->o.iodepth);
sd->events[sd->nr_events++] = io_u;
- }
- /*
- * Move later completions to the front, if we didn't complete all
- */
- if (end == (int) sd->event_map_idx - 1)
- sd->event_map_idx = 0;
- else {
- int nr = sd->event_map_idx - end - 1;
+ if (atom == last)
+ break;
- memmove(sd->event_map, &sd->event_map[end + 1], nr * sizeof(struct syslet_uatom *));
- sd->event_map_idx = nr;
- }
+ atom = atom->next;
+ } while (1);
+
+ assert(!last->next);	/* the completed atom must be the chain tail */
}
/*
break;
sd->ring[sd->ahu.user_ring_idx] = NULL;
- if (++sd->ahu.user_ring_idx == td->iodepth)
+ if (++sd->ahu.user_ring_idx == td->o.iodepth)
sd->ahu.user_ring_idx = 0;
fio_syslet_complete_atom(td, atom);
* prepare rw
*/
if (io_u->ddir == DDIR_READ)
- nr = __NR_pread64;
+ nr = __NR_fio_pread;
else
- nr = __NR_pwrite64;
+ nr = __NR_fio_pwrite;
init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
&io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
/*
 * Returns the top-of-stack address for a new cachemiss thread: stacks grow
 * down, so hand back base + THREAD_STACK_SIZE. NOTE(review): the malloc()
 * result is not checked for NULL, and the stack is never freed here —
 * presumably owned by the async head for the thread's lifetime; confirm.
 */
static unsigned long thread_stack_alloc()
{
- return (unsigned long)malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
+ return (unsigned long) malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
+}
+
+/*
+ * Stamp issue_time and account queue depth for every atom still pending
+ * on the ->head chain, immediately before the chain is committed to the
+ * kernel — all queued io_u's share one gettimeofday snapshot.
+ */
+static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
+{
+ struct syslet_uatom *atom;
+ struct timeval now;
+
+ fio_gettime(&now, NULL);
+
+ atom = sd->head;
+ while (atom) {
+ struct io_u *io_u = atom->private;
+
+ memcpy(&io_u->issue_time, &now, sizeof(now));
+ io_u_queued(td, io_u);
+ atom = atom->next;
+ }
}
static int fio_syslet_commit(struct thread_data *td)
if (!sd->head)
return 0;
+ assert(!sd->tail->next);
+
if (!sd->ahu.new_thread_stack)
sd->ahu.new_thread_stack = thread_stack_alloc();
+ fio_syslet_queued(td, sd);
+
/*
* On sync completion, the atom is returned. So on NULL return
* it's queued asynchronously.
} else
sd->head = sd->tail = &io_u->req.atom;
- sd->event_map[sd->event_map_idx++] = sd->tail;
+ io_u->req.head = sd->head;
return FIO_Q_QUEUED;
}
sd->ahu.completion_ring = sd->ring;
sd->ahu.ring_size_bytes = ring_size;
sd->ahu.head_stack = thread_stack_alloc();
- sd->ahu.head_eip = (unsigned long)cachemiss_thread_start;
- sd->ahu.new_thread_eip = (unsigned long)cachemiss_thread_start;
+ sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
+ sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;
return 0;
}
if (sd) {
async_head_exit(sd);
free(sd->events);
- free(sd->event_map);
free(sd);
td->io_ops->data = NULL;
}
sd = malloc(sizeof(*sd));
memset(sd, 0, sizeof(*sd));
- sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
- memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);
- sd->event_map = malloc(sizeof(struct syslet_uatom *) * td->iodepth);
- memset(sd->event_map, 0, sizeof(struct syslet_uatom *) * td->iodepth);
+ sd->events = malloc(sizeof(struct io_u *) * td->o.iodepth);
+ memset(sd->events, 0, sizeof(struct io_u *) * td->o.iodepth);
/*
* This will handily fail for kernels where syslet isn't available
*/
- if (async_head_init(sd, td->iodepth)) {
+ if (async_head_init(sd, td->o.iodepth)) {
free(sd->events);
free(sd);
return 1;
.getevents = fio_syslet_getevents,
.event = fio_syslet_event,
.cleanup = fio_syslet_cleanup,
+ .open_file = generic_open_file,
+ .close_file = generic_close_file,
};
#else /* FIO_HAVE_SYSLET */