struct io_u **events;
unsigned int nr_events;
- struct async_head_user *ahu;
+ struct async_head_user ahu;
struct syslet_uatom **ring;
- unsigned int ring_index;
+
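+ /* chain of atoms queued but not yet submitted; flushed by ->commit() */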
+ struct syslet_uatom *head, *tail;
};
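+/*
+ * Completion of an atom implies every earlier atom in its chain has
+ * completed too, since a chained syslet runs its atoms sequentially;
+ * reap them all in submit order.
+ */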
+static void fio_syslet_complete_atom(struct thread_data *td,
+ struct syslet_uatom *atom)
+{
+ struct syslet_data *sd = td->io_ops->data;
+ struct syslet_uatom *last;
+ struct io_u *io_u;
+
+ /*
+ * complete from the beginning of the sequence up to (and
+ * including) this atom
+ */
+ last = atom;
+ io_u = atom->private;
+ atom = io_u->req.head;
+
+ /*
+ * now complete in right order
+ */
+ do {
+ long ret;
+
+ io_u = atom->private;
+ ret = *atom->ret_ptr;
+ if (ret >= 0)
+ io_u->resid = io_u->xfer_buflen - ret;
+ else
+ io_u->error = -ret;
+
+ assert(sd->nr_events < td->iodepth);
+ sd->events[sd->nr_events++] = io_u;
+
+ if (atom == last)
+ break;
+
+ atom = atom->next;
+ } while (1);
+
+ assert(!last->next);
+}
+
/*
* Inspect the ring to see if we have completed events
*/
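+ /* user_ring_idx is our consumer index into the shared completion ring */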
do {
struct syslet_uatom *atom;
- struct io_u *io_u;
- long ret;
- atom = sd->ring[sd->ring_index];
+ atom = sd->ring[sd->ahu.user_ring_idx];
if (!atom)
break;
- sd->ring[sd->ring_index] = NULL;
- if (++sd->ring_index == td->iodepth)
- sd->ring_index = 0;
-
- io_u = atom->private;
- ret = *atom->ret_ptr;
- if (ret > 0)
- io_u->resid = io_u->xfer_buflen - ret;
- else if (ret < 0)
- io_u->error = ret;
+ sd->ring[sd->ahu.user_ring_idx] = NULL;
+ if (++sd->ahu.user_ring_idx == td->iodepth)
+ sd->ahu.user_ring_idx = 0;
- sd->events[sd->nr_events++] = io_u;
+ fio_syslet_complete_atom(td, atom);
} while (1);
}
struct timespec fio_unused *t)
{
struct syslet_data *sd = td->io_ops->data;
- int get_events;
long ret;
do {
fio_syslet_complete(td);

/*
* do we have enough immediate completions?
*/
if (sd->nr_events >= (unsigned int) min)
break;

/*
* OK, we need to wait for some events...
*/
- get_events = min - sd->nr_events;
- ret = async_wait(get_events);
+ ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
if (ret < 0)
- return errno;
+ return -errno;
} while (1);
ret = sd->nr_events;
sd->nr_events = 0;
return ret;
}
-static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
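+/*
+ * Entry point for the async threads the kernel starts on our behalf:
+ * loop handing the thread back to the kernel's async pool.
+ */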
+static void cachemiss_thread_start(void)
+{
+ while (1)
+ async_thread(NULL, NULL);
+}
+
+#define THREAD_STACK_SIZE (16384)
+
+static unsigned long thread_stack_alloc(void)
+{
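+ /* the stack grows down, so hand out the top of the allocated region */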
+ return (unsigned long) malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
+}
+
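+/*
+ * Stamp issue time on every io_u in the pending chain, right before
+ * the chain is handed to the kernel.
+ */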
+static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
+{
+ struct syslet_uatom *atom;
+ struct timeval now;
+
+ fio_gettime(&now, NULL);
+
+ atom = sd->head;
+ while (atom) {
+ struct io_u *io_u = atom->private;
+
+ memcpy(&io_u->issue_time, &now, sizeof(now));
+ io_u_queued(td, io_u);
+ atom = atom->next;
+ }
+}
+
+static int fio_syslet_commit(struct thread_data *td)
{
struct syslet_data *sd = td->io_ops->data;
- long ret;
+ struct syslet_uatom *done;
+
+ if (!sd->head)
+ return 0;
+
+ assert(!sd->tail->next);
+
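+ /*
+ * Replenish the stack the kernel uses when it has to spawn a new
+ * async thread for us.
+ */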
+ if (!sd->ahu.new_thread_stack)
+ sd->ahu.new_thread_stack = thread_stack_alloc();
+
+ fio_syslet_queued(td, sd);
/*
* On sync completion, the atom is returned. So on NULL return
* it's queued asynchronously.
*/
- if (!async_exec(&io_u->req.atom))
- return 0;
+ done = async_exec(sd->head, &sd->ahu);
- /*
- * completed sync
- */
- ret = io_u->req.ret;
- if (ret != (long) io_u->xfer_buflen) {
- if (ret > 0) {
- io_u->resid = io_u->xfer_buflen - ret;
- io_u->error = 0;
- return ret;
- } else
- io_u->error = errno;
- }
+ sd->head = sd->tail = NULL;
- if (!io_u->error)
- sd->events[sd->nr_events++] = io_u;
- else
- td_verror(td, io_u->error);
+ if (done)
+ fio_syslet_complete_atom(td, done);
+
+ return 0;
+}
- return io_u->error;
+static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
+{
+ struct syslet_data *sd = td->io_ops->data;
+
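+ /* link into the pending chain; actual submission is deferred to ->commit() */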
+ if (sd->tail) {
+ sd->tail->next = &io_u->req.atom;
+ sd->tail = &io_u->req.atom;
+ } else
+ sd->head = sd->tail = &io_u->req.atom;
+
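+ /* remember the chain head so completion can reap in submit order */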
+ io_u->req.head = sd->head;
+ return FIO_Q_QUEUED;
}
static int async_head_init(struct syslet_data *sd, unsigned int depth)
{
unsigned long ring_size;
- sd->ahu = malloc(sizeof(struct async_head_user));
- memset(sd->ahu, 0, sizeof(struct async_head_user));
+ memset(&sd->ahu, 0, sizeof(struct async_head_user));
ring_size = sizeof(struct syslet_uatom *) * depth;
sd->ring = malloc(ring_size);
memset(sd->ring, 0, ring_size);
- sd->ahu->completion_ring = sd->ring;
- sd->ahu->ring_size_bytes = ring_size;
- sd->ahu->max_nr_threads = -1;
-
- if (async_register(sd->ahu, sizeof(*sd->ahu)) < 0) {
- perror("async_register");
- fprintf(stderr, "fio: syslet likely not supported\n");
- free(sd->ring);
- free(sd->ahu);
- return 1;
- }
+ sd->ahu.user_ring_idx = 0;
+ sd->ahu.completion_ring = sd->ring;
+ sd->ahu.ring_size_bytes = ring_size;
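+ /* stacks/entry points for the head task shadow and new async threads */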
+ sd->ahu.head_stack = thread_stack_alloc();
+ sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
+ sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;
return 0;
}
static void async_head_exit(struct syslet_data *sd)
{
- if (async_unregister(sd->ahu, sizeof(*sd->ahu)) < 0)
- perror("async_register");
-
- free(sd->ahu);
free(sd->ring);
}
.init = fio_syslet_init,
.prep = fio_syslet_prep,
.queue = fio_syslet_queue,
+ .commit = fio_syslet_commit,
.getevents = fio_syslet_getevents,
.event = fio_syslet_event,
.cleanup = fio_syslet_cleanup,
+ .open_file = generic_open_file,
+ .close_file = generic_close_file,
};
#else /* FIO_HAVE_SYSLET */