 	struct io_u **events;
 	unsigned int nr_events;
-	struct async_head_user *ahu;
+	struct async_head_user ahu;
 	struct syslet_uatom **ring;
-	unsigned int ring_index;
 };

 /*
 		struct io_u *io_u;
 		long ret;

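+		/*
+		 * The kernel posts completed atoms into the shared
+		 * completion ring: reap the next slot, clear it, and wrap
+		 * the index once it hits the queue depth.
+		 */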
-		atom = sd->ring[sd->ring_index];
+		atom = sd->ring[sd->ahu.user_ring_idx];
 		if (!atom)
 			break;

-		sd->ring[sd->ring_index] = NULL;
-		if (++sd->ring_index == td->iodepth)
-			sd->ring_index = 0;
+		sd->ring[sd->ahu.user_ring_idx] = NULL;
+		if (++sd->ahu.user_ring_idx == td->iodepth)
+			sd->ahu.user_ring_idx = 0;

 		io_u = atom->private;
 		ret = *atom->ret_ptr;
 		/*
 		 * OK, we need to wait for some events...
 		 */
 		get_events = min - sd->nr_events;
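+		/*
+		 * Hand the kernel our current ring position along with the
+		 * head, so it can tell which completions we have already
+		 * reaped.
+		 */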
-		ret = async_wait(get_events);
+		ret = async_wait(get_events, sd->ahu.user_ring_idx, &sd->ahu);
 		if (ret < 0)
-			return errno;
+			return -errno;
 	} while (1);

 	ret = sd->nr_events;
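+/*
+ * A single atom describes one syscall: the argument pointers, a ret_ptr
+ * the kernel stores the syscall return value through, and a private
+ * pointer that carries the owning io_u back to us on completion.
+ */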
 static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
 {
 	init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
-		  &io_u->req.ret, SYSLET_STOP_ON_NEGATIVE, io_u);
+		  &io_u->req.ret, 0, io_u);
 }
 static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
 		nr = __NR_pwrite64;

 	init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
-		  &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret,
-		  SYSLET_STOP_ON_NEGATIVE, io_u);
+		  &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
 }
 static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
 	return 0;
 }
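+/*
+ * Async threads are handed this entry point: each one immediately parks
+ * itself back in the kernel via async_thread(), ready to run whenever a
+ * queued atom blocks.
+ */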
+static void cachemiss_thread_start(void)
+{
+	while (1)
+		async_thread();
+}
+
+#define THREAD_STACK_SIZE (16384)
+
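+/*
+ * Stacks grow down on x86, so hand back the top of the allocation as
+ * the initial stack pointer.
+ */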
+static unsigned long thread_stack_alloc(void)
+{
+	return (unsigned long)malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
+}
+
 static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
 {
 	struct syslet_data *sd = td->io_ops->data;
 	struct syslet_uatom *done;
 	long ret;

-	done = async_exec(&io_u->req.atom);
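+	/*
+	 * The kernel consumes (and clears) ahu.new_thread_stack when it
+	 * has to set up a new async thread, so top it up before queueing
+	 * more work.
+	 */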
+	if (!sd->ahu.new_thread_stack)
+		sd->ahu.new_thread_stack = thread_stack_alloc();
+
+	/*
+	 * On sync completion, the atom is returned. A NULL return means
+	 * the request was queued asynchronously.
+	 */
+	done = async_exec(&io_u->req.atom, &sd->ahu);
+
 	if (!done)
-		return 0;
+		return FIO_Q_QUEUED;
 	/*
 	 * completed sync
 	 */
 		if (ret > 0) {
 			io_u->resid = io_u->xfer_buflen - ret;
 			io_u->error = 0;
-			return ret;
+			return FIO_Q_COMPLETED;
 		} else
 			io_u->error = errno;
 	}
-	if (!io_u->error)
-		sd->events[sd->nr_events++] = io_u;
-	else
+	assert(sd->nr_events < td->iodepth);
+
+	if (io_u->error)
 		td_verror(td, io_u->error);
-	return io_u->error;
+	return FIO_Q_COMPLETED;
 }
 static int async_head_init(struct syslet_data *sd, unsigned int depth)
 {
 	unsigned long ring_size;

-	sd->ahu = malloc(sizeof(struct async_head_user));
-	memset(sd->ahu, 0, sizeof(struct async_head_user));
+	memset(&sd->ahu, 0, sizeof(struct async_head_user));

 	ring_size = sizeof(struct syslet_uatom *) * depth;
 	sd->ring = malloc(ring_size);
 	memset(sd->ring, 0, ring_size);

-	sd->ahu->completion_ring = sd->ring;
-	sd->ahu->ring_size_bytes = ring_size;
-	sd->ahu->max_nr_threads = -1;
-
-	if (async_register(sd->ahu, sizeof(*sd->ahu)) < 0) {
-		perror("async_register");
-		fprintf(stderr, "fio: syslet likely not supported\n");
-		free(sd->ring);
-		free(sd->ahu);
-		return 1;
-	}
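+	/*
+	 * Point the async head at the completion ring, and give both the
+	 * head and any future async threads a stack and an entry point.
+	 */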
+	sd->ahu.user_ring_idx = 0;
+	sd->ahu.completion_ring = sd->ring;
+	sd->ahu.ring_size_bytes = ring_size;
+	sd->ahu.head_stack = thread_stack_alloc();
+	sd->ahu.head_eip = (unsigned long)cachemiss_thread_start;
+	sd->ahu.new_thread_eip = (unsigned long)cachemiss_thread_start;

 	return 0;
 }
 static void async_head_exit(struct syslet_data *sd)
 {
-	if (async_unregister(sd->ahu, sizeof(*sd->ahu)) < 0)
-		perror("async_register");
+	free(sd->ring);
 }

 static void fio_syslet_cleanup(struct thread_data *td)