io_u = atom->private;
ret = *atom->ret_ptr;
- if (ret > 0)
+ if (ret >= 0)
io_u->resid = io_u->xfer_buflen - ret;
else if (ret < 0)
io_u->error = ret;
return (unsigned long) malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
}
+static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)	/* mark every pending atom's io_u as queued, stamping a shared issue time */
+{
+	struct syslet_uatom *atom;
+	struct timeval now;
+
+	fio_gettime(&now, NULL);	/* one timestamp for the whole batch, taken once outside the loop */
+
+	atom = sd->head;
+	while (atom) {	/* walk the singly linked pending list from head */
+		struct io_u *io_u = atom->private;	/* atom->private carries the owning io_u — see completion path above */
+
+		memcpy(&io_u->issue_time, &now, sizeof(now));	/* record when this io_u was handed to the kernel */
+		io_u_queued(td, io_u);	/* notify fio core for queue-depth/latency accounting */
+		atom = atom->next;
+	}
+}
+
static int fio_syslet_commit(struct thread_data *td)
{
struct syslet_data *sd = td->io_ops->data;
if (!sd->ahu.new_thread_stack)
sd->ahu.new_thread_stack = thread_stack_alloc();
+ fio_syslet_queued(td, sd);
+
/*
* On sync completion, the atom is returned. So on NULL return
* it's queued asynchronously.
.getevents = fio_syslet_getevents,
.event = fio_syslet_event,
.cleanup = fio_syslet_cleanup,
+ .open_file = generic_open_file,
+ .close_file = generic_close_file,
};
#else /* FIO_HAVE_SYSLET */