X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=engines%2Fsyslet-rw.c;h=4b1b263691f5825ef247b38a564a8c66f6d4f3cb;hb=5b38ee84f35c7cf3a3a804e6dbe3621f8980d3bb;hp=0263f05ac0f923227a80af0dc50d5fd17bdf3eda;hpb=e49499f8738562e014867ad693a999646d5e1bfb;p=fio.git

diff --git a/engines/syslet-rw.c b/engines/syslet-rw.c
index 0263f05a..4b1b2636 100644
--- a/engines/syslet-rw.c
+++ b/engines/syslet-rw.c
@@ -19,8 +19,50 @@ struct syslet_data {
 
 	struct async_head_user ahu;
 	struct syslet_uatom **ring;
+
+	struct syslet_uatom *head, *tail;
 };
 
+static void fio_syslet_complete_atom(struct thread_data *td,
+				     struct syslet_uatom *atom)
+{
+	struct syslet_data *sd = td->io_ops->data;
+	struct syslet_uatom *last;
+	struct io_u *io_u;
+
+	/*
+	 * complete from the beginning of the sequence up to (and
+	 * including) this atom
+	 */
+	last = atom;
+	io_u = atom->private;
+	atom = io_u->req.head;
+
+	/*
+	 * now complete in right order
+	 */
+	do {
+		long ret;
+
+		io_u = atom->private;
+		ret = *atom->ret_ptr;
+		if (ret > 0)
+			io_u->resid = io_u->xfer_buflen - ret;
+		else if (ret < 0)
+			io_u->error = ret;
+
+		assert(sd->nr_events < td->iodepth);
+		sd->events[sd->nr_events++] = io_u;
+
+		if (atom == last)
+			break;
+
+		atom = atom->next;
+	} while (1);
+
+	assert(!last->next);
+}
+
 /*
  * Inspect the ring to see if we have completed events
  */
@@ -30,8 +72,6 @@ static void fio_syslet_complete(struct thread_data *td)
 
 	do {
 		struct syslet_uatom *atom;
-		struct io_u *io_u;
-		long ret;
 
 		atom = sd->ring[sd->ahu.user_ring_idx];
 		if (!atom)
@@ -41,14 +81,7 @@ static void fio_syslet_complete(struct thread_data *td)
 		if (++sd->ahu.user_ring_idx == td->iodepth)
 			sd->ahu.user_ring_idx = 0;
 
-		io_u = atom->private;
-		ret = *atom->ret_ptr;
-		if (ret > 0)
-			io_u->resid = io_u->xfer_buflen - ret;
-		else if (ret < 0)
-			io_u->error = ret;
-
-		sd->events[sd->nr_events++] = io_u;
+		fio_syslet_complete_atom(td, atom);
 	} while (1);
 }
 
@@ -57,7 +90,6 @@ static int fio_syslet_getevents(struct thread_data *td, int min,
 				struct timespec fio_unused *t)
 {
 	struct syslet_data *sd = td->io_ops->data;
-	int get_events;
 	long ret;
 
 	do {
@@ -72,8 +104,7 @@ static int fio_syslet_getevents(struct thread_data *td, int min,
 		/*
 		 * OK, we need to wait for some events...
 		 */
-		get_events = min - sd->nr_events;
-		ret = async_wait(get_events, sd->ahu.user_ring_idx, &sd->ahu);
+		ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
 		if (ret < 0)
 			return -errno;
 	} while (1);
@@ -146,21 +177,25 @@ static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
 static void cachemiss_thread_start(void)
 {
 	while (1)
-		async_thread();
+		async_thread(NULL, NULL);
 }
 
 #define THREAD_STACK_SIZE (16384)
 
 static unsigned long thread_stack_alloc()
 {
-	return (unsigned long)malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
+	return (unsigned long) malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
 }
 
-static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
+static int fio_syslet_commit(struct thread_data *td)
 {
 	struct syslet_data *sd = td->io_ops->data;
 	struct syslet_uatom *done;
-	long ret;
+
+	if (!sd->head)
+		return 0;
+
+	assert(!sd->tail->next);
 
 	if (!sd->ahu.new_thread_stack)
 		sd->ahu.new_thread_stack = thread_stack_alloc();
@@ -169,29 +204,28 @@ static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
 	 * On sync completion, the atom is returned. So on NULL return
 	 * it's queued asynchronously.
 	 */
-	done = async_exec(&io_u->req.atom, &sd->ahu);
+	done = async_exec(sd->head, &sd->ahu);
 
-	if (!done)
-		return FIO_Q_QUEUED;
+	sd->head = sd->tail = NULL;
 
-	/*
-	 * completed sync
-	 */
-	ret = io_u->req.ret;
-	if (ret != (long) io_u->xfer_buflen) {
-		if (ret > 0) {
-			io_u->resid = io_u->xfer_buflen - ret;
-			io_u->error = 0;
-			return FIO_Q_COMPLETED;
-		} else
-			io_u->error = errno;
-	}
+	if (done)
+		fio_syslet_complete_atom(td, done);
 
-	assert(sd->nr_events < td->iodepth);
+	return 0;
+}
+
+static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
+{
+	struct syslet_data *sd = td->io_ops->data;
 
-	if (io_u->error)
+	if (sd->tail) {
+		sd->tail->next = &io_u->req.atom;
+		sd->tail = &io_u->req.atom;
+	} else
+		sd->head = sd->tail = &io_u->req.atom;
 
-	return FIO_Q_COMPLETED;
+	io_u->req.head = sd->head;
+	return FIO_Q_QUEUED;
 }
 
 static int async_head_init(struct syslet_data *sd, unsigned int depth)
@@ -208,8 +242,8 @@ static int async_head_init(struct syslet_data *sd, unsigned int depth)
 	sd->ahu.completion_ring = sd->ring;
 	sd->ahu.ring_size_bytes = ring_size;
 	sd->ahu.head_stack = thread_stack_alloc();
-	sd->ahu.head_eip = (unsigned long)cachemiss_thread_start;
-	sd->ahu.new_thread_eip = (unsigned long)cachemiss_thread_start;
+	sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
+	sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;
 
 	return 0;
 }
@@ -260,6 +294,7 @@ static struct ioengine_ops ioengine = {
 	.init		= fio_syslet_init,
 	.prep		= fio_syslet_prep,
 	.queue		= fio_syslet_queue,
+	.commit		= fio_syslet_commit,
 	.getevents	= fio_syslet_getevents,
 	.event		= fio_syslet_event,
 	.cleanup	= fio_syslet_cleanup,
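
Note on the change above: ->queue() no longer calls async_exec() for each io_u. It only links the io_u's atom onto a pending chain (sd->head/sd->tail) and records the chain head in io_u->req.head; the new ->commit() hook then submits the whole chain with a single async_exec() call, and completion walks the chain from the recorded head up to (and including) the atom that finished. The sketch below is an illustrative, self-contained model of that queue/commit batching only; the node/batch types and function names are invented for the example and are not fio's or syslet's API.

	/*
	 * Stand-alone sketch of linked-list queue/commit batching.
	 * "struct node" stands in for a queued request (an atom);
	 * "batch_commit" stands in for the single submission call.
	 */
	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	struct node {
		struct node *next;
		int id;
	};

	struct batch {
		struct node *head, *tail;
	};

	/* ->queue() analogue: link one request onto the pending chain, submit nothing yet */
	static void batch_queue(struct batch *b, struct node *n)
	{
		n->next = NULL;
		if (b->tail) {
			b->tail->next = n;
			b->tail = n;
		} else
			b->head = b->tail = n;
	}

	/* ->commit() analogue: hand the whole chain off in one go, then reset the chain */
	static void batch_commit(struct batch *b)
	{
		struct node *n = b->head;

		if (!n)
			return;

		assert(!b->tail->next);
		b->head = b->tail = NULL;

		/* fio hands the chain head to async_exec(); here we just walk it */
		while (n) {
			printf("submitting node %d\n", n->id);
			n = n->next;
		}
	}

	int main(void)
	{
		struct batch b = { NULL, NULL };
		struct node a = { .id = 1 }, c = { .id = 2 }, d = { .id = 3 };

		batch_queue(&b, &a);
		batch_queue(&b, &c);
		batch_queue(&b, &d);
		batch_commit(&b);	/* submits 1, 2, 3 as one chain */
		return 0;
	}

The effect of the batching, as visible in the diff, is that the submission call is made once per ->commit() instead of once per queued io_u.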