Fix build of syslet engine on x86-64
[fio.git] / engines / syslet-rw.c
index 4b65b2d3fd5832f65ddbf9e5e35b86de19babc85..b635f6440d1fd1089bfb9150e892f58c3358cc2a 100644
@@ -1,5 +1,8 @@
 /*
- * read/write() engine that uses syslet to be async
+ * syslet engine
+ *
+ * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
+ * uses syslets to make the execution async.
  *
  */
 #include <stdio.h>
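
For readers who have not followed the syslet patches: every system call is described by a small user-space "atom" that the kernel runs on the caller's behalf, handing the work to a cachemiss thread if it blocks so that submission stays asynchronous. The sketch below shows roughly how a single pread(2) could be encoded. The struct layout is an assumption based on the syslet v2 proposal (this diff itself only touches ret_ptr, next and private), and init_pread_atom() is a hypothetical helper, not part of this engine:

#include <asm/unistd.h>
#include <stddef.h>

/* Assumed syslet v2 atom layout -- not taken from this commit. */
struct syslet_uatom {
	unsigned long flags;
	unsigned long nr;		/* syscall number, e.g. __NR_pread64 */
	long *ret_ptr;			/* the kernel stores the return value here */
	struct syslet_uatom *next;	/* next atom in the chain, or NULL */
	unsigned long *arg_ptr[6];	/* pointers to the syscall arguments */
	void *private;			/* opaque cookie; fio stores the io_u here */
};

/* Hypothetical helper: encode one pread(fd, buf, len, off) as an atom. */
static void init_pread_atom(struct syslet_uatom *atom, long *ret,
			    unsigned long *fd, unsigned long *buf,
			    unsigned long *len, unsigned long *off,
			    void *cookie)
{
	atom->flags = 0;
	atom->nr = __NR_pread64;
	atom->ret_ptr = ret;
	atom->next = NULL;
	atom->arg_ptr[0] = fd;
	atom->arg_ptr[1] = buf;
	atom->arg_ptr[2] = len;
	atom->arg_ptr[3] = off;
	atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
	atom->private = cookie;
}
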
@@ -7,6 +10,7 @@
 #include <unistd.h>
 #include <errno.h>
 #include <assert.h>
+#include <asm/unistd.h>
 
 #include "../fio.h"
 #include "../os.h"
@@ -17,11 +21,52 @@ struct syslet_data {
        struct io_u **events;
        unsigned int nr_events;
        
-       struct async_head_user *ahu;
+       struct async_head_user ahu;
        struct syslet_uatom **ring;
-       unsigned int ring_index;
+
+       struct syslet_uatom *head, *tail;
 };
 
+static void fio_syslet_complete_atom(struct thread_data *td,
+                                    struct syslet_uatom *atom)
+{
+       struct syslet_data *sd = td->io_ops->data;
+       struct syslet_uatom *last;
+       struct io_u *io_u;
+
+       /*
+        * complete from the beginning of the sequence up to (and
+        * including) this atom
+        */
+       last = atom;
+       io_u = atom->private;
+       atom = io_u->req.head;
+
+       /*
+        * now complete in right order
+        */
+       do {
+               long ret;
+
+               io_u = atom->private;
+               ret = *atom->ret_ptr;
+               if (ret >= 0)
+                       io_u->resid = io_u->xfer_buflen - ret;
+               else
+                       io_u->error = -ret;
+
+               assert(sd->nr_events < td->iodepth);
+               sd->events[sd->nr_events++] = io_u;
+
+               if (atom == last)
+                       break;
+
+               atom = atom->next;
+       } while (1);
+
+       assert(!last->next);
+}
+
 /*
  * Inspect the ring to see if we have completed events
  */
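
fio_syslet_complete_atom() relies on every io_u remembering both its own atom and the first atom of the chain it was submitted in (io_u->req.head, filled in by fio_syslet_queue() further down), so one ring entry can be expanded into in-order completions for the whole chain. The request state itself is defined elsewhere in fio; a minimal sketch of what this engine assumes it to contain (member names taken from this file, layout assumed):

/* Sketch only -- the real struct lives in fio's headers, not in this file. */
struct syslet_req {
	struct syslet_uatom atom;	/* this io_u's syscall atom */
	struct syslet_uatom *head;	/* first atom of the submitted chain */
	long ret;			/* return value, also visible via *atom.ret_ptr */
};
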
@@ -31,25 +76,16 @@ static void fio_syslet_complete(struct thread_data *td)
 
        do {
                struct syslet_uatom *atom;
-               struct io_u *io_u;
-               long ret;
 
-               atom = sd->ring[sd->ring_index];
+               atom = sd->ring[sd->ahu.user_ring_idx];
                if (!atom)
                        break;
 
-               sd->ring[sd->ring_index] = NULL;
-               if (++sd->ring_index == td->iodepth)
-                       sd->ring_index = 0;
+               sd->ring[sd->ahu.user_ring_idx] = NULL;
+               if (++sd->ahu.user_ring_idx == td->iodepth)
+                       sd->ahu.user_ring_idx = 0;
 
-               io_u = atom->private;
-               ret = *atom->ret_ptr;
-               if (ret > 0)
-                       io_u->resid = io_u->xfer_buflen - ret;
-               else if (ret < 0)
-                       io_u->error = ret;
-
-               sd->events[sd->nr_events++] = io_u;
+               fio_syslet_complete_atom(td, atom);
        } while (1);
 }
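
The ring drained by fio_syslet_complete() is the shared completion ring described by struct async_head_user. Its authoritative definition lives in the syslet kernel headers; judging only from the fields this engine touches, it has roughly the shape sketched below (field names from this file, ordering and types assumed):

/* Assumed shape of the shared submission/completion descriptor. */
struct async_head_user {
	struct syslet_uatom **completion_ring;	/* kernel posts completed atoms here */
	unsigned long ring_size_bytes;		/* size of completion_ring in bytes */
	unsigned long user_ring_idx;		/* next slot user space will consume */
	unsigned long head_stack;		/* stack for the submitting (head) task */
	unsigned long head_eip;			/* user-space resume address for the head */
	unsigned long new_thread_stack;		/* stack for the next cachemiss thread */
	unsigned long new_thread_eip;		/* entry point for new cachemiss threads */
};
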
 
@@ -58,7 +94,6 @@ static int fio_syslet_getevents(struct thread_data *td, int min,
                                struct timespec fio_unused *t)
 {
        struct syslet_data *sd = td->io_ops->data;
-       int get_events;
        long ret;
 
        do {
@@ -73,10 +108,9 @@ static int fio_syslet_getevents(struct thread_data *td, int min,
                /*
                 * OK, we need to wait for some events...
                 */
-               get_events = min - sd->nr_events;
-               ret = async_wait(get_events);
+               ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
                if (ret < 0)
-                       return errno;
+                       return -errno;
        } while (1);
 
        ret = sd->nr_events;
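
async_wait() here, like async_exec() and async_thread() used further down, is a thin wrapper around the corresponding syslet system call and is declared elsewhere in fio. The prototypes below are only inferred from the call sites in this engine, not copied from those headers:

/* Submit a chain of atoms; returns the atom if it completed synchronously,
 * or NULL if it was queued and will show up in the completion ring. */
struct syslet_uatom *async_exec(struct syslet_uatom *atom,
				struct async_head_user *ahu);

/* Block until at least min_wait new completions are available, scanning
 * the ring from user_ring_idx; returns a negative value on error. */
long async_wait(unsigned long min_wait, unsigned long user_ring_idx,
		struct async_head_user *ahu);

/* Body of a cachemiss thread; argument types guessed from the NULL, NULL
 * call in cachemiss_thread_start() below. */
long async_thread(void *unused1, void *unused2);
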
@@ -144,71 +178,101 @@ static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
        return 0;
 }
 
-static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
+static void cachemiss_thread_start(void)
+{
+       while (1)
+               async_thread(NULL, NULL);
+}
+
+#define THREAD_STACK_SIZE (16384)
+
+static unsigned long thread_stack_alloc(void)
+{
+       return (unsigned long) malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
+}
+
+static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
+{
+       struct syslet_uatom *atom;
+       struct timeval now;
+
+       fio_gettime(&now, NULL);
+
+       atom = sd->head;
+       while (atom) {
+               struct io_u *io_u = atom->private;
+
+               memcpy(&io_u->issue_time, &now, sizeof(now));
+               io_u_queued(td, io_u);
+               atom = atom->next;
+       }
+}
+
+static int fio_syslet_commit(struct thread_data *td)
 {
        struct syslet_data *sd = td->io_ops->data;
-       long ret;
+       struct syslet_uatom *done;
+
+       if (!sd->head)
+               return 0;
+
+       assert(!sd->tail->next);
+
+       if (!sd->ahu.new_thread_stack)
+               sd->ahu.new_thread_stack = thread_stack_alloc();
+
+       fio_syslet_queued(td, sd);
 
        /*
         * On sync completion, the atom is returned. So on NULL return
         * it's queued asynchronously.
         */
-       if (!async_exec(&io_u->req.atom))
-               return 0;
+       done = async_exec(sd->head, &sd->ahu);
 
-       /*
-        * completed sync
-        */
-       ret = io_u->req.ret;
-       if (ret != (long) io_u->xfer_buflen) {
-               if (ret > 0) {
-                       io_u->resid = io_u->xfer_buflen - ret;
-                       io_u->error = 0;
-                       return ret;
-               } else
-                       io_u->error = errno;
-       }
+       sd->head = sd->tail = NULL;
 
-       if (!io_u->error)
-               sd->events[sd->nr_events++] = io_u;
-       else
-               td_verror(td, io_u->error);
+       if (done)
+               fio_syslet_complete_atom(td, done);
+
+       return 0;
+}
 
-       return io_u->error;
+static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
+{
+       struct syslet_data *sd = td->io_ops->data;
+
+       if (sd->tail) {
+               sd->tail->next = &io_u->req.atom;
+               sd->tail = &io_u->req.atom;
+       } else
+               sd->head = sd->tail = &io_u->req.atom;
+
+       io_u->req.head = sd->head;
+       return FIO_Q_QUEUED;
 }
 
 static int async_head_init(struct syslet_data *sd, unsigned int depth)
 {
        unsigned long ring_size;
 
-       sd->ahu = malloc(sizeof(struct async_head_user));
-       memset(sd->ahu, 0, sizeof(struct async_head_user));
+       memset(&sd->ahu, 0, sizeof(struct async_head_user));
 
        ring_size = sizeof(struct syslet_uatom *) * depth;
        sd->ring = malloc(ring_size);
        memset(sd->ring, 0, ring_size);
 
-       sd->ahu->completion_ring = sd->ring;
-       sd->ahu->ring_size_bytes = ring_size;
-       sd->ahu->max_nr_threads = -1;
-
-       if (async_register(sd->ahu, sizeof(*sd->ahu)) < 0) {
-               perror("async_register");
-               fprintf(stderr, "fio: syslet likely not supported\n");
-               free(sd->ring);
-               free(sd->ahu);
-               return 1;
-       }
+       sd->ahu.user_ring_idx = 0;
+       sd->ahu.completion_ring = sd->ring;
+       sd->ahu.ring_size_bytes = ring_size;
+       sd->ahu.head_stack = thread_stack_alloc();
+       sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
+       sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;
 
        return 0;
 }
 
 static void async_head_exit(struct syslet_data *sd)
 {
-       if (async_unregister(sd->ahu, sizeof(*sd->ahu)) < 0)
-               perror("async_register");
-
-       free(sd->ahu);
        free(sd->ring);
 }
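
The hunk above also splits submission in two: ->queue() only links the io_u's atom onto a per-thread head/tail chain and returns FIO_Q_QUEUED, while ->commit() later hands the whole chain to the kernel with a single async_exec() call. A detail worth spelling out is thread_stack_alloc(): it returns the address just past the end of the malloc()ed region because the stack grows towards lower addresses on x86-64, so that address is what the kernel should load as the initial stack pointer of a cachemiss thread. Below is a standalone sketch of the same idea (alloc_stack_top() is a made-up name, and the failure check is added here for illustration; the engine itself assumes malloc() succeeds):

#include <stdlib.h>

#define THREAD_STACK_SIZE	16384

/* Return the top of a freshly allocated stack, or 0 on failure.  The top
 * is handed out because x86-64 stacks grow towards lower addresses. */
static unsigned long alloc_stack_top(void)
{
	void *base = malloc(THREAD_STACK_SIZE);

	if (!base)
		return 0;

	return (unsigned long) base + THREAD_STACK_SIZE;
}
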
 
@@ -253,9 +317,12 @@ static struct ioengine_ops ioengine = {
        .init           = fio_syslet_init,
        .prep           = fio_syslet_prep,
        .queue          = fio_syslet_queue,
+       .commit         = fio_syslet_commit,
        .getevents      = fio_syslet_getevents,
        .event          = fio_syslet_event,
        .cleanup        = fio_syslet_cleanup,
+       .open_file      = generic_open_file,
+       .close_file     = generic_close_file,
 };
 
 #else /* FIO_HAVE_SYSLET */