Update syslet-rw to fixed size ABI structures
author: Zach Brown <zach.brown@oracle.com>
Tue, 18 Sep 2007 23:28:48 +0000 (16:28 -0700)
committer: Jens Axboe <jens.axboe@oracle.com>
Wed, 19 Sep 2007 16:30:17 +0000 (18:30 +0200)
The syslet system calls moved to using fixed size members of structures.  This
updates the syslet-rw engine to match.  syslet.h was copied from the kernel and
then uXX was replaced with uintXX_t.  Casts were added to move between integers
and pointers.

Signed-off-by: Zach Brown <zach.brown@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
engines/syslet-rw.c
syslet.h

index 83c9842404ed424fc6512828510cd7d7498de54c..6709015761bf2e80274ec424d4beff50dae3dc34 100644 (file)
@@ -46,7 +46,7 @@ static void fio_syslet_complete_atom(struct thread_data *td,
         * including) this atom
         */
        last = atom;
-       io_u = atom->private;
+       io_u = (struct io_u *)atom->private;
        atom = io_u->req.head;
 
        /*
@@ -55,8 +55,8 @@ static void fio_syslet_complete_atom(struct thread_data *td,
        do {
                long ret;
 
-               io_u = atom->private;
-               ret = *atom->ret_ptr;
+               io_u = (struct io_u *)atom->private;
+               ret = *(long *)atom->ret_ptr;
                if (ret >= 0)
                        io_u->resid = io_u->xfer_buflen - ret;
                else if (ret < 0)
@@ -68,7 +68,7 @@ static void fio_syslet_complete_atom(struct thread_data *td,
                if (atom == last)
                        break;
 
-               atom = atom->next;
+               atom = (struct syslet_uatom *)atom->next;
        } while (1);
 
        assert(!last->next);
@@ -138,14 +138,15 @@ static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
 {
        atom->flags = flags;
        atom->nr = nr;
-       atom->ret_ptr = ret_ptr;
-       atom->next = NULL;
-       atom->arg_ptr[0] = arg0;
-       atom->arg_ptr[1] = arg1;
-       atom->arg_ptr[2] = arg2;
-       atom->arg_ptr[3] = arg3;
-       atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
-       atom->private = priv;
+       atom->ret_ptr = (uint64_t)ret_ptr;
+       atom->next = 0;
+       atom->arg_ptr[0] = (uint64_t)arg0;
+       atom->arg_ptr[1] = (uint64_t)arg1;
+       atom->arg_ptr[2] = (uint64_t)arg2;
+       atom->arg_ptr[3] = (uint64_t)arg3;
+       atom->arg_ptr[4] = 0;
+       atom->arg_ptr[5] = 0;
+       atom->private = (uint64_t)priv;
 }
 
 /*
@@ -207,11 +208,11 @@ static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
 
        atom = sd->head;
        while (atom) {
-               struct io_u *io_u = atom->private;
+               struct io_u *io_u = (struct io_u *)atom->private;
 
                memcpy(&io_u->issue_time, &now, sizeof(now));
                io_u_queued(td, io_u);
-               atom = atom->next;
+               atom = (struct syslet_uatom *)atom->next;
        }
 }
 
@@ -256,10 +257,10 @@ static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
        fio_ro_check(td, io_u);
 
        if (sd->tail) {
-               sd->tail->next = &io_u->req.atom;
+               sd->tail->next = (uint64_t)&io_u->req.atom;
                sd->tail = &io_u->req.atom;
        } else
-               sd->head = sd->tail = &io_u->req.atom;
+               sd->head = sd->tail = (struct syslet_uatom *)&io_u->req.atom;
 
        io_u->req.head = sd->head;
        return FIO_Q_QUEUED;
@@ -276,11 +277,11 @@ static int async_head_init(struct syslet_data *sd, unsigned int depth)
        memset(sd->ring, 0, ring_size);
 
        sd->ahu.user_ring_idx = 0;
-       sd->ahu.completion_ring = sd->ring;
+       sd->ahu.completion_ring_ptr = (uint64_t)sd->ring;
        sd->ahu.ring_size_bytes = ring_size;
        sd->ahu.head_stack = thread_stack_alloc();
-       sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
-       sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;
+       sd->ahu.head_ip = (uint64_t)cachemiss_thread_start;
+       sd->ahu.new_thread_ip = (uint64_t)cachemiss_thread_start;
        sd->ahu.new_thread_stack = thread_stack_alloc();
 
        return 0;
index 84dd1c756d417e69c866e3410a8dfcbc7dc0fc81..ded5c4a778192271574cb73e56d4ab7e43ee7a4d 100644 (file)
--- a/syslet.h
+++ b/syslet.h
  *   jump a full syslet_uatom number of bytes.) ]
  */
 struct syslet_uatom {
-       unsigned long                           flags;
-       unsigned long                           nr;
-       long __user                             *ret_ptr;
-       struct syslet_uatom     __user          *next;
-       unsigned long           __user          *arg_ptr[6];
+       uint32_t                flags;
+       uint32_t                nr;
+       uint64_t                ret_ptr;
+       uint64_t                next;
+       uint64_t                arg_ptr[6];
        /*
         * User-space can put anything in here, kernel will not
         * touch it:
         */
-       void __user                             *private;
+       uint64_t                private;
 };
 
 /*
@@ -111,11 +111,11 @@ struct async_head_user {
        /*
         * Current completion ring index - managed by the kernel:
         */
-       unsigned long                           kernel_ring_idx;
+       uint64_t                kernel_ring_idx;
        /*
         * User-side ring index:
         */
-       unsigned long                           user_ring_idx;
+       uint64_t                user_ring_idx;
 
        /*
         * Ring of pointers to completed async syslets (i.e. syslets that
@@ -127,20 +127,20 @@ struct async_head_user {
         * Note: the final atom that generated the exit condition is
         * queued here. Normally this would be the last atom of a syslet.
         */
-       struct syslet_uatom __user              **completion_ring;
+       uint64_t                completion_ring_ptr;
 
        /*
         * Ring size in bytes:
         */
-       unsigned long                           ring_size_bytes;
+       uint64_t                ring_size_bytes;
 
        /*
         * The head task can become a cachemiss thread later on
         * too, if it blocks - so it needs its separate thread
         * stack and start address too:
         */
-       unsigned long                           head_stack;
-       unsigned long                           head_eip;
+       uint64_t                head_stack;
+       uint64_t                head_ip;
 
        /*
         * Newly started async kernel threads will take their
@@ -148,8 +148,8 @@ struct async_head_user {
         * code has to check for new_thread_stack going to NULL
         * and has to refill it with a new stack if that happens.
         */
-       unsigned long                           new_thread_stack;
-       unsigned long                           new_thread_eip;
+       uint64_t                new_thread_stack;
+       uint64_t                new_thread_ip;
 };
 
 #endif