+/*
+ * Just check for SPLICE_F_MOVE, if that isn't there, assume the others
+ * aren't either.
+ */
+#ifndef SPLICE_F_MOVE
+#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
				 /* we may still block on the fd we splice */
				 /* from/to, of course) */
+#define SPLICE_F_MORE (0x04) /* expect more data */
+#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */
+
+static inline int splice(int fdin, loff_t *off_in, int fdout, loff_t *off_out,
+ size_t len, unsigned long flags)
+{
+ return syscall(__NR_sys_splice, fdin, off_in, fdout, off_out, len, flags);
+}
+
+static inline int tee(int fdin, int fdout, size_t len, unsigned int flags)
+{
+ return syscall(__NR_sys_tee, fdin, fdout, len, flags);
+}
+
+static inline int vmsplice(int fd, const struct iovec *iov,
+ unsigned long nr_segs, unsigned int flags)
+{
+ return syscall(__NR_sys_vmsplice, fd, iov, nr_segs, flags);
+}
+#endif
+
+#define SPLICE_DEF_SIZE (64*1024)
+
+#ifdef FIO_HAVE_SYSLET
+
+struct syslet_uatom;
+struct async_head_user;
+
+/*
+ * syslet stuff
+ */
+static inline struct syslet_uatom *
+async_exec(struct syslet_uatom *atom, struct async_head_user *ahu)
+{
+ return (void *) syscall(__NR_async_exec, atom, ahu);
+}
+
+static inline long
+async_wait(unsigned long min_wait_events, unsigned long user_ring_idx,
+ struct async_head_user *ahu)
+{
+ return syscall(__NR_async_wait, min_wait_events,
+ user_ring_idx, ahu);
+}
+
+static inline long async_thread(void *event, struct async_head_user *ahu)
+{
+ return syscall(__NR_async_thread, event, ahu);
+}
+
+static inline long umem_add(unsigned long *uptr, unsigned long inc)
+{
+ return syscall(__NR_umem_add, uptr, inc);
+}
+#endif /* FIO_HAVE_SYSLET */
+