ifndef CONFIG_INET_ATON
SOURCE += lib/inet_aton.c
endif
+ifdef CONFIG_GFAPI
+ SOURCE += engines/glusterfs.c
+ SOURCE += engines/glusterfs_sync.c
+ SOURCE += engines/glusterfs_async.c
+ ifdef CONFIG_GF_FADVISE
+ CFLAGS += "-DGFAPI_USE_FADVISE"
+ endif
+endif
ifeq ($(CONFIG_TARGET_OS), Linux)
SOURCE += diskutil.c fifo.c blktrace.c cgroup.c trim.c engines/sg.c \
fi
echo "setvbuf $setvbuf"
+# check for gfapi
+gfapi="no"
+cat > $TMPC << EOF
+#include <glusterfs/api/glfs.h>
+
+int main(int argc, char **argv)
+{
+
+	glfs_t *g = glfs_new("foo");
+
+	return 0;
+}
+EOF
+if compile_prog "" "-lgfapi -lglusterfs" "gfapi"; then
+	LIBS="-lgfapi -lglusterfs $LIBS"
+	gfapi="yes"
+fi
+echo "Gluster API engine            $gfapi"
+
+##########################################
+# check for gfapi fadvise support
+gf_fadvise="no"
+cat > $TMPC << EOF
+#include <glusterfs/api/glfs.h>
+
+int main(int argc, char **argv)
+{
+	struct glfs_fd *fd;
+	int ret = glfs_fadvise(fd, 0, 0, 1);
+
+	return 0;
+}
+EOF
+
+# distinct probe label so this check is distinguishable from the base gfapi one
+if compile_prog "" "-lgfapi -lglusterfs" "gfapi fadvise"; then
+	gf_fadvise="yes"
+fi
+echo "Gluster API use fadvise       $gf_fadvise"
##########################################
# Check if we support stckf on s390
s390_z196_facilities="no"
output_sym "CONFIG_S390_Z196_FACILITIES"
CFLAGS="$CFLAGS -march=z9-109"
fi
+if test "$gfapi" = "yes" ; then
+ output_sym "CONFIG_GFAPI"
+fi
+if test "$gf_fadvise" = "yes" ; then
+ output_sym "CONFIG_GF_FADVISE"
+fi
echo "LIBS+=$LIBS" >> $config_host_mak
echo "CFLAGS+=$CFLAGS" >> $config_host_mak
--- /dev/null
+#ifndef FIO_GFAPI_H
+#define FIO_GFAPI_H
+
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include "../fio.h"
+
+/* per-job option values shared by the gfapi sync and async engines */
+struct gf_options {
+	struct thread_data *td;
+	char *gf_vol;		/* gluster volume name */
+	char *gf_brick;		/* brick (volfile server) to connect to */
+};
+
+/* per-thread engine state shared by the gfapi sync and async engines */
+struct gf_data {
+	glfs_t *fs;		/* glusterfs connection handle */
+	glfs_fd_t *fd;		/* currently open file on the volume */
+	struct io_u **aio_events;	/* reaped completions (async engine only) */
+};
+
+extern struct fio_option gfapi_options[];
+extern int fio_gf_setup(struct thread_data *td);
+extern void fio_gf_cleanup(struct thread_data *td);
+extern int fio_gf_get_file_size(struct thread_data *td, struct fio_file *f);
+extern int fio_gf_open_file(struct thread_data *td, struct fio_file *f);
+extern int fio_gf_close_file(struct thread_data *td, struct fio_file *f);
+
+#endif /* FIO_GFAPI_H */
--- /dev/null
+/*
+ * glusterfs engine
+ *
+ * common Glusterfs's gfapi interface
+ *
+ */
+
+#include "gfapi.h"
+
+/*
+ * Engine-specific job options, shared by the gfapi sync and async engines.
+ * The option values land in struct gf_options (see gfapi.h) via off1.
+ */
+struct fio_option gfapi_options[] = {
+ {
+ .name = "volume",
+ .lname = "Glusterfs volume",
+ .type = FIO_OPT_STR_STORE,
+ .help = "Name of the Glusterfs volume",
+ .off1 = offsetof(struct gf_options, gf_vol),
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_GFAPI,
+ },
+ {
+ .name = "brick",
+ .lname = "Glusterfs brick name",
+ .type = FIO_OPT_STR_STORE,
+ .help = "Name of the Glusterfs brick to connect",
+ .off1 = offsetof(struct gf_options, gf_brick),
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_GFAPI,
+ },
+ /* terminator */
+ {
+ .name = NULL,
+ },
+};
+
+/*
+ * Create the glusterfs connection for this thread: allocate the per-thread
+ * gf_data, point it at the brick's volfile server (tcp) and initialize the
+ * volume. Stores the state in td->io_ops->data. Returns 0 on success,
+ * a negative/positive glfs error otherwise.
+ */
+int fio_gf_setup(struct thread_data *td)
+{
+	int r = 0;
+	struct gf_data *g = NULL;
+	struct gf_options *opt = td->eo;
+	struct stat sb = {0, };
+
+	dprint(FD_IO, "fio setup\n");
+
+	/* already set up by a previous file of this job */
+	if (td->io_ops->data)
+		return 0;
+
+	g = malloc(sizeof(struct gf_data));
+	if (!g) {
+		log_err("malloc failed.\n");
+		return -ENOMEM;
+	}
+	g->fs = NULL;
+	g->fd = NULL;
+	g->aio_events = NULL;
+
+	g->fs = glfs_new(opt->gf_vol);
+	if (!g->fs) {
+		log_err("glfs_new failed.\n");
+		/* BUG FIX: r was left 0 here, so the function reported
+		 * success with no connection and skipped cleanup */
+		r = errno ? -errno : -1;
+		goto cleanup;
+	}
+	glfs_set_logging(g->fs, "/tmp/fio_gfapi.log", 7);
+	/* default to tcp */
+	r = glfs_set_volfile_server(g->fs, "tcp", opt->gf_brick, 0);
+	if (r) {
+		log_err("glfs_set_volfile_server failed.\n");
+		goto cleanup;
+	}
+	r = glfs_init(g->fs);
+	if (r) {
+		log_err("glfs_init failed. Is glusterd running on brick?\n");
+		goto cleanup;
+	}
+	/* give the volume a moment to come up, then probe it */
+	sleep(2);
+	r = glfs_lstat(g->fs, ".", &sb);
+	if (r) {
+		log_err("glfs_lstat failed.\n");
+		goto cleanup;
+	}
+	dprint(FD_FILE, "fio setup %p\n", g->fs);
+	td->io_ops->data = g;
+cleanup:
+	if (r) {
+		if (g->fs)
+			glfs_fini(g->fs);
+		free(g);
+		td->io_ops->data = NULL;
+	}
+	return r;
+}
+
+/*
+ * Tear down the per-thread gfapi state: the event array, any open fd
+ * and the glusterfs connection itself.
+ */
+void fio_gf_cleanup(struct thread_data *td)
+{
+	struct gf_data *g = td->io_ops->data;
+
+	if (!g)
+		return;
+
+	/* free(NULL) is a no-op, no guard needed */
+	free(g->aio_events);
+	if (g->fd)
+		glfs_close(g->fd);
+	if (g->fs)
+		glfs_fini(g->fs);
+	free(g);
+	td->io_ops->data = NULL;
+}
+
+/*
+ * Fill in f->real_file_size by stat'ing the file on the gluster volume.
+ * Returns 0 when the size is (or becomes) known, or the glfs_lstat()
+ * error on failure.
+ */
+int fio_gf_get_file_size(struct thread_data *td, struct fio_file *f)
+{
+	struct gf_data *g = td->io_ops->data;
+	struct stat st;
+	int err;
+
+	dprint(FD_FILE, "get file size %s\n", f->file_name);
+
+	/* no connection yet: nothing to stat */
+	if (!g || !g->fs)
+		return 0;
+
+	if (fio_file_size_known(f))
+		return 0;
+
+	err = glfs_lstat(g->fs, f->file_name, &st);
+	if (err < 0) {
+		log_err("glfs_lstat failed.\n");
+		return err;
+	}
+
+	f->real_file_size = st.st_size;
+	fio_file_set_size_known(f);
+	return 0;
+}
+
+/*
+ * Open (create) the file on the gluster volume. For read jobs the file is
+ * created/extended to real_file_size and pre-filled so reads hit real data,
+ * mirroring fio's generic extend_file(). The open handle lives in g->fd;
+ * the generic f->fd descriptors are poisoned with -1. Returns 0 on success.
+ */
+int fio_gf_open_file(struct thread_data *td, struct fio_file *f)
+{
+	int flags = 0;
+	int ret = 0;
+	struct gf_data *g = td->io_ops->data;
+	struct stat sb = {0, };
+
+	if (td_write(td)) {
+		if (!read_only)
+			flags = O_RDWR;
+	} else if (td_read(td)) {
+		if (!read_only)
+			flags = O_RDWR;
+		else
+			flags = O_RDONLY;
+	}
+	dprint(FD_FILE, "fio file %s open mode %s td rw %s\n", f->file_name,
+	       flags == O_RDONLY ? "ro" : "rw",
+	       td_read(td) ? "read" : "write");
+	g->fd = glfs_creat(g->fs, f->file_name, flags, 0644);
+	if (!g->fd) {
+		log_err("glfs_creat failed.\n");
+		/* BUG FIX: don't fall through and dereference a NULL fd
+		 * in the extend path below */
+		return errno;
+	}
+	/* file for read doesn't exist or shorter than required, create/extend it */
+	if (td_read(td)) {
+		if (glfs_lstat(g->fs, f->file_name, &sb)
+		    || sb.st_size < f->real_file_size) {
+			dprint(FD_FILE, "fio extend file %s from %ld to %ld\n",
+			       f->file_name, sb.st_size, f->real_file_size);
+			ret = glfs_ftruncate(g->fd, f->real_file_size);
+			if (ret) {
+				log_err("failed fio extend file %s to %ld\n",
+					f->file_name, f->real_file_size);
+			} else {
+				unsigned long long left;
+				unsigned int bs;
+				char *b;
+				int r;
+
+				/* fill the file, copied from extend_file */
+				b = malloc(td->o.max_bs[DDIR_WRITE]);
+				if (!b) {
+					/* BUG FIX: fill buffer allocation was unchecked */
+					td_verror(td, errno, "malloc");
+					glfs_close(g->fd);
+					g->fd = NULL;
+					return 1;
+				}
+
+				left = f->real_file_size;
+				while (left && !td->terminate) {
+					bs = td->o.max_bs[DDIR_WRITE];
+					if (bs > left)
+						bs = left;
+
+					fill_io_buffer(td, b, bs, bs);
+
+					r = glfs_write(g->fd, b, bs, 0);
+					dprint(FD_IO, "fio write %d of %ld file %s\n",
+					       r, f->real_file_size, f->file_name);
+
+					if (r > 0) {
+						left -= r;
+						continue;
+					} else {
+						if (r < 0) {
+							int __e = errno;
+
+							if (__e == ENOSPC) {
+								if (td->o.fill_device)
+									break;
+								log_info("fio: ENOSPC on laying out "
+									 "file, stopping\n");
+								break;
+							}
+							td_verror(td, errno, "write");
+						} else
+							td_verror(td, EIO, "write");
+
+						break;
+					}
+				}
+
+				free(b);
+				/* rewind so the job starts reading at offset 0 */
+				glfs_lseek(g->fd, 0, SEEK_SET);
+
+				if (td->terminate) {
+					dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
+					/* NOTE(review): this unlinks a local path, not the
+					 * file on the volume — probably should be
+					 * glfs_unlink(); verify */
+					unlink(f->file_name);
+				} else if (td->o.create_fsync) {
+					if (glfs_fsync(g->fd) < 0) {
+						dprint(FD_FILE, "failed to sync, close %s\n",
+						       f->file_name);
+						td_verror(td, errno, "fsync");
+						glfs_close(g->fd);
+						g->fd = NULL;
+						return 1;
+					}
+				}
+			}
+		}
+	}
+#if defined(GFAPI_USE_FADVISE)
+	{
+		int r = 0;
+		if (td_random(td)) {
+			r = glfs_fadvise(g->fd, 0, f->real_file_size,
+					 POSIX_FADV_RANDOM);
+		} else {
+			r = glfs_fadvise(g->fd, 0, f->real_file_size,
+					 POSIX_FADV_SEQUENTIAL);
+		}
+		if (r) {
+			dprint(FD_FILE, "fio %p fadvise %s status %d\n",
+			       g->fs, f->file_name, r);
+		}
+	}
+#endif
+	dprint(FD_FILE, "fio %p created %s\n", g->fs, f->file_name);
+	/* all I/O goes through g->fd; poison the generic descriptors */
+	f->fd = -1;
+	f->shadow_fd = -1;
+
+	return ret;
+}
+
+/*
+ * Per-file close. Closes the open glfs fd and returns errno from a
+ * failed close.
+ *
+ * NOTE(review): this also runs glfs_fini() and frees the per-thread
+ * gf_data, so the connection cannot be reused for a subsequent file of
+ * the same job — confirm whether full teardown should be left to
+ * fio_gf_cleanup() instead.
+ */
+int fio_gf_close_file(struct thread_data *td, struct fio_file *f)
+{
+ int ret = 0;
+ struct gf_data *g = td->io_ops->data;
+
+ dprint(FD_FILE, "fd close %s\n", f->file_name);
+
+ if (g){
+ if (g->fd && glfs_close(g->fd) < 0)
+ ret = errno;
+
+ if (g->fs)
+ glfs_fini(g->fs);
+
+ g->fd = NULL;
+ free(g);
+ }
+ td->io_ops->data = NULL;
+ f->engine_data = 0;
+
+ return ret;
+}
+
--- /dev/null
+/*
+ * glusterfs engine
+ *
+ * IO engine using Glusterfs's gfapi async interface
+ *
+ */
+#include "gfapi.h"
+#define NOT_YET 1
+/* per-io_u bookkeeping: io_complete is set by the gfapi completion
+ * callback and cleared when getevents() reaps the io_u */
+struct fio_gf_iou {
+ struct io_u *io_u;
+ int io_complete;
+};
+/*
+ * Debug counters: issued is bumped on the submit path, cb_count from
+ * the gfapi completion callback thread.
+ * NOTE(review): not atomic, so the counts are best-effort only.
+ */
+static ulong cb_count = 0, issued = 0;
+
+/* hand back the event'th completed io_u collected by getevents() */
+static struct io_u *fio_gf_event(struct thread_data *td, int event)
+{
+	struct gf_data *g = td->io_ops->data;
+
+	dprint(FD_IO, "%s\n", __FUNCTION__);
+	return g->aio_events[event];
+}
+
+/*
+ * Reap completed async I/O. Walks every in-flight io_u, moves those whose
+ * callback marked them complete into g->aio_events, and polls with a short
+ * sleep until at least 'min' events are collected.
+ * NOTE(review): the timeout argument 't' is ignored, so this can wait
+ * indefinitely — confirm callers tolerate that.
+ */
+static int fio_gf_getevents(struct thread_data *td, unsigned int min,
+ unsigned int max, struct timespec *t)
+{
+ struct gf_data *g = td->io_ops->data;
+ unsigned int events = 0;
+ struct io_u *io_u;
+ int i = 0;
+ struct fio_gf_iou *io = NULL;
+
+ dprint(FD_IO, "%s\n", __FUNCTION__);
+ do {
+ io_u_qiter(&td->io_u_all, io_u, i) {
+ if (!(io_u->flags & IO_U_F_FLIGHT))
+ continue;
+
+ io = (struct fio_gf_iou *)io_u->engine_data;
+
+ if (io && io->io_complete) {
+ io->io_complete = 0;
+ g->aio_events[events] = io_u;
+ events++;
+
+ if (events >= max)
+ break;
+ }
+
+ }
+ if (events < min)
+ usleep(100);
+ else
+ break;
+
+ } while (1);
+
+ return events;
+}
+
+/*
+ * Per-io_u destructor: release the engine bookkeeping attached to io_u.
+ * An io_u still flagged complete here means an event was never reaped.
+ */
+static void fio_gf_io_u_free(struct thread_data *td, struct io_u *io_u)
+{
+	struct fio_gf_iou *io = io_u->engine_data;
+
+	if (io) {
+		if (io->io_complete)
+			log_err("incomplete IO found.\n");
+		io_u->engine_data = NULL;
+		free(io);
+	}
+	/* BUG FIX: debug leftover printed to stderr on every io_u free;
+	 * route it through fio's debug logging instead */
+	dprint(FD_IO, "issued %lu finished %lu\n", issued, cb_count);
+}
+
+/* attach per-io_u engine bookkeeping on first use */
+static int fio_gf_io_u_init(struct thread_data *td, struct io_u *io_u)
+{
+	struct fio_gf_iou *io;
+
+	dprint(FD_FILE, "%s\n", __FUNCTION__);
+
+	if (io_u->engine_data)
+		return 0;
+
+	io = malloc(sizeof(struct fio_gf_iou));
+	if (!io) {
+		td_verror(td, errno, "malloc");
+		return 1;
+	}
+	io->io_complete = 0;
+	io->io_u = io_u;
+	io_u->engine_data = io;
+	return 0;
+}
+
+/*
+ * gfapi completion callback (runs on a glusterfs thread). Marks the
+ * io_u complete so getevents() can reap it on the job thread.
+ */
+static void gf_async_cb(glfs_fd_t *fd, ssize_t ret, void *data)
+{
+	struct io_u *io_u = data;
+	struct fio_gf_iou *iou = io_u->engine_data;
+
+	/* BUG FIX: ret is ssize_t — print with %zd, not %lu */
+	dprint(FD_IO, "%s ret %zd\n", __FUNCTION__, ret);
+	/* NOTE(review): a negative ret (I/O error) is not propagated into
+	 * io_u->error here — verify whether failed async I/O should be
+	 * reported rather than silently marked complete */
+	iou->io_complete = 1;
+	cb_count++;
+}
+
+/*
+ * Submit one async I/O via gfapi. Returns FIO_Q_QUEUED on success, or
+ * FIO_Q_COMPLETED with io_u->error set on failure.
+ */
+static int fio_gf_async_queue(struct thread_data *td, struct io_u *io_u)
+{
+	struct gf_data *g = td->io_ops->data;
+	int r;
+
+	dprint(FD_IO, "%s op %s\n", __FUNCTION__,
+	       io_u->ddir == DDIR_READ ? "read" :
+	       io_u->ddir == DDIR_WRITE ? "write" :
+	       io_u->ddir == DDIR_SYNC ? "sync" : "unknown");
+
+	fio_ro_check(td, io_u);
+
+	if (io_u->ddir == DDIR_READ)
+		r = glfs_pread_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
+				     io_u->offset, 0, gf_async_cb, io_u);
+	else if (io_u->ddir == DDIR_WRITE)
+		r = glfs_pwrite_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
+				      io_u->offset, 0, gf_async_cb, io_u);
+	else if (io_u->ddir == DDIR_SYNC)
+		r = glfs_fsync_async(g->fd, gf_async_cb, io_u);
+	else {
+		log_err("unsupported operation.\n");
+		/* BUG FIX: the failed label used to overwrite io_u->error
+		 * with r == 0; set r so EINVAL survives */
+		r = -EINVAL;
+		goto failed;
+	}
+	if (r) {
+		log_err("glfs failed.\n");
+		goto failed;
+	}
+	issued++;
+	return FIO_Q_QUEUED;
+
+failed:
+	io_u->error = r;
+	td_verror(td, io_u->error, "xfer");
+	return FIO_Q_COMPLETED;
+}
+
+/*
+ * Engine init for the async engine: establish the shared gfapi state and
+ * allocate the completion event array (iodepth entries).
+ */
+int fio_gf_async_setup(struct thread_data *td)
+{
+	struct gf_data *g;
+	int r;
+
+#if defined(NOT_YET)
+	/* was fprintf(stderr, ...); use fio's logging instead */
+	log_info("the async interface is still very experimental...\n");
+#endif
+	r = fio_gf_setup(td);
+	if (r)
+		return r;
+
+	/* completions are reaped on this thread; force thread mode */
+	td->o.use_thread = 1;
+	g = td->io_ops->data;
+	/* calloc zeroes the array, replacing malloc + memset */
+	g->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
+	if (!g->aio_events) {
+		fio_gf_cleanup(td);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* nothing to do per-I/O before an async submit */
+static int fio_gf_async_prep(struct thread_data *td, struct io_u *io_u)
+{
+	dprint(FD_FILE, "%s\n", __FUNCTION__);
+	/* both rw and non-rw directions need no preparation here */
+	return 0;
+}
+
+/*
+ * fio engine hook table for the async gfapi engine. FIO_DISKLESSIO:
+ * all I/O goes through libgfapi, no local file descriptors are used.
+ */
+static struct ioengine_ops ioengine = {
+ .name = "gfapi_async",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_gf_async_setup,
+ .cleanup = fio_gf_cleanup,
+ .prep = fio_gf_async_prep,
+ .queue = fio_gf_async_queue,
+ .open_file = fio_gf_open_file,
+ .close_file = fio_gf_close_file,
+ .get_file_size = fio_gf_get_file_size,
+ .getevents = fio_gf_getevents,
+ .event = fio_gf_event,
+ .io_u_init = fio_gf_io_u_init,
+ .io_u_free = fio_gf_io_u_free,
+ .options = gfapi_options,
+ .option_struct_size = sizeof(struct gf_options),
+ .flags = FIO_DISKLESSIO,
+};
+
+/* register/unregister the gfapi_async engine at load/unload time */
+static void fio_init fio_gf_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_gf_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}
--- /dev/null
+/*
+ * glusterfs engine
+ *
+ * IO engine using Glusterfs's gfapi sync interface
+ *
+ */
+
+#include "gfapi.h"
+
+#define LAST_POS(f) ((f)->engine_data)
+/*
+ * Position the glfs fd at the io_u's offset, skipping the lseek when
+ * the last known position (LAST_POS on the file) already matches.
+ */
+static int fio_gf_prep(struct thread_data *td, struct io_u *io_u)
+{
+	struct gf_data *g = td->io_ops->data;
+	struct fio_file *f = io_u->file;
+	int need_seek;
+
+	dprint(FD_FILE, "fio prep\n");
+
+	if (!ddir_rw(io_u->ddir))
+		return 0;
+
+	/* seek unless we are known to already be at this offset */
+	need_seek = (LAST_POS(f) == -1ULL) || (LAST_POS(f) != io_u->offset);
+	if (!need_seek)
+		return 0;
+
+	if (glfs_lseek(g->fd, io_u->offset, SEEK_SET) < 0) {
+		td_verror(td, errno, "lseek");
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Synchronous queue: perform the read/write via gfapi and complete
+ * inline. Always returns FIO_Q_COMPLETED, with io_u->error / io_u->resid
+ * filled in as appropriate.
+ */
+static int fio_gf_queue(struct thread_data *td, struct io_u *io_u)
+{
+	struct gf_data *g = td->io_ops->data;
+	int ret = 0;
+
+	dprint(FD_FILE, "fio queue len %lu\n", io_u->xfer_buflen);
+	fio_ro_check(td, io_u);
+
+	if (io_u->ddir == DDIR_READ)
+		ret = glfs_read(g->fd, io_u->xfer_buf, io_u->xfer_buflen, 0);
+	else if (io_u->ddir == DDIR_WRITE)
+		ret = glfs_write(g->fd, io_u->xfer_buf, io_u->xfer_buflen, 0);
+	else {
+		log_err("unsupported operation.\n");
+		/* BUG FIX: .queue() must return a FIO_Q_* status, not a raw
+		 * -EINVAL; report the error through io_u->error instead */
+		io_u->error = EINVAL;
+		td_verror(td, io_u->error, "xfer");
+		return FIO_Q_COMPLETED;
+	}
+	dprint(FD_FILE, "fio len %lu ret %d\n", io_u->xfer_buflen, ret);
+	if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
+		LAST_POS(io_u->file) = io_u->offset + ret;
+
+	if (ret != (int) io_u->xfer_buflen) {
+		if (ret >= 0) {
+			/* short transfer: report residual, not an error */
+			io_u->resid = io_u->xfer_buflen - ret;
+			io_u->error = 0;
+			return FIO_Q_COMPLETED;
+		} else
+			io_u->error = errno;
+	}
+
+	if (io_u->error) {
+		log_err("IO failed.\n");
+		td_verror(td, io_u->error, "xfer");
+	}
+
+	return FIO_Q_COMPLETED;
+}
+
+/*
+ * fio engine hook table for the sync gfapi engine. FIO_SYNCIO: queue()
+ * completes inline; FIO_DISKLESSIO: no local file descriptors are used.
+ */
+static struct ioengine_ops ioengine = {
+ .name = "gfapi",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_gf_setup,
+ .cleanup = fio_gf_cleanup,
+ .prep = fio_gf_prep,
+ .queue = fio_gf_queue,
+ .open_file = fio_gf_open_file,
+ .close_file = fio_gf_close_file,
+ .get_file_size = fio_gf_get_file_size,
+ .options = gfapi_options,
+ .option_struct_size = sizeof(struct gf_options),
+ .flags = FIO_SYNCIO | FIO_DISKLESSIO,
+};
+
+/* register/unregister the gfapi (sync) engine at load/unload time */
+static void fio_init fio_gf_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_gf_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}
IO engine supporting direct access to Ceph Rados Block Devices (RBD) via librbd
without the need to use the kernel rbd driver. This ioengine defines engine specific
options.
+.TP
+.B gfapi
+Use the GlusterFS libgfapi sync interface to access GlusterFS volumes directly,
+without going through FUSE. This ioengine defines engine specific
+options.
+.TP
+.B gfapi_async
+Use the GlusterFS libgfapi async interface to access GlusterFS volumes directly,
+without going through FUSE. This ioengine defines engine specific
+options.
.RE
.P
.RE
.help = "fallocate() file based engine",
},
#endif
+#ifdef CONFIG_GFAPI
+ { .ival = "gfapi",
+ .help = "Glusterfs libgfapi(sync) based engine"
+ },
+ { .ival = "gfapi_async",
+ .help = "Glusterfs libgfapi(async) based engine"
+ },
+#endif
+
{ .ival = "external",
.help = "Load external engine (append name)",
},
__FIO_OPT_G_ACT,
__FIO_OPT_G_LATPROF,
__FIO_OPT_G_RBD,
+ __FIO_OPT_G_GFAPI,
__FIO_OPT_G_NR,
FIO_OPT_G_RATE = (1U << __FIO_OPT_G_RATE),
FIO_OPT_G_ACT = (1U << __FIO_OPT_G_ACT),
FIO_OPT_G_LATPROF = (1U << __FIO_OPT_G_LATPROF),
FIO_OPT_G_RBD = (1U << __FIO_OPT_G_RBD),
+ FIO_OPT_G_GFAPI = (1U << __FIO_OPT_G_GFAPI),
FIO_OPT_G_INVALID = (1U << __FIO_OPT_G_NR),
};