#include "../optgroup.h"
struct rados_data {
- rados_t cluster;
- rados_ioctx_t io_ctx;
- struct io_u **aio_events;
- bool connected;
- pthread_mutex_t completed_lock;
- pthread_cond_t completed_more_io;
- struct flist_head completed_operations;
+ rados_t cluster;
+ rados_ioctx_t io_ctx;
+ struct io_u **aio_events;
+ bool connected;
+ pthread_mutex_t completed_lock;
+ pthread_cond_t completed_more_io;
+ struct flist_head completed_operations;
+ /* count of async ops submitted to librados (bumped on each queued op) */
+ uint64_t ops_scheduled;
+ /* count of ops whose completion callback has fired; cleanup waits
+  * under completed_lock until this catches up with ops_scheduled */
+ uint64_t ops_completed;
};
struct rados_options {
char *cluster_name;
char *pool_name;
char *client_name;
+ /* path to the ceph configuration file passed to rados_conf_read_file() */
+ char *conf;
int busy_poll;
+ /* when non-zero, create each object with a zero-length write at setup */
+ int touch_objects;
};
static struct fio_option options[] = {
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_RBD,
},
+ {
+ .name = "conf",
+ .lname = "ceph configuration file path",
+ .type = FIO_OPT_STR_STORE,
+ .help = "Path of the ceph configuration file",
+ .off1 = offsetof(struct rados_options, conf),
+ .def = "/etc/ceph/ceph.conf",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_RBD,
+ },
{
.name = "busy_poll",
.lname = "busy poll mode",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_RBD,
},
+ {
+ .name = "touch_objects",
+ .lname = "touch objects on start",
+ .type = FIO_OPT_BOOL,
+ .help = "Touch (create) objects on start",
+ .off1 = offsetof(struct rados_options, touch_objects),
+ .def = "1",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_RBD,
+ },
{
.name = NULL,
},
pthread_mutex_init(&rados->completed_lock, NULL);
pthread_cond_init(&rados->completed_more_io, NULL);
INIT_FLIST_HEAD(&rados->completed_operations);
+ rados->ops_scheduled = 0;
+ rados->ops_completed = 0;
*rados_data_ptr = rados;
return 0;
char *client_name = NULL;
/*
- * If we specify cluser name, the rados_create2
+ * If we specify cluster name, the rados_create2
* will not assume 'client.'. name is considered
* as a full type.id namestr
*/
goto failed_early;
}
- r = rados_conf_read_file(rados->cluster, NULL);
+ r = rados_conf_read_file(rados->cluster, o->conf);
if (r < 0) {
log_err("rados_conf_read_file failed.\n");
goto failed_early;
for (i = 0; i < td->o.nr_files; i++) {
f = td->files[i];
f->real_file_size = file_size;
- r = rados_write(rados->io_ctx, f->file_name, "", 0, 0);
- if (r < 0) {
- goto failed_obj_create;
+ if (o->touch_objects) {
+ r = rados_write(rados->io_ctx, f->file_name, "", 0, 0);
+ if (r < 0) {
+ goto failed_obj_create;
+ }
}
}
return 0;
static void fio_rados_cleanup(struct thread_data *td)
{
struct rados_data *rados = td->io_ops_data;
-
if (rados) {
+ pthread_mutex_lock(&rados->completed_lock);
+ while (rados->ops_scheduled != rados->ops_completed)
+ pthread_cond_wait(&rados->completed_more_io, &rados->completed_lock);
+ pthread_mutex_unlock(&rados->completed_lock);
_fio_rados_rm_objects(td, rados);
_fio_rados_disconnect(rados);
free(rados->aio_events);
assert(rados_aio_is_complete(fri->completion));
pthread_mutex_lock(&rados->completed_lock);
flist_add_tail(&fri->list, &rados->completed_operations);
+ rados->ops_completed++;
pthread_mutex_unlock(&rados->completed_lock);
pthread_cond_signal(&rados->completed_more_io);
}
log_err("rados_write failed.\n");
goto failed_comp;
}
+ rados->ops_scheduled++;
return FIO_Q_QUEUED;
} else if (io_u->ddir == DDIR_READ) {
r = rados_aio_create_completion(fri, complete_callback,
log_err("rados_aio_read failed.\n");
goto failed_comp;
}
+ rados->ops_scheduled++;
return FIO_Q_QUEUED;
} else if (io_u->ddir == DDIR_TRIM) {
r = rados_aio_create_completion(fri, complete_callback,
log_err("rados_aio_write_op_operate failed.\n");
goto failed_write_op;
}
+ rados->ops_scheduled++;
return FIO_Q_QUEUED;
}
}
/* ioengine_ops for get_ioengine() */
-static struct ioengine_ops ioengine = {
+FIO_STATIC struct ioengine_ops ioengine = {
.name = "rados",
.version = FIO_IOOPS_VERSION,
.flags = FIO_DISKLESSIO,