diff --git a/engines/rados.c b/engines/rados.c
index 23e62c4c45e3c5bf15a9d646636d1cfe3cf49b0c..d0d15c5b54c8380353b56e8b9856a8475a1b9b25 100644
--- a/engines/rados.c
+++ b/engines/rados.c
@@ -37,6 +37,7 @@ struct rados_options {
        char *cluster_name;
        char *pool_name;
        char *client_name;
+       char *conf;
        int busy_poll;
        int touch_objects;
 };
@@ -69,6 +70,16 @@ static struct fio_option options[] = {
                .category = FIO_OPT_C_ENGINE,
                .group    = FIO_OPT_G_RBD,
        },
+       {
+               .name     = "conf",
+               .lname    = "ceph configuration file path",
+               .type     = FIO_OPT_STR_STORE,
+               .help     = "Path of the ceph configuration file",
+               .off1     = offsetof(struct rados_options, conf),
+               .def      = "/etc/ceph/ceph.conf",
+               .category = FIO_OPT_C_ENGINE,
+               .group    = FIO_OPT_G_RBD,
+       },
        {
                .name     = "busy_poll",
                .lname    = "busy poll mode",
@@ -151,7 +162,7 @@ static int _fio_rados_connect(struct thread_data *td)
                char *client_name = NULL;
 
                /*
-               * If we specify cluser name, the rados_create2
+               * If we specify cluster name, the rados_create2
                * will not assume 'client.'. name is considered
                * as a full type.id namestr
                */
@@ -184,7 +195,7 @@ static int _fio_rados_connect(struct thread_data *td)
                goto failed_early;
        }
 
-       r = rados_conf_read_file(rados->cluster, NULL);
+       r = rados_conf_read_file(rados->cluster, o->conf);
        if (r < 0) {
                log_err("rados_conf_read_file failed.\n");
                goto failed_early;