diff --git a/engines/rados.c b/engines/rados.c
index 30fcebb5fe75db1f2c6e7c4a54bde4d379510692..42ee48ff02b3f6371027ab4cbcbc304aefbbea10 100644
--- a/engines/rados.c
+++ b/engines/rados.c
 #include "../optgroup.h"
 
 struct rados_data {
-        rados_t cluster;
-        rados_ioctx_t io_ctx;
-        struct io_u **aio_events;
-        bool connected;
-        pthread_mutex_t completed_lock;
-        pthread_cond_t completed_more_io;
-        struct flist_head completed_operations;
+       rados_t cluster;
+       rados_ioctx_t io_ctx;
+       struct io_u **aio_events;
+       bool connected;
+       pthread_mutex_t completed_lock;
+       pthread_cond_t completed_more_io;
+       struct flist_head completed_operations;
+       uint64_t ops_scheduled;
+       uint64_t ops_completed;
 };
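
The two counters added above form a simple in-flight ledger: ops_scheduled is only ever incremented by the submitting job thread, after librados has accepted a request, while ops_completed is incremented from librados callback context under completed_lock. Their difference is the number of async operations still outstanding. A minimal sketch of that invariant (the helper name is hypothetical, not part of the patch):

static inline uint64_t rados_in_flight(const struct rados_data *rados)
{
	/* Only stable while completed_lock is held, since ops_completed
	 * is written from librados callback threads. */
	return rados->ops_scheduled - rados->ops_completed;
}

The cleanup hunk below reads both counters under exactly that lock.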
 
 struct fio_rados_iou {
@@ -101,6 +103,8 @@ static int _fio_setup_rados_data(struct thread_data *td,
        pthread_mutex_init(&rados->completed_lock, NULL);
        pthread_cond_init(&rados->completed_more_io, NULL);
        INIT_FLIST_HEAD(&rados->completed_operations);
+       rados->ops_scheduled = 0;
+       rados->ops_completed = 0;
        *rados_data_ptr = rados;
        return 0;
 
@@ -227,8 +231,11 @@ static void _fio_rados_disconnect(struct rados_data *rados)
 static void fio_rados_cleanup(struct thread_data *td)
 {
        struct rados_data *rados = td->io_ops_data;
-
        if (rados) {
+               pthread_mutex_lock(&rados->completed_lock);
+               while (rados->ops_scheduled != rados->ops_completed)
+                       pthread_cond_wait(&rados->completed_more_io, &rados->completed_lock);
+               pthread_mutex_unlock(&rados->completed_lock);
                _fio_rados_rm_objects(td, rados);
                _fio_rados_disconnect(rados);
                free(rados->aio_events);
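
The new wait loop in fio_rados_cleanup() is the standard condition-variable drain: take the mutex, re-check the predicate in a loop, and only proceed once the scheduled and completed counts match. Factored out for illustration, assuming the same mutex/condvar pairing (hypothetical helper name):

static void rados_wait_inflight(struct rados_data *rados)
{
	pthread_mutex_lock(&rados->completed_lock);
	/* A while loop, not an if: pthread_cond_wait() may wake
	 * spuriously, and one signal may stand in for several
	 * completions, so the predicate is re-checked every time
	 * the mutex is re-acquired. */
	while (rados->ops_scheduled != rados->ops_completed)
		pthread_cond_wait(&rados->completed_more_io,
				  &rados->completed_lock);
	pthread_mutex_unlock(&rados->completed_lock);
}

Without this barrier, _fio_rados_rm_objects() and _fio_rados_disconnect() could run while callbacks for in-flight AIOs still reference rados, which is presumably the race this patch closes.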
@@ -244,6 +251,7 @@ static void complete_callback(rados_completion_t cb, void *arg)
        assert(rados_aio_is_complete(fri->completion));
        pthread_mutex_lock(&rados->completed_lock);
        flist_add_tail(&fri->list, &rados->completed_operations);
+       rados->ops_completed++;
        pthread_mutex_unlock(&rados->completed_lock);
        pthread_cond_signal(&rados->completed_more_io);
 }
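
Note the ordering in complete_callback(): the list append and counter bump happen under completed_lock, but pthread_cond_signal() is called after the unlock. Signalling while still holding the mutex would be equally correct, since the waiter re-checks its predicate either way; for comparison, the locked variant would look like:

	pthread_mutex_lock(&rados->completed_lock);
	flist_add_tail(&fri->list, &rados->completed_operations);
	rados->ops_completed++;
	pthread_cond_signal(&rados->completed_more_io); /* still locked */
	pthread_mutex_unlock(&rados->completed_lock);

Signalling after unlock, as the patch does, lets the woken waiter take the mutex without immediately blocking on the signalling thread.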
@@ -272,6 +280,7 @@ static enum fio_q_status fio_rados_queue(struct thread_data *td,
                        log_err("rados_write failed.\n");
                        goto failed_comp;
                }
+               rados->ops_scheduled++;
                return FIO_Q_QUEUED;
        } else if (io_u->ddir == DDIR_READ) {
                r = rados_aio_create_completion(fri, complete_callback,
@@ -286,6 +295,7 @@ static enum fio_q_status fio_rados_queue(struct thread_data *td,
                        log_err("rados_aio_read failed.\n");
                        goto failed_comp;
                }
+               rados->ops_scheduled++;
                return FIO_Q_QUEUED;
        } else if (io_u->ddir == DDIR_TRIM) {
                r = rados_aio_create_completion(fri, complete_callback,
@@ -307,6 +317,7 @@ static enum fio_q_status fio_rados_queue(struct thread_data *td,
                        log_err("rados_aio_write_op_operate failed.\n");
                        goto failed_write_op;
                }
+               rados->ops_scheduled++;
                return FIO_Q_QUEUED;
         }
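
All three submission paths follow the same discipline: ops_scheduled++ runs only after the librados call has returned success, so a failed submission never inflates the in-flight count, and no lock is needed because the counter is touched solely by the job thread. Condensed from the DDIR_READ branch above, with error-path details elided and the argument list taken from the standard librados C API (it may differ cosmetically from the engine source):

	r = rados_aio_create_completion(fri, complete_callback,
					NULL, &fri->completion);
	if (r < 0)
		goto failed;
	r = rados_aio_read(rados->io_ctx, io_u->file->file_name,
			   fri->completion, io_u->xfer_buf,
			   io_u->xfer_buflen, io_u->offset);
	if (r < 0)
		goto failed_comp;
	rados->ops_scheduled++; /* librados owns the op from here */
	return FIO_Q_QUEUED;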
 
@@ -433,7 +444,7 @@ static int fio_rados_io_u_init(struct thread_data *td, struct io_u *io_u)
 }
 
 /* ioengine_ops for get_ioengine() */
-static struct ioengine_ops ioengine = {
+FIO_STATIC struct ioengine_ops ioengine = {
        .name = "rados",
        .version                = FIO_IOOPS_VERSION,
        .flags                  = FIO_DISKLESSIO,
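
The final hunk swaps static for FIO_STATIC on the ops table. FIO_STATIC is fio's visibility knob for ioengines: it presumably keeps the symbol static in the monolithic build but exports it when engines are compiled as loadable shared objects, so the dynamic loader can resolve the ops table from get_ioengine(). A plausible shape for the macro, assumed here rather than quoted from fio's headers:

#ifdef CONFIG_DYNAMIC_ENGINES
#define FIO_STATIC
#else
#define FIO_STATIC	static
#endif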