#include <rbd/librbd.h>
#include "../fio.h"
+#include "../optgroup.h"
+
+#ifdef CONFIG_RBD_POLL
+/* add for poll */
+#include <poll.h>
+#include <sys/eventfd.h>
+#endif
/* Per-io_u engine state linking a fio io unit to its librbd completion. */
struct fio_rbd_iou {
	struct io_u *io_u;		/* back-pointer to the owning io_u */
	rbd_completion_t completion;	/* librbd async completion handle */
	int io_seen;			/* set once reaped into aio_events[] */
	int io_complete;		/* set by the librbd completion callback */
};
struct rbd_data {
rados_ioctx_t io_ctx;
rbd_image_t image;
struct io_u **aio_events;
+ struct io_u **sort_events;
+ int fd; /* add for poll */
+ bool connected;
};
struct rbd_options {
+ void *pad;
+ char *cluster_name;
char *rbd_name;
char *pool_name;
char *client_name;
};
static struct fio_option options[] = {
+ {
+ .name = "clustername",
+ .lname = "ceph cluster name",
+ .type = FIO_OPT_STR_STORE,
+ .help = "Cluster name for ceph",
+ .off1 = offsetof(struct rbd_options, cluster_name),
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_RBD,
+ },
{
.name = "rbdname",
.lname = "rbd engine rbdname",
.lname = "Busy poll",
.type = FIO_OPT_BOOL,
.help = "Busy poll for completions instead of sleeping",
- .off1 = offsetof(struct rbd_options, client_name),
+ .off1 = offsetof(struct rbd_options, busy_poll),
.def = "0",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_RBD,
static int _fio_setup_rbd_data(struct thread_data *td,
struct rbd_data **rbd_data_ptr)
{
- struct rbd_data *rbd_data;
+ struct rbd_data *rbd;
- if (td->io_ops->data)
+ if (td->io_ops_data)
return 0;
- rbd_data = malloc(sizeof(struct rbd_data));
- if (!rbd_data)
+ rbd = calloc(1, sizeof(struct rbd_data));
+ if (!rbd)
goto failed;
- memset(rbd_data, 0, sizeof(struct rbd_data));
+ rbd->connected = false;
- rbd_data->aio_events = malloc(td->o.iodepth * sizeof(struct io_u *));
- if (!rbd_data->aio_events)
- goto failed;
+ /* add for poll, init fd: -1 */
+ rbd->fd = -1;
- memset(rbd_data->aio_events, 0, td->o.iodepth * sizeof(struct io_u *));
+ rbd->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
+ if (!rbd->aio_events)
+ goto failed;
- *rbd_data_ptr = rbd_data;
+ rbd->sort_events = calloc(td->o.iodepth, sizeof(struct io_u *));
+ if (!rbd->sort_events)
+ goto failed;
+ *rbd_data_ptr = rbd;
return 0;
failed:
- if (rbd_data)
- free(rbd_data);
+ if (rbd) {
+ if (rbd->aio_events)
+ free(rbd->aio_events);
+ if (rbd->sort_events)
+ free(rbd->sort_events);
+ free(rbd);
+ }
return 1;
}
#ifdef CONFIG_RBD_POLL
/*
 * Create an eventfd and register it with librbd so AIO completions can
 * be reaped via poll()/rbd_poll_io_events() instead of scanning every
 * in-flight io_u. Returns true on success; on failure the eventfd is
 * closed again and rbd->fd restored to -1.
 */
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
	int r;

	/* EFD_SEMAPHORE: each read() decrements the counter by one */
	rbd->fd = eventfd(0, EFD_SEMAPHORE);
	if (rbd->fd < 0) {
		log_err("eventfd failed.\n");
		return false;
	}

	r = rbd_set_image_notification(rbd->image, rbd->fd, EVENT_TYPE_EVENTFD);
	if (r < 0) {
		log_err("rbd_set_image_notification failed.\n");
		close(rbd->fd);
		rbd->fd = -1;
		return false;
	}

	return true;
}
#else
/* Polling support not compiled in: nothing to set up. */
static bool _fio_rbd_setup_poll(struct rbd_data *rbd)
{
	return true;
}
#endif
+
static int _fio_rbd_connect(struct thread_data *td)
{
- struct rbd_data *rbd_data = td->io_ops->data;
+ struct rbd_data *rbd = td->io_ops_data;
struct rbd_options *o = td->eo;
int r;
- r = rados_create(&rbd_data->cluster, o->client_name);
+ if (o->cluster_name) {
+ char *client_name = NULL;
+
+ /*
+ * If we specify cluser name, the rados_create2
+ * will not assume 'client.'. name is considered
+ * as a full type.id namestr
+ */
+ if (o->client_name) {
+ if (!index(o->client_name, '.')) {
+ client_name = calloc(1, strlen("client.") +
+ strlen(o->client_name) + 1);
+ strcat(client_name, "client.");
+ strcat(client_name, o->client_name);
+ } else {
+ client_name = o->client_name;
+ }
+ }
+
+ r = rados_create2(&rbd->cluster, o->cluster_name,
+ client_name, 0);
+
+ if (client_name && !index(o->client_name, '.'))
+ free(client_name);
+ } else
+ r = rados_create(&rbd->cluster, o->client_name);
+
if (r < 0) {
log_err("rados_create failed.\n");
goto failed_early;
}
- r = rados_conf_read_file(rbd_data->cluster, NULL);
+ r = rados_conf_read_file(rbd->cluster, NULL);
if (r < 0) {
log_err("rados_conf_read_file failed.\n");
goto failed_early;
}
- r = rados_connect(rbd_data->cluster);
+ r = rados_connect(rbd->cluster);
if (r < 0) {
log_err("rados_connect failed.\n");
goto failed_shutdown;
}
- r = rados_ioctx_create(rbd_data->cluster, o->pool_name,
- &rbd_data->io_ctx);
+ r = rados_ioctx_create(rbd->cluster, o->pool_name, &rbd->io_ctx);
if (r < 0) {
log_err("rados_ioctx_create failed.\n");
goto failed_shutdown;
}
- r = rbd_open(rbd_data->io_ctx, o->rbd_name, &rbd_data->image,
- NULL /*snap */ );
+ r = rbd_open(rbd->io_ctx, o->rbd_name, &rbd->image, NULL /*snap */ );
if (r < 0) {
log_err("rbd_open failed.\n");
goto failed_open;
}
+
+ if (!_fio_rbd_setup_poll(rbd))
+ goto failed_poll;
+
return 0;
+failed_poll:
+ rbd_close(rbd->image);
+ rbd->image = NULL;
failed_open:
- rados_ioctx_destroy(rbd_data->io_ctx);
- rbd_data->io_ctx = NULL;
+ rados_ioctx_destroy(rbd->io_ctx);
+ rbd->io_ctx = NULL;
failed_shutdown:
- rados_shutdown(rbd_data->cluster);
- rbd_data->cluster = NULL;
+ rados_shutdown(rbd->cluster);
+ rbd->cluster = NULL;
failed_early:
return 1;
}
/*
 * Tear down everything _fio_rbd_connect() set up, in reverse order of
 * acquisition: eventfd, image, io context, cluster handle. Safe to call
 * on a partially connected rbd_data; every released handle is NULLed
 * (fd set to -1) so repeated calls are harmless.
 */
static void _fio_rbd_disconnect(struct rbd_data *rbd)
{
	if (!rbd)
		return;

	/* close the completion eventfd, if one was created */
	if (rbd->fd != -1) {
		close(rbd->fd);
		rbd->fd = -1;
	}

	/* shutdown everything */
	if (rbd->image) {
		rbd_close(rbd->image);
		rbd->image = NULL;
	}

	if (rbd->io_ctx) {
		rados_ioctx_destroy(rbd->io_ctx);
		rbd->io_ctx = NULL;
	}

	if (rbd->cluster) {
		rados_shutdown(rbd->cluster);
		rbd->cluster = NULL;
	}
}
*/
ret = rbd_aio_get_return_value(fri->completion);
if (ret < 0) {
- io_u->error = ret;
+ io_u->error = -ret;
io_u->resid = io_u->xfer_buflen;
} else
io_u->error = 0;
+
+ fri->io_complete = 1;
}
static struct io_u *fio_rbd_event(struct thread_data *td, int event)
{
- struct rbd_data *rbd_data = td->io_ops->data;
+ struct rbd_data *rbd = td->io_ops_data;
- return rbd_data->aio_events[event];
+ return rbd->aio_events[event];
}
-static inline int fri_check_complete(struct rbd_data *rbd_data,
- struct io_u *io_u,
+static inline int fri_check_complete(struct rbd_data *rbd, struct io_u *io_u,
unsigned int *events)
{
struct fio_rbd_iou *fri = io_u->engine_data;
- if (rbd_aio_is_complete(fri->completion)) {
+ if (fri->io_complete) {
fri->io_seen = 1;
- rbd_data->aio_events[*events] = io_u;
+ rbd->aio_events[*events] = io_u;
(*events)++;
rbd_aio_release(fri->completion);
return 0;
}
+#ifndef CONFIG_RBD_POLL
+static inline int rbd_io_u_seen(struct io_u *io_u)
+{
+ struct fio_rbd_iou *fri = io_u->engine_data;
+
+ return fri->io_seen;
+}
+#endif
+
+static void rbd_io_u_wait_complete(struct io_u *io_u)
+{
+ struct fio_rbd_iou *fri = io_u->engine_data;
+
+ rbd_aio_wait_for_complete(fri->completion);
+}
+
+static int rbd_io_u_cmp(const void *p1, const void *p2)
+{
+ const struct io_u **a = (const struct io_u **) p1;
+ const struct io_u **b = (const struct io_u **) p2;
+ uint64_t at, bt;
+
+ at = utime_since_now(&(*a)->start_time);
+ bt = utime_since_now(&(*b)->start_time);
+
+ if (at < bt)
+ return -1;
+ else if (at == bt)
+ return 0;
+ else
+ return 1;
+}
+
/*
 * Reap completed IOs into rbd->aio_events[], bumping *events for each
 * one found, and return how many were reaped on this pass.
 *
 * With CONFIG_RBD_POLL, completions are discovered via poll() on the
 * eventfd plus rbd_poll_io_events(); otherwise every in-flight io_u is
 * scanned and, when 'wait' is set, the pending ones are waited on
 * oldest-issued-first until at least min_evts events are available.
 */
static int rbd_iter_events(struct thread_data *td, unsigned int *events,
			   unsigned int min_evts, int wait)
{
	struct rbd_data *rbd = td->io_ops_data;
	unsigned int this_events = 0;
	struct io_u *io_u;
	int i, sidx = 0;

#ifdef CONFIG_RBD_POLL
	int ret = 0;
	int event_num = 0;
	struct fio_rbd_iou *fri = NULL;
	rbd_completion_t comps[min_evts];
	uint64_t counter;
	bool completed;

	struct pollfd pfd;
	pfd.fd = rbd->fd;
	pfd.events = POLLIN;

	/* block indefinitely when waiting, otherwise just peek */
	ret = poll(&pfd, 1, wait ? -1 : 0);
	if (ret <= 0)
		return 0;
	if (!(pfd.revents & POLLIN))
		return 0;

	event_num = rbd_poll_io_events(rbd->image, comps, min_evts);

	for (i = 0; i < event_num; i++) {
		fri = rbd_aio_get_arg(comps[i]);
		io_u = fri->io_u;

		/* best effort to decrement the semaphore */
		ret = read(rbd->fd, &counter, sizeof(counter));
		if (ret <= 0)
			log_err("rbd_iter_events failed to decrement semaphore.\n");

		/* polled completions must always be consumable here */
		completed = fri_check_complete(rbd, io_u, events);
		assert(completed);

		this_events++;
	}
#else
	io_u_qiter(&td->io_u_all, io_u, i) {
		if (!(io_u->flags & IO_U_F_FLIGHT))
			continue;
		if (rbd_io_u_seen(io_u))
			continue;

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
		else if (wait)
			/* still pending: remember it for the wait pass */
			rbd->sort_events[sidx++] = io_u;
	}
#endif

	if (!wait || !sidx)
		return this_events;

	/*
	 * Sort events, oldest issue first, then wait on as many as we
	 * need in order of age. If we have enough events, stop waiting,
	 * and just check if any of the older ones are done.
	 */
	if (sidx > 1)
		qsort(rbd->sort_events, sidx, sizeof(struct io_u *), rbd_io_u_cmp);

	for (i = 0; i < sidx; i++) {
		io_u = rbd->sort_events[i];

		if (fri_check_complete(rbd, io_u, events)) {
			this_events++;
			continue;
		}

		/*
		 * Stop waiting when we have enough, but continue checking
		 * all pending IOs if they are complete.
		 */
		if (*events >= min_evts)
			continue;

		rbd_io_u_wait_complete(io_u);

		if (fri_check_complete(rbd, io_u, events))
			this_events++;
	}

	return this_events;
}
return events;
}
-static int fio_rbd_queue(struct thread_data *td, struct io_u *io_u)
+static enum fio_q_status fio_rbd_queue(struct thread_data *td,
+ struct io_u *io_u)
{
- struct rbd_data *rbd_data = td->io_ops->data;
+ struct rbd_data *rbd = td->io_ops_data;
struct fio_rbd_iou *fri = io_u->engine_data;
int r = -1;
fio_ro_check(td, io_u);
fri->io_seen = 0;
+ fri->io_complete = 0;
r = rbd_aio_create_completion(fri, _fio_rbd_finish_aiocb,
&fri->completion);
}
if (io_u->ddir == DDIR_WRITE) {
- r = rbd_aio_write(rbd_data->image, io_u->offset,
- io_u->xfer_buflen, io_u->xfer_buf,
- fri->completion);
+ r = rbd_aio_write(rbd->image, io_u->offset, io_u->xfer_buflen,
+ io_u->xfer_buf, fri->completion);
if (r < 0) {
log_err("rbd_aio_write failed.\n");
goto failed_comp;
}
} else if (io_u->ddir == DDIR_READ) {
- r = rbd_aio_read(rbd_data->image, io_u->offset,
- io_u->xfer_buflen, io_u->xfer_buf,
- fri->completion);
+ r = rbd_aio_read(rbd->image, io_u->offset, io_u->xfer_buflen,
+ io_u->xfer_buf, fri->completion);
if (r < 0) {
log_err("rbd_aio_read failed.\n");
goto failed_comp;
}
} else if (io_u->ddir == DDIR_TRIM) {
- r = rbd_aio_discard(rbd_data->image, io_u->offset,
- io_u->xfer_buflen, fri->completion);
+ r = rbd_aio_discard(rbd->image, io_u->offset,
+ io_u->xfer_buflen, fri->completion);
if (r < 0) {
log_err("rbd_aio_discard failed.\n");
goto failed_comp;
}
} else if (io_u->ddir == DDIR_SYNC) {
- r = rbd_aio_flush(rbd_data->image, fri->completion);
+ r = rbd_aio_flush(rbd->image, fri->completion);
if (r < 0) {
log_err("rbd_flush failed.\n");
goto failed_comp;
} else {
dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
io_u->ddir);
+ r = -EINVAL;
goto failed_comp;
}
failed_comp:
rbd_aio_release(fri->completion);
failed:
- io_u->error = r;
+ io_u->error = -r;
td_verror(td, io_u->error, "xfer");
return FIO_Q_COMPLETED;
}
static int fio_rbd_init(struct thread_data *td)
{
int r;
+ struct rbd_data *rbd = td->io_ops_data;
+
+ if (rbd->connected)
+ return 0;
r = _fio_rbd_connect(td);
if (r) {
static void fio_rbd_cleanup(struct thread_data *td)
{
- struct rbd_data *rbd_data = td->io_ops->data;
+ struct rbd_data *rbd = td->io_ops_data;
- if (rbd_data) {
- _fio_rbd_disconnect(rbd_data);
- free(rbd_data->aio_events);
- free(rbd_data);
+ if (rbd) {
+ _fio_rbd_disconnect(rbd);
+ free(rbd->aio_events);
+ free(rbd->sort_events);
+ free(rbd);
}
-
}
static int fio_rbd_setup(struct thread_data *td)
{
- int r = 0;
rbd_image_info_t info;
struct fio_file *f;
- struct rbd_data *rbd_data = NULL;
- int major, minor, extra;
-
- /* log version of librbd. No cluster connection required. */
- rbd_version(&major, &minor, &extra);
- log_info("rbd engine: RBD version: %d.%d.%d\n", major, minor, extra);
+ struct rbd_data *rbd = NULL;
+ int r;
/* allocate engine specific structure to deal with librbd. */
- r = _fio_setup_rbd_data(td, &rbd_data);
+ r = _fio_setup_rbd_data(td, &rbd);
if (r) {
log_err("fio_setup_rbd_data failed.\n");
goto cleanup;
}
- td->io_ops->data = rbd_data;
+ td->io_ops_data = rbd;
/* librbd does not allow us to run first in the main thread and later
* in a fork child. It needs to be the same process context all the
log_err("fio_rbd_connect failed.\n");
goto cleanup;
}
+ rbd->connected = true;
/* get size of the RADOS block device */
- r = rbd_stat(rbd_data->image, &info, sizeof(info));
+ r = rbd_stat(rbd->image, &info, sizeof(info));
if (r < 0) {
log_err("rbd_status failed.\n");
- goto disconnect;
+ goto cleanup;
+ } else if (info.size == 0) {
+ log_err("image size should be larger than zero.\n");
+ r = -EINVAL;
+ goto cleanup;
}
- dprint(FD_IO, "rbd-engine: image size: %lu\n", info.size);
+
+ dprint(FD_IO, "rbd-engine: image size: %" PRIu64 "\n", info.size);
/* taken from "net" engine. Pretend we deal with files,
* even if we do not have any ideas about files.
f = td->files[0];
f->real_file_size = info.size;
- /* disconnect, then we were only connected to determine
- * the size of the RBD.
- */
- _fio_rbd_disconnect(rbd_data);
return 0;
-disconnect:
- _fio_rbd_disconnect(rbd_data);
cleanup:
fio_rbd_cleanup(td);
return r;
static int fio_rbd_invalidate(struct thread_data *td, struct fio_file *f)
{
#if defined(CONFIG_RBD_INVAL)
- struct rbd_data *rbd_data = td->io_ops->data;
+ struct rbd_data *rbd = td->io_ops_data;
- return rbd_invalidate_cache(rbd_data->image);
+ return rbd_invalidate_cache(rbd->image);
#else
return 0;
#endif