X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=engines%2Frdma.c;h=da00cba8b66b3f6db0bcd3a9cba8b45b9362ffa7;hp=87b061a2e61788563fff7ac05d69e06b1b8e3f29;hb=9acb08a9957b1111a06fbca6af113fa0c98dbd7c;hpb=ea6209ad989dfb437aec487d26d4209bcf755d2c diff --git a/engines/rdma.c b/engines/rdma.c index 87b061a2..da00cba8 100644 --- a/engines/rdma.c +++ b/engines/rdma.c @@ -5,12 +5,7 @@ * Supports both RDMA memory semantics and channel semantics * for the InfiniBand, RoCE and iWARP protocols. * - * This I/O engine is disabled by default. To enable it, execute: - * - * $ export EXTFLAGS+=" -DFIO_HAVE_RDMA " - * $ export EXTLIBS+=" -libverbs -lrdmacm " - * - * before running make. You will need the Linux RDMA software as well, either + * You will need the Linux RDMA software installed, either * from your Linux distributor or directly from openfabrics.org: * * http://www.openfabrics.org/downloads/OFED/ @@ -18,13 +13,13 @@ * Exchanging steps of RDMA ioengine control messages: * 1. client side sends test mode (RDMA_WRITE/RDMA_READ/SEND) * to server side. - * 2. server side parses test mode, and sends back confirmation + * 2. server side parses test mode, and sends back confirmation * to client side. In RDMA WRITE/READ test, this confirmation - * includes memory information, such as rkey, address. + * includes memory information, such as rkey, address. * 3. client side initiates test loop. - * 4. In RDMA WRITE/READ test, client side sends a completion + * 4. In RDMA WRITE/READ test, client side sends a completion * notification to server side. Server side updates its - * td->done as true. + * td->done as true. * */ #include @@ -41,16 +36,14 @@ #include #include -#include #include #include #include "../fio.h" - -#ifdef FIO_HAVE_RDMA +#include "../hash.h" +#include "../optgroup.h" #include -#include #define FIO_RDMA_MAX_IO_DEPTH 512 @@ -62,6 +55,77 @@ enum rdma_io_mode { FIO_RDMA_CHA_RECV }; +struct rdmaio_options { + struct thread_data *td; + unsigned int port; + enum rdma_io_mode verb; +}; + +static int str_hostname_cb(void *data, const char *input) +{ + struct rdmaio_options *o = data; + + if (o->td->o.filename) + free(o->td->o.filename); + o->td->o.filename = strdup(input); + return 0; +} + +static struct fio_option options[] = { + { + .name = "hostname", + .lname = "rdma engine hostname", + .type = FIO_OPT_STR_STORE, + .cb = str_hostname_cb, + .help = "Hostname for RDMA IO engine", + .category = FIO_OPT_C_ENGINE, + .group = FIO_OPT_G_RDMA, + }, + { + .name = "port", + .lname = "rdma engine port", + .type = FIO_OPT_INT, + .off1 = offsetof(struct rdmaio_options, port), + .minval = 1, + .maxval = 65535, + .help = "Port to use for RDMA connections", + .category = FIO_OPT_C_ENGINE, + .group = FIO_OPT_G_RDMA, + }, + { + .name = "verb", + .lname = "RDMA engine verb", + .alias = "proto", + .type = FIO_OPT_STR, + .off1 = offsetof(struct rdmaio_options, verb), + .help = "RDMA engine verb", + .def = "write", + .posval = { + { .ival = "write", + .oval = FIO_RDMA_MEM_WRITE, + .help = "Memory Write", + }, + { .ival = "read", + .oval = FIO_RDMA_MEM_READ, + .help = "Memory Read", + }, + { .ival = "send", + .oval = FIO_RDMA_CHA_SEND, + .help = "Posted Send", + }, + { .ival = "recv", + .oval = FIO_RDMA_CHA_RECV, + .help = "Posted Receive", + }, + }, + .category = FIO_OPT_C_ENGINE, + .group = FIO_OPT_G_RDMA, + }, + { + .name = NULL, + }, +}; + struct remote_u { uint64_t buf; uint32_t rkey; @@ -73,6 +137,7 @@ struct rdma_info_blk { uint32_t nr; /* client: io depth server: number of records for memory semantic 
*/ + uint32_t max_bs; /* maximum block size */ struct remote_u rmt_us[FIO_RDMA_MAX_IO_DEPTH]; }; @@ -125,13 +190,22 @@ struct rdmaio_data { static int client_recv(struct thread_data *td, struct ibv_wc *wc) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; + unsigned int max_bs; if (wc->byte_len != sizeof(rd->recv_buf)) { log_err("Received bogus data, size %d\n", wc->byte_len); return 1; } + max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]); + if (max_bs > ntohl(rd->recv_buf.max_bs)) { + log_err("fio: Server's block size (%d) must be greater than or " + "equal to the client's block size (%d)!\n", + ntohl(rd->recv_buf.max_bs), max_bs); + return 1; + } + /* store mr info for MEMORY semantic */ if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) || (rd->rdma_protocol == FIO_RDMA_MEM_READ)) { @@ -141,7 +215,7 @@ static int client_recv(struct thread_data *td, struct ibv_wc *wc) rd->rmt_nr = ntohl(rd->recv_buf.nr); for (i = 0; i < rd->rmt_nr; i++) { - rd->rmt_us[i].buf = ntohll(rd->recv_buf.rmt_us[i].buf); + rd->rmt_us[i].buf = be64_to_cpu(rd->recv_buf.rmt_us[i].buf); rd->rmt_us[i].rkey = ntohl(rd->recv_buf.rmt_us[i].rkey); rd->rmt_us[i].size = ntohl(rd->recv_buf.rmt_us[i].size); @@ -157,7 +231,8 @@ static int client_recv(struct thread_data *td, struct ibv_wc *wc) static int server_recv(struct thread_data *td, struct ibv_wc *wc) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; + unsigned int max_bs; if (wc->wr_id == FIO_RDMA_MAX_IO_DEPTH) { rd->rdma_protocol = ntohl(rd->recv_buf.mode); @@ -165,6 +240,15 @@ static int server_recv(struct thread_data *td, struct ibv_wc *wc) /* CHANNEL semantic, do nothing */ if (rd->rdma_protocol == FIO_RDMA_CHA_SEND) rd->rdma_protocol = FIO_RDMA_CHA_RECV; + + max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]); + if (max_bs < ntohl(rd->recv_buf.max_bs)) { + log_err("fio: Server's block size (%d) must be greater than or " + "equal to the client's block size (%d)!\n", + ntohl(rd->recv_buf.max_bs), max_bs); + return 1; + } + } return 0; @@ -172,7 +256,7 @@ static int server_recv(struct thread_data *td, struct ibv_wc *wc) static int cq_event_handler(struct thread_data *td, enum ibv_wc_opcode opcode) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct ibv_wc wc; struct rdma_io_u_data *r_io_u_d; int ret; @@ -193,9 +277,12 @@ static int cq_event_handler(struct thread_data *td, enum ibv_wc_opcode opcode) case IBV_WC_RECV: if (rd->is_client == 1) - client_recv(td, &wc); + ret = client_recv(td, &wc); else - server_recv(td, &wc); + ret = server_recv(td, &wc); + + if (ret) + return -1; if (wc.wr_id == FIO_RDMA_MAX_IO_DEPTH) break; @@ -265,6 +352,7 @@ static int cq_event_handler(struct thread_data *td, enum ibv_wc_opcode opcode) } rd->cq_event_num++; } + if (ret) { log_err("fio: poll error %d\n", ret); return 1; @@ -279,7 +367,7 @@ static int cq_event_handler(struct thread_data *td, enum ibv_wc_opcode opcode) */ static int rdma_poll_wait(struct thread_data *td, enum ibv_wc_opcode opcode) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct ibv_cq *ev_cq; void *ev_ctx; int ret; @@ -304,7 +392,7 @@ again: } ret = cq_event_handler(td, opcode); - if (ret < 1) + if (ret == 0) goto again; ibv_ack_cq_events(rd->cq, ret); @@ -316,7 +404,7 @@ again: static int fio_rdmaio_setup_qp(struct thread_data *td) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; 
struct ibv_qp_init_attr init_attr; int qp_depth = td->o.iodepth * 2; /* 2 times of io depth */ @@ -324,8 +412,9 @@ static int fio_rdmaio_setup_qp(struct thread_data *td) rd->pd = ibv_alloc_pd(rd->child_cm_id->verbs); else rd->pd = ibv_alloc_pd(rd->cm_id->verbs); + if (rd->pd == NULL) { - log_err("fio: ibv_alloc_pd fail\n"); + log_err("fio: ibv_alloc_pd fail: %m\n"); return 1; } @@ -334,7 +423,7 @@ static int fio_rdmaio_setup_qp(struct thread_data *td) else rd->channel = ibv_create_comp_channel(rd->cm_id->verbs); if (rd->channel == NULL) { - log_err("fio: ibv_create_comp_channel fail\n"); + log_err("fio: ibv_create_comp_channel fail: %m\n"); goto err1; } @@ -348,12 +437,12 @@ static int fio_rdmaio_setup_qp(struct thread_data *td) rd->cq = ibv_create_cq(rd->cm_id->verbs, qp_depth, rd, rd->channel, 0); if (rd->cq == NULL) { - log_err("fio: ibv_create_cq failed\n"); + log_err("fio: ibv_create_cq failed: %m\n"); goto err2; } if (ibv_req_notify_cq(rd->cq, 0) != 0) { - log_err("fio: ibv_create_cq failed\n"); + log_err("fio: ibv_req_notify_cq failed: %m\n"); goto err3; } @@ -369,13 +458,13 @@ static int fio_rdmaio_setup_qp(struct thread_data *td) if (rd->is_client == 0) { if (rdma_create_qp(rd->child_cm_id, rd->pd, &init_attr) != 0) { - log_err("fio: rdma_create_qp failed\n"); + log_err("fio: rdma_create_qp failed: %m\n"); goto err3; } rd->qp = rd->child_cm_id->qp; } else { if (rdma_create_qp(rd->cm_id, rd->pd, &init_attr) != 0) { - log_err("fio: rdma_create_qp failed\n"); + log_err("fio: rdma_create_qp failed: %m\n"); goto err3; } rd->qp = rd->cm_id->qp; @@ -395,19 +484,19 @@ err1: static int fio_rdmaio_setup_control_msg_buffers(struct thread_data *td) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; rd->recv_mr = ibv_reg_mr(rd->pd, &rd->recv_buf, sizeof(rd->recv_buf), IBV_ACCESS_LOCAL_WRITE); if (rd->recv_mr == NULL) { - log_err("fio: recv_buf reg_mr failed\n"); + log_err("fio: recv_buf reg_mr failed: %m\n"); return 1; } rd->send_mr = ibv_reg_mr(rd->pd, &rd->send_buf, sizeof(rd->send_buf), 0); if (rd->send_mr == NULL) { - log_err("fio: send_buf reg_mr failed\n"); + log_err("fio: send_buf reg_mr failed: %m\n"); ibv_dereg_mr(rd->recv_mr); return 1; } @@ -415,7 +504,7 @@ static int fio_rdmaio_setup_control_msg_buffers(struct thread_data *td) /* setup work request */ /* recv wq */ rd->recv_sgl.addr = (uint64_t) (unsigned long)&rd->recv_buf; - rd->recv_sgl.length = sizeof rd->recv_buf; + rd->recv_sgl.length = sizeof(rd->recv_buf); rd->recv_sgl.lkey = rd->recv_mr->lkey; rd->rq_wr.sg_list = &rd->recv_sgl; rd->rq_wr.num_sge = 1; @@ -423,7 +512,7 @@ static int fio_rdmaio_setup_control_msg_buffers(struct thread_data *td) /* send wq */ rd->send_sgl.addr = (uint64_t) (unsigned long)&rd->send_buf; - rd->send_sgl.length = sizeof rd->send_buf; + rd->send_sgl.length = sizeof(rd->send_buf); rd->send_sgl.lkey = rd->send_mr->lkey; rd->sq_wr.opcode = IBV_WR_SEND; @@ -439,14 +528,13 @@ static int get_next_channel_event(struct thread_data *td, struct rdma_event_channel *channel, enum rdma_cm_event_type wait_event) { - struct rdmaio_data *rd = td->io_ops->data; - - int ret; + struct rdmaio_data *rd = td->io_ops_data; struct rdma_cm_event *event; + int ret; ret = rdma_get_cm_event(channel, &event); if (ret) { - log_err("fio: rdma_get_cm_event"); + log_err("fio: rdma_get_cm_event: %d\n", ret); return 1; } @@ -472,7 +560,7 @@ static int get_next_channel_event(struct thread_data *td, static int fio_rdmaio_prep(struct thread_data *td, struct io_u *io_u) { - struct rdmaio_data *rd = 
td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct rdma_io_u_data *r_io_u_d; r_io_u_d = io_u->engine_data; @@ -515,14 +603,14 @@ static int fio_rdmaio_prep(struct thread_data *td, struct io_u *io_u) static struct io_u *fio_rdmaio_event(struct thread_data *td, int event) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct io_u *io_u; int i; io_u = rd->io_us_completed[0]; - for (i = 0; i < rd->io_u_completed_nr - 1; i++) { + for (i = 0; i < rd->io_u_completed_nr - 1; i++) rd->io_us_completed[i] = rd->io_us_completed[i + 1]; - } + rd->io_u_completed_nr--; dprint_io_u(io_u, "fio_rdmaio_event"); @@ -531,17 +619,14 @@ static struct io_u *fio_rdmaio_event(struct thread_data *td, int event) } static int fio_rdmaio_getevents(struct thread_data *td, unsigned int min, - unsigned int max, struct timespec *t) + unsigned int max, const struct timespec *t) { - struct rdmaio_data *rd = td->io_ops->data; - int r; + struct rdmaio_data *rd = td->io_ops_data; enum ibv_wc_opcode comp_opcode; - comp_opcode = IBV_WC_RDMA_WRITE; struct ibv_cq *ev_cq; void *ev_ctx; - int ret; - - r = 0; + int ret, r = 0; + comp_opcode = IBV_WC_RDMA_WRITE; switch (rd->rdma_protocol) { case FIO_RDMA_MEM_WRITE: @@ -598,7 +683,7 @@ again: static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us, unsigned int nr) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct ibv_send_wr *bad_wr; #if 0 enum ibv_wc_opcode comp_opcode; @@ -616,10 +701,7 @@ static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us, case FIO_RDMA_MEM_WRITE: /* compose work request */ r_io_u_d = io_us[i]->engine_data; - if (td->o.use_os_rand) - index = os_random_long(&td->random_state) % rd->rmt_nr; - else - index = __rand(&rd->rand_state) % rd->rmt_nr; + index = __rand(&rd->rand_state) % rd->rmt_nr; r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_WRITE; r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey; r_io_u_d->sq_wr.wr.rdma.remote_addr = \ @@ -629,10 +711,7 @@ static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us, case FIO_RDMA_MEM_READ: /* compose work request */ r_io_u_d = io_us[i]->engine_data; - if (td->o.use_os_rand) - index = os_random_long(&td->random_state) % rd->rmt_nr; - else - index = __rand(&rd->rand_state) % rd->rmt_nr; + index = __rand(&rd->rand_state) % rd->rmt_nr; r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_READ; r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey; r_io_u_d->sq_wr.wr.rdma.remote_addr = \ @@ -651,7 +730,7 @@ static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us, } if (ibv_post_send(rd->qp, &r_io_u_d->sq_wr, &bad_wr) != 0) { - log_err("fio: ibv_post_send fail\n"); + log_err("fio: ibv_post_send fail: %m\n"); return -1; } @@ -667,7 +746,7 @@ static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us, static int fio_rdmaio_recv(struct thread_data *td, struct io_u **io_us, unsigned int nr) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct ibv_recv_wr *bad_wr; struct rdma_io_u_data *r_io_u_d; int i; @@ -679,7 +758,7 @@ static int fio_rdmaio_recv(struct thread_data *td, struct io_u **io_us, r_io_u_d = io_us[i]->engine_data; if (ibv_post_recv(rd->qp, &r_io_u_d->rq_wr, &bad_wr) != 0) { - log_err("fio: ibv_post_recv fail\n"); + log_err("fio: ibv_post_recv fail: %m\n"); return 1; } } @@ -687,7 +766,7 @@ static int fio_rdmaio_recv(struct thread_data *td, struct io_u **io_us, || (rd->rdma_protocol == FIO_RDMA_MEM_WRITE)) { /* 
re-post the rq_wr */ if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) { - log_err("fio: ibv_post_recv fail\n"); + log_err("fio: ibv_post_recv fail: %m\n"); return 1; } @@ -703,7 +782,7 @@ static int fio_rdmaio_recv(struct thread_data *td, struct io_u **io_us, static int fio_rdmaio_queue(struct thread_data *td, struct io_u *io_u) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; fio_ro_check(td, io_u); @@ -721,8 +800,8 @@ static int fio_rdmaio_queue(struct thread_data *td, struct io_u *io_u) static void fio_rdmaio_queued(struct thread_data *td, struct io_u **io_us, unsigned int nr) { - struct rdmaio_data *rd = td->io_ops->data; - struct timeval now; + struct rdmaio_data *rd = td->io_ops_data; + struct timespec now; unsigned int i; if (!fio_fill_issue_time(td)) @@ -744,7 +823,7 @@ static void fio_rdmaio_queued(struct thread_data *td, struct io_u **io_us, static int fio_rdmaio_commit(struct thread_data *td) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct io_u **io_us; int ret; @@ -754,11 +833,11 @@ static int fio_rdmaio_commit(struct thread_data *td) io_us = rd->io_us_queued; do { /* RDMA_WRITE or RDMA_READ */ - if (rd->is_client) { + if (rd->is_client) ret = fio_rdmaio_send(td, io_us, rd->io_u_queued_nr); - } else if (!rd->is_client) { + else if (!rd->is_client) ret = fio_rdmaio_recv(td, io_us, rd->io_u_queued_nr); - } else + else ret = 0; /* must be a SYNC */ if (ret > 0) { @@ -776,17 +855,17 @@ static int fio_rdmaio_commit(struct thread_data *td) static int fio_rdmaio_connect(struct thread_data *td, struct fio_file *f) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct rdma_conn_param conn_param; struct ibv_send_wr *bad_wr; - memset(&conn_param, 0, sizeof conn_param); + memset(&conn_param, 0, sizeof(conn_param)); conn_param.responder_resources = 1; conn_param.initiator_depth = 1; conn_param.retry_count = 10; if (rdma_connect(rd->cm_id, &conn_param) != 0) { - log_err("fio: rdma_connect fail\n"); + log_err("fio: rdma_connect fail: %m\n"); return 1; } @@ -801,19 +880,21 @@ static int fio_rdmaio_connect(struct thread_data *td, struct fio_file *f) rd->send_buf.nr = htonl(td->o.iodepth); if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) { - log_err("fio: ibv_post_send fail"); + log_err("fio: ibv_post_send fail: %m\n"); return 1; } - rdma_poll_wait(td, IBV_WC_SEND); + if (rdma_poll_wait(td, IBV_WC_SEND) < 0) + return 1; /* wait for remote MR info from server side */ - rdma_poll_wait(td, IBV_WC_RECV); + if (rdma_poll_wait(td, IBV_WC_RECV) < 0) + return 1; /* In SEND/RECV test, it's a good practice to setup the iodepth of * of the RECV side deeper than that of the SEND side to * avoid RNR (receiver not ready) error. The - * SEND side may send so many unsolicited message before + * SEND side may send so many unsolicited message before * RECV side commits sufficient recv buffers into recv queue. * This may lead to RNR error. Here, SEND side pauses for a while * during which RECV side commits sufficient recv buffers. 
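
The RNR note above is the one ordering rule the SEND/RECV mode depends on: receive work requests have to be sitting in the receive queue before the peer's sends arrive. Below is a minimal illustrative sketch of that pre-posting step, not code taken from the engine; the helper name prepost_recvs, the flat buffer pool and the depth argument are hypothetical, and error handling is collapsed into a single return value.

#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

/*
 * Hypothetical helper: post "depth" receive work requests, one per
 * buf_len-sized slot of a buffer pool already registered as "mr".
 * Doing this before the remote side starts sending is the usual way
 * to avoid RNR (receiver not ready) NAKs.
 */
static int prepost_recvs(struct ibv_qp *qp, struct ibv_mr *mr,
			 char *pool, uint32_t buf_len, int depth)
{
	int i;

	for (i = 0; i < depth; i++) {
		struct ibv_sge sge;
		struct ibv_recv_wr wr, *bad_wr;

		memset(&sge, 0, sizeof(sge));
		sge.addr   = (uint64_t) (unsigned long) (pool + i * buf_len);
		sge.length = buf_len;
		sge.lkey   = mr->lkey;

		memset(&wr, 0, sizeof(wr));
		wr.wr_id   = i;		/* identifies the slot on completion */
		wr.sg_list = &sge;
		wr.num_sge = 1;

		if (ibv_post_recv(qp, &wr, &bad_wr) != 0)
			return 1;	/* queue full or QP in error state */
	}

	return 0;
}

The engine applies the same idea: the receiving side posts one receive work request per io_u up front, and, as the comment above says, the sending side pauses briefly after connecting so those buffers are in place.
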
@@ -825,17 +906,18 @@ static int fio_rdmaio_connect(struct thread_data *td, struct fio_file *f) static int fio_rdmaio_accept(struct thread_data *td, struct fio_file *f) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct rdma_conn_param conn_param; struct ibv_send_wr *bad_wr; + int ret = 0; /* rdma_accept() - then wait for accept success */ - memset(&conn_param, 0, sizeof conn_param); + memset(&conn_param, 0, sizeof(conn_param)); conn_param.responder_resources = 1; conn_param.initiator_depth = 1; if (rdma_accept(rd->child_cm_id, &conn_param) != 0) { - log_err("fio: rdma_accept\n"); + log_err("fio: rdma_accept: %m\n"); return 1; } @@ -846,16 +928,17 @@ static int fio_rdmaio_accept(struct thread_data *td, struct fio_file *f) } /* wait for request */ - rdma_poll_wait(td, IBV_WC_RECV); + ret = rdma_poll_wait(td, IBV_WC_RECV) < 0; if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) { - log_err("fio: ibv_post_send fail"); + log_err("fio: ibv_post_send fail: %m\n"); return 1; } - rdma_poll_wait(td, IBV_WC_SEND); + if (rdma_poll_wait(td, IBV_WC_SEND) < 0) + return 1; - return 0; + return ret; } static int fio_rdmaio_open_file(struct thread_data *td, struct fio_file *f) @@ -868,7 +951,7 @@ static int fio_rdmaio_open_file(struct thread_data *td, struct fio_file *f) static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct ibv_send_wr *bad_wr; /* unregister rdma buffer */ @@ -881,11 +964,11 @@ static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f) || (rd->rdma_protocol == FIO_RDMA_MEM_READ))) { if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) { - log_err("fio: ibv_post_send fail"); + log_err("fio: ibv_post_send fail: %m\n"); return 1; } - dprint(FD_IO, "fio: close infomation sent success\n"); + dprint(FD_IO, "fio: close information sent success\n"); rdma_poll_wait(td, IBV_WC_SEND); } @@ -893,14 +976,17 @@ static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f) rdma_disconnect(rd->cm_id); else { rdma_disconnect(rd->child_cm_id); -/* rdma_disconnect(rd->cm_id); */ +#if 0 + rdma_disconnect(rd->cm_id); +#endif } -/* if (get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_DISCONNECTED) != 0) - { - log_err("fio: wait for RDMA_CM_EVENT_DISCONNECTED\n"); - return 1; - }*/ +#if 0 + if (get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_DISCONNECTED) != 0) { + log_err("fio: wait for RDMA_CM_EVENT_DISCONNECTED\n"); + return 1; + } +#endif ibv_destroy_cq(rd->cq); ibv_destroy_qp(rd->qp); @@ -921,8 +1007,9 @@ static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f) static int fio_rdmaio_setup_connect(struct thread_data *td, const char *host, unsigned short port) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct ibv_recv_wr *bad_wr; + int err; rd->addr.sin_family = AF_INET; rd->addr.sin_port = htons(port); @@ -940,28 +1027,28 @@ static int fio_rdmaio_setup_connect(struct thread_data *td, const char *host, } /* resolve route */ - if (rdma_resolve_addr(rd->cm_id, NULL, - (struct sockaddr *)&rd->addr, 2000) != 0) { - log_err("fio: rdma_resolve_addr"); + err = rdma_resolve_addr(rd->cm_id, NULL, (struct sockaddr *)&rd->addr, 2000); + if (err != 0) { + log_err("fio: rdma_resolve_addr: %d\n", err); return 1; } - if (get_next_channel_event - (td, rd->cm_channel, RDMA_CM_EVENT_ADDR_RESOLVED) - != 0) { - log_err("fio: 
get_next_channel_event"); + err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ADDR_RESOLVED); + if (err != 0) { + log_err("fio: get_next_channel_event: %d\n", err); return 1; } /* resolve route */ - if (rdma_resolve_route(rd->cm_id, 2000) != 0) { - log_err("fio: rdma_resolve_route"); + err = rdma_resolve_route(rd->cm_id, 2000); + if (err != 0) { + log_err("fio: rdma_resolve_route: %d\n", err); return 1; } - if (get_next_channel_event - (td, rd->cm_channel, RDMA_CM_EVENT_ROUTE_RESOLVED) != 0) { - log_err("fio: get_next_channel_event"); + err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ROUTE_RESOLVED); + if (err != 0) { + log_err("fio: get_next_channel_event: %d\n", err); return 1; } @@ -973,8 +1060,9 @@ static int fio_rdmaio_setup_connect(struct thread_data *td, const char *host, return 1; /* post recv buf */ - if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) { - log_err("fio: ibv_post_recv fail\n"); + err = ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr); + if (err != 0) { + log_err("fio: ibv_post_recv fail: %d\n", err); return 1; } @@ -983,8 +1071,11 @@ static int fio_rdmaio_setup_connect(struct thread_data *td, const char *host, static int fio_rdmaio_setup_listen(struct thread_data *td, short port) { - struct rdmaio_data *rd = td->io_ops->data; + struct rdmaio_data *rd = td->io_ops_data; struct ibv_recv_wr *bad_wr; + int state = td->runstate; + + td_set_runstate(td, TD_SETTING_UP); rd->addr.sin_family = AF_INET; rd->addr.sin_addr.s_addr = htonl(INADDR_ANY); @@ -992,15 +1083,17 @@ static int fio_rdmaio_setup_listen(struct thread_data *td, short port) /* rdma_listen */ if (rdma_bind_addr(rd->cm_id, (struct sockaddr *)&rd->addr) != 0) { - log_err("fio: rdma_bind_addr fail\n"); + log_err("fio: rdma_bind_addr fail: %m\n"); return 1; } if (rdma_listen(rd->cm_id, 3) != 0) { - log_err("fio: rdma_listen fail\n"); + log_err("fio: rdma_listen fail: %m\n"); return 1; } + log_info("fio: waiting for connection\n"); + /* wait for CONNECT_REQUEST */ if (get_next_channel_event (td, rd->cm_channel, RDMA_CM_EVENT_CONNECT_REQUEST) != 0) { @@ -1016,31 +1109,19 @@ static int fio_rdmaio_setup_listen(struct thread_data *td, short port) /* post recv buf */ if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) { - log_err("fio: ibv_post_recv fail\n"); + log_err("fio: ibv_post_recv fail: %m\n"); return 1; } + td_set_runstate(td, state); return 0; } -static int fio_rdmaio_init(struct thread_data *td) +static int check_set_rlimits(struct thread_data *td) { - struct rdmaio_data *rd = td->io_ops->data; - unsigned int port; - char host[64], buf[128]; - char *sep, *portp, *modep; - int ret; +#ifdef CONFIG_RLIMIT_MEMLOCK struct rlimit rl; - if (td_rw(td)) { - log_err("fio: rdma connections must be read OR write\n"); - return 1; - } - if (td_random(td)) { - log_err("fio: RDMA network IO can't be random\n"); - return 1; - } - /* check RLIMIT_MEMLOCK */ if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) { log_err("fio: getrlimit fail: %d(%s)\n", @@ -1065,57 +1146,104 @@ static int fio_rdmaio_init(struct thread_data *td) return 1; } } +#endif - strcpy(buf, td->o.filename); + return 0; +} - sep = strchr(buf, '/'); - if (!sep) - goto bad_host; +static int compat_options(struct thread_data *td) +{ + // The original RDMA engine had an ugly / seperator + // on the filename for it's options. 
This function + // retains backwards compatibility with it.100 - *sep = '\0'; - sep++; - strcpy(host, buf); - if (!strlen(host)) - goto bad_host; + struct rdmaio_options *o = td->eo; + char *modep, *portp; + char *filename = td->o.filename; - modep = NULL; - portp = sep; - sep = strchr(portp, '/'); - if (sep) { - *sep = '\0'; - modep = sep + 1; - } + if (!filename) + return 0; - port = strtol(portp, NULL, 10); - if (!port || port > 65535) + portp = strchr(filename, '/'); + if (portp == NULL) + return 0; + + *portp = '\0'; + portp++; + + o->port = strtol(portp, NULL, 10); + if (!o->port || o->port > 65535) goto bad_host; + modep = strchr(portp, '/'); + if (modep != NULL) { + *modep = '\0'; + modep++; + } + if (modep) { if (!strncmp("rdma_write", modep, strlen(modep)) || !strncmp("RDMA_WRITE", modep, strlen(modep))) - rd->rdma_protocol = FIO_RDMA_MEM_WRITE; + o->verb = FIO_RDMA_MEM_WRITE; else if (!strncmp("rdma_read", modep, strlen(modep)) || !strncmp("RDMA_READ", modep, strlen(modep))) - rd->rdma_protocol = FIO_RDMA_MEM_READ; + o->verb = FIO_RDMA_MEM_READ; else if (!strncmp("send", modep, strlen(modep)) || !strncmp("SEND", modep, strlen(modep))) - rd->rdma_protocol = FIO_RDMA_CHA_SEND; + o->verb = FIO_RDMA_CHA_SEND; else goto bad_host; } else - rd->rdma_protocol = FIO_RDMA_MEM_WRITE; + o->verb = FIO_RDMA_MEM_WRITE; + + + return 0; + +bad_host: + log_err("fio: bad rdma host/port/protocol: %s\n", td->o.filename); + return 1; +} + +static int fio_rdmaio_init(struct thread_data *td) +{ + struct rdmaio_data *rd = td->io_ops_data; + struct rdmaio_options *o = td->eo; + unsigned int max_bs; + int ret, i; + + if (td_rw(td)) { + log_err("fio: rdma connections must be read OR write\n"); + return 1; + } + if (td_random(td)) { + log_err("fio: RDMA network IO can't be random\n"); + return 1; + } + + if (compat_options(td)) + return 1; + + if (!o->port) { + log_err("fio: no port has been specified which is required " + "for the rdma engine\n"); + return 1; + } + + if (check_set_rlimits(td)) + return 1; + rd->rdma_protocol = o->verb; rd->cq_event_num = 0; rd->cm_channel = rdma_create_event_channel(); if (!rd->cm_channel) { - log_err("fio: rdma_create_event_channel fail\n"); + log_err("fio: rdma_create_event_channel fail: %m\n"); return 1; } ret = rdma_create_id(rd->cm_channel, &rd->cm_id, rd, RDMA_PS_TCP); if (ret) { - log_err("fio: rdma_create_id fail\n"); + log_err("fio: rdma_create_id fail: %m\n"); return 1; } @@ -1142,20 +1270,20 @@ static int fio_rdmaio_init(struct thread_data *td) if (td_read(td)) { /* READ as the server */ rd->is_client = 0; + td->flags |= TD_F_NO_PROGRESS; /* server rd->rdma_buf_len will be setup after got request */ - ret = fio_rdmaio_setup_listen(td, port); + ret = fio_rdmaio_setup_listen(td, o->port); } else { /* WRITE as the client */ rd->is_client = 1; - ret = fio_rdmaio_setup_connect(td, host, port); + ret = fio_rdmaio_setup_connect(td, td->o.filename, o->port); } - struct flist_head *entry; - unsigned int max_bs; max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]); + rd->send_buf.max_bs = htonl(max_bs); + /* register each io_u in the free list */ - int i = 0; - flist_for_each(entry, &td->io_u_freelist) { - struct io_u *io_u = flist_entry(entry, struct io_u, list); + for (i = 0; i < td->io_u_freelist.nr; i++) { + struct io_u *io_u = td->io_u_freelist.io_us[i]; io_u->engine_data = malloc(sizeof(struct rdma_io_u_data)); memset(io_u->engine_data, 0, sizeof(struct rdma_io_u_data)); @@ -1166,118 +1294,72 @@ static int fio_rdmaio_init(struct thread_data *td) 
IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE); if (io_u->mr == NULL) { - log_err("fio: ibv_reg_mr io_u failed\n"); + log_err("fio: ibv_reg_mr io_u failed: %m\n"); return 1; } rd->send_buf.rmt_us[i].buf = - htonll((uint64_t) (unsigned long)io_u->buf); + cpu_to_be64((uint64_t) (unsigned long)io_u->buf); rd->send_buf.rmt_us[i].rkey = htonl(io_u->mr->rkey); rd->send_buf.rmt_us[i].size = htonl(max_bs); -/* log_info("fio: Send rkey %x addr %" PRIx64 " len %d to client\n", - io_u->mr->rkey, io_u->buf, max_bs); */ - i++; +#if 0 + log_info("fio: Send rkey %x addr %" PRIx64 " len %d to client\n", io_u->mr->rkey, io_u->buf, max_bs); */ +#endif } rd->send_buf.nr = htonl(i); return ret; -bad_host: - log_err("fio: bad rdma host/port/protocol: %s\n", td->o.filename); - return 1; } static void fio_rdmaio_cleanup(struct thread_data *td) { - struct rdmaio_data *rd = td->io_ops->data; - - if (rd) { -/* if (nd->listenfd != -1) - close(nd->listenfd); - if (nd->pipes[0] != -1) - close(nd->pipes[0]); - if (nd->pipes[1] != -1) - close(nd->pipes[1]); -*/ + struct rdmaio_data *rd = td->io_ops_data; + + if (rd) free(rd); - } } static int fio_rdmaio_setup(struct thread_data *td) { struct rdmaio_data *rd; - if (!td->io_ops->data) { - rd = malloc(sizeof(*rd));; - - memset(rd, 0, sizeof(*rd)); - init_rand_seed(&rd->rand_state, GOLDEN_RATIO_PRIME); - td->io_ops->data = rd; + if (!td->files_index) { + add_file(td, td->o.filename ?: "rdma", 0, 0); + td->o.nr_files = td->o.nr_files ?: 1; + td->o.open_files++; } - return 0; -} - -static struct ioengine_ops ioengine_rw = { - .name = "rdma", - .version = FIO_IOOPS_VERSION, - .setup = fio_rdmaio_setup, - .init = fio_rdmaio_init, - .prep = fio_rdmaio_prep, - .queue = fio_rdmaio_queue, - .commit = fio_rdmaio_commit, - .getevents = fio_rdmaio_getevents, - .event = fio_rdmaio_event, - .cleanup = fio_rdmaio_cleanup, - .open_file = fio_rdmaio_open_file, - .close_file = fio_rdmaio_close_file, - .flags = FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO, -}; - -#else /* FIO_HAVE_RDMA */ + if (!td->io_ops_data) { + rd = malloc(sizeof(*rd)); -static int fio_rdmaio_open_file(struct thread_data *td, struct fio_file *f) -{ - return 0; -} + memset(rd, 0, sizeof(*rd)); + init_rand_seed(&rd->rand_state, (unsigned int) GOLDEN_RATIO_PRIME, 0); + td->io_ops_data = rd; + } -static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f) -{ return 0; } -static int fio_rdmaio_queue(struct thread_data *td, struct io_u *io_u) -{ - return FIO_Q_COMPLETED; -} - -static int fio_rdmaio_init(struct thread_data fio_unused * td) -{ - log_err("fio: rdma(librdmacm libibverbs) not available\n"); - log_err(" You haven't compiled rdma ioengine into fio.\n"); - log_err(" If you want to try rdma ioengine,\n"); - log_err(" make sure OFED is installed,\n"); - log_err(" $ ofed_info\n"); - log_err(" then try to make fio as follows:\n"); - log_err(" $ export EXTFLAGS+=\" -DFIO_HAVE_RDMA \"\n"); - log_err(" $ export EXTLIBS+=\" -libverbs -lrdmacm \"\n"); - log_err(" $ make clean && make\n"); - return 1; -} - static struct ioengine_ops ioengine_rw = { - .name = "rdma", - .version = FIO_IOOPS_VERSION, - .init = fio_rdmaio_init, - .queue = fio_rdmaio_queue, - .open_file = fio_rdmaio_open_file, - .close_file = fio_rdmaio_close_file, - .flags = FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO, + .name = "rdma", + .version = FIO_IOOPS_VERSION, + .setup = fio_rdmaio_setup, + .init = fio_rdmaio_init, + .prep = fio_rdmaio_prep, + .queue = fio_rdmaio_queue, + .commit = fio_rdmaio_commit, + .getevents = 
fio_rdmaio_getevents, + .event = fio_rdmaio_event, + .cleanup = fio_rdmaio_cleanup, + .open_file = fio_rdmaio_open_file, + .close_file = fio_rdmaio_close_file, + .flags = FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO, + .options = options, + .option_struct_size = sizeof(struct rdmaio_options), }; -#endif - static void fio_init fio_rdmaio_register(void) { register_ioengine(&ioengine_rw);
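
As a closing illustration of the control-message exchange described in steps 1 and 2 at the top of the file: the server advertises each registered buffer's address, rkey and length in network byte order through rmt_us[], and the client converts them back before building RDMA READ/WRITE work requests. The sketch below shows only that packing, assuming glibc's htobe64()/be64toh() in place of fio's cpu_to_be64()/be64_to_cpu() helpers; the struct and function names are hypothetical, not the engine's.

#include <stdint.h>
#include <endian.h>		/* htobe64()/be64toh(), glibc */
#include <arpa/inet.h>		/* htonl()/ntohl() */

/* Hypothetical wire format for one advertised buffer (cf. struct remote_u). */
struct remote_buf_wire {
	uint64_t buf;	/* remote virtual address, big endian */
	uint32_t rkey;	/* remote key from ibv_reg_mr(), big endian */
	uint32_t size;	/* usable length in bytes, big endian */
};

/* Server side: advertise a registered buffer to the peer. */
static void pack_remote_buf(struct remote_buf_wire *dst, void *addr,
			    uint32_t rkey, uint32_t size)
{
	dst->buf  = htobe64((uint64_t) (unsigned long) addr);
	dst->rkey = htonl(rkey);
	dst->size = htonl(size);
}

/* Client side: recover the values before building RDMA READ/WRITE WRs. */
static void unpack_remote_buf(const struct remote_buf_wire *src,
			      uint64_t *addr, uint32_t *rkey, uint32_t *size)
{
	*addr = be64toh(src->buf);
	*rkey = ntohl(src->rkey);
	*size = ntohl(src->size);
}

Because both peers have to agree on this layout and on the advertised buffer length, the change also adds a max_bs field to the handshake, letting each side verify that the server's maximum block size is at least as large as the client's before the test loop starts.
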