 * RDMA I/O engine based on the IB verbs and RDMA/CM user space libraries.
 * Supports both RDMA memory semantics and channel semantics
 * for the InfiniBand, RoCE and iWARP protocols.
 *
 * You will need the Linux RDMA software installed, either
 * from your Linux distribution or directly from openfabrics.org:
 *
 * http://www.openfabrics.org/downloads/OFED/
 * Control-message exchange between the RDMA ioengine client and server:
 * 1. The client sends the test mode (RDMA_WRITE/RDMA_READ/SEND)
 *    to the server.
 * 2. The server parses the test mode and sends back a confirmation
 *    to the client. In the RDMA WRITE/READ tests, this confirmation
 *    includes memory information, such as the rkey and address.
 * 3. The client initiates the test loop.
 * 4. In the RDMA WRITE/READ tests, the client sends a completion
 *    notification to the server, and the server updates its
 *    accounting accordingly.
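 *
 * Example usage (a sketch -- option names as defined in the options
 * table below; the server side runs the read job, the client side the
 * write job, see fio_rdmaio_init()):
 *
 *   server: fio --name=srv --ioengine=rdma --rw=read --port=10000 \
 *               --bs=1m --size=100m
 *   client: fio --name=cli --ioengine=rdma --rw=write --port=10000 \
 *               --hostname=<server> --verb=write --bs=1m --size=100m
 */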
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/resource.h>

#include "../optgroup.h"

#include <rdma/rdma_cma.h>

#define FIO_RDMA_MAX_IO_DEPTH 512
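/*
 * Note: this value doubles as the wr_id of the control-message work
 * requests (see fio_rdmaio_setup_control_msg_buffers()), so completions
 * carrying it can be told apart from data io_us, whose wr_ids are
 * indices below the maximum depth.
 */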
struct rdmaio_options {
        struct thread_data *td;
        enum rdma_io_mode verb;
static int str_hostname_cb(void *data, const char *input)
{
        struct rdmaio_options *o = data;

        if (o->td->o.filename)
                free(o->td->o.filename);
        o->td->o.filename = strdup(input);
static struct fio_option options[] = {
        .lname    = "rdma engine hostname",
        .type     = FIO_OPT_STR_STORE,
        .cb       = str_hostname_cb,
        .help     = "Hostname for RDMA IO engine",
        .category = FIO_OPT_C_ENGINE,
        .group    = FIO_OPT_G_RDMA,

        .lname    = "rdma engine bindname",
        .type     = FIO_OPT_STR_STORE,
        .off1     = offsetof(struct rdmaio_options, bindname),
        .help     = "Bind address for RDMA IO engine",
        .category = FIO_OPT_C_ENGINE,
        .group    = FIO_OPT_G_RDMA,

        .lname    = "rdma engine port",
        .off1     = offsetof(struct rdmaio_options, port),
        .help     = "Port to use for RDMA connections",
        .category = FIO_OPT_C_ENGINE,
        .group    = FIO_OPT_G_RDMA,

        .lname    = "RDMA engine verb",
        .off1     = offsetof(struct rdmaio_options, verb),
        .help     = "RDMA engine verb",
                  .oval = FIO_RDMA_MEM_WRITE,
                  .help = "Memory Write",
                  .oval = FIO_RDMA_MEM_READ,
                  .help = "Memory Read",
                  .oval = FIO_RDMA_CHA_SEND,
                  .help = "Posted Send",
                  .oval = FIO_RDMA_CHA_RECV,
                  .help = "Posted Receive",
        .category = FIO_OPT_C_ENGINE,
        .group    = FIO_OPT_G_RDMA,
struct rdma_info_blk {
        uint32_t mode;          /* channel semantic or memory semantic */
        uint32_t nr;            /* client: io depth
                                   server: number of records for memory
                                   semantic */
        uint32_t max_bs;        /* maximum block size */
        struct remote_u rmt_us[FIO_RDMA_MAX_IO_DEPTH];
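        /*
         * Everything above travels in network byte order: htonl() and
         * cpu_to_be64() on the sending side, ntohl() and be64_to_cpu()
         * on the receiving side.
         */
};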
struct rdma_io_u_data {
        uint64_t wr_id;
        struct ibv_send_wr sq_wr;
        struct ibv_recv_wr rq_wr;
        struct ibv_sge rdma_sgl;
};

struct rdmaio_data {
        enum rdma_io_mode rdma_protocol;
        struct sockaddr_in addr;

        struct ibv_recv_wr rq_wr;
        struct ibv_sge recv_sgl;
        struct rdma_info_blk recv_buf;
        struct ibv_mr *recv_mr;

        struct ibv_send_wr sq_wr;
        struct ibv_sge send_sgl;
        struct rdma_info_blk send_buf;
        struct ibv_mr *send_mr;

        struct ibv_comp_channel *channel;

        struct rdma_event_channel *cm_channel;
        struct rdma_cm_id *cm_id;
        struct rdma_cm_id *child_cm_id;

        struct remote_u *rmt_us;

        struct io_u **io_us_queued;
        struct io_u **io_us_flight;
        struct io_u **io_us_completed;
        int io_u_completed_nr;

        struct frand_state rand_state;
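        /* rand_state picks a random remote buffer (rmt_us index) for
         * each RDMA WRITE/READ work request; see fio_rdmaio_send() */
};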
static int client_recv(struct thread_data *td, struct ibv_wc *wc)
{
        struct rdmaio_data *rd = td->io_ops_data;

        if (wc->byte_len != sizeof(rd->recv_buf)) {
                log_err("Received bogus data, size %d\n", wc->byte_len);
                return 1;
        }

        max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
        if (max_bs > ntohl(rd->recv_buf.max_bs)) {
                log_err("fio: Server's block size (%d) must be greater than or "
                        "equal to the client's block size (%d)!\n",
                        ntohl(rd->recv_buf.max_bs), max_bs);
                return 1;
        }

        /* store mr info for MEMORY semantic */
        if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
            (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {

                rd->rmt_nr = ntohl(rd->recv_buf.nr);

                for (i = 0; i < rd->rmt_nr; i++) {
                        rd->rmt_us[i].buf = be64_to_cpu(rd->recv_buf.rmt_us[i].buf);
                        rd->rmt_us[i].rkey = ntohl(rd->recv_buf.rmt_us[i].rkey);
                        rd->rmt_us[i].size = ntohl(rd->recv_buf.rmt_us[i].size);

                        dprint(FD_IO,
                               "fio: Received rkey %x addr %" PRIx64
                               " len %d from peer\n", rd->rmt_us[i].rkey,
                               rd->rmt_us[i].buf, rd->rmt_us[i].size);
static int server_recv(struct thread_data *td, struct ibv_wc *wc)
{
        struct rdmaio_data *rd = td->io_ops_data;

        if (wc->wr_id == FIO_RDMA_MAX_IO_DEPTH) {
                rd->rdma_protocol = ntohl(rd->recv_buf.mode);

                /* CHANNEL semantic: the server mirrors the client, so a
                 * SEND client means a RECV server */
                if (rd->rdma_protocol == FIO_RDMA_CHA_SEND)
                        rd->rdma_protocol = FIO_RDMA_CHA_RECV;

                max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
                if (max_bs < ntohl(rd->recv_buf.max_bs)) {
                        log_err("fio: Server's block size (%d) must be greater than or "
                                "equal to the client's block size (%d)!\n",
                                max_bs, ntohl(rd->recv_buf.max_bs));
                        return 1;
                }
static int cq_event_handler(struct thread_data *td, enum ibv_wc_opcode opcode)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct rdma_io_u_data *r_io_u_d;

        while ((ret = ibv_poll_cq(rd->cq, 1, &wc)) == 1) {
                if (wc.status) {
                        log_err("fio: cq completion status %d(%s)\n",
                                wc.status, ibv_wc_status_str(wc.status));
                        return -1;
                }

                switch (wc.opcode) {
                case IBV_WC_RECV:
                        if (rd->is_client == 1)
                                ret = client_recv(td, &wc);
                        else
                                ret = server_recv(td, &wc);

                        if (wc.wr_id == FIO_RDMA_MAX_IO_DEPTH)
                                break;

                        for (i = 0; i < rd->io_u_flight_nr; i++) {
                                r_io_u_d = rd->io_us_flight[i]->engine_data;

                                if (wc.wr_id == r_io_u_d->rq_wr.wr_id) {
                                        rd->io_us_flight[i]->resid =
                                                rd->io_us_flight[i]->buflen
                                                - wc.byte_len;
                                        rd->io_us_flight[i]->error = 0;

                                        rd->io_us_completed[rd->io_u_completed_nr]
                                                = rd->io_us_flight[i];
                                        rd->io_u_completed_nr++;
                                        break;
                                }
                        }
                        if (i == rd->io_u_flight_nr)
                                log_err("fio: recv wr %" PRIu64 " not found\n",
                                        wc.wr_id);
                        else {
                                /* swap-remove: replace the completed entry
                                 * with the last one in the flight list */
                                rd->io_us_flight[i] =
                                        rd->io_us_flight[rd->io_u_flight_nr - 1];
                                rd->io_u_flight_nr--;
                        }
                        break;

                case IBV_WC_SEND:
                case IBV_WC_RDMA_WRITE:
                case IBV_WC_RDMA_READ:
                        if (wc.wr_id == FIO_RDMA_MAX_IO_DEPTH)
                                break;

                        for (i = 0; i < rd->io_u_flight_nr; i++) {
                                r_io_u_d = rd->io_us_flight[i]->engine_data;

                                if (wc.wr_id == r_io_u_d->sq_wr.wr_id) {
                                        rd->io_us_completed[rd->io_u_completed_nr]
                                                = rd->io_us_flight[i];
                                        rd->io_u_completed_nr++;
                                        break;
                                }
                        }
                        if (i == rd->io_u_flight_nr)
                                log_err("fio: send wr %" PRIu64 " not found\n",
                                        wc.wr_id);
                        else {
                                /* swap-remove: replace the completed entry
                                 * with the last one in the flight list */
                                rd->io_us_flight[i] =
                                        rd->io_us_flight[rd->io_u_flight_nr - 1];
                                rd->io_u_flight_nr--;
                        }
                        break;

                default:
                        log_info("fio: unknown completion event %d\n",
                                 wc.opcode);
                }
        }

        if (ret)
                log_err("fio: poll error %d\n", ret);
/*
 * Returns -1 on error, or the number of completion events handled.
 */
static int rdma_poll_wait(struct thread_data *td, enum ibv_wc_opcode opcode)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct ibv_cq *ev_cq;

        if (rd->cq_event_num > 0) {     /* events left over from a previous call */

        if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
                log_err("fio: Failed to get cq event!\n");
                return -1;
        }
        if (ev_cq != rd->cq) {
                log_err("fio: Unknown CQ!\n");
                return -1;
        }
        if (ibv_req_notify_cq(rd->cq, 0) != 0) {
                log_err("fio: Failed to set notify!\n");
                return -1;
        }

        ret = cq_event_handler(td, opcode);

        ibv_ack_cq_events(rd->cq, ret);
static int fio_rdmaio_setup_qp(struct thread_data *td)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct ibv_qp_init_attr init_attr;
        int qp_depth = td->o.iodepth * 2;       /* twice the io depth */

        if (rd->is_client == 0)
                rd->pd = ibv_alloc_pd(rd->child_cm_id->verbs);
        else
                rd->pd = ibv_alloc_pd(rd->cm_id->verbs);

        if (rd->pd == NULL) {
                log_err("fio: ibv_alloc_pd fail: %m\n");

        if (rd->is_client == 0)
                rd->channel = ibv_create_comp_channel(rd->child_cm_id->verbs);
        else
                rd->channel = ibv_create_comp_channel(rd->cm_id->verbs);
        if (rd->channel == NULL) {
                log_err("fio: ibv_create_comp_channel fail: %m\n");

        if (rd->is_client == 0)
                rd->cq = ibv_create_cq(rd->child_cm_id->verbs,
                                       qp_depth, rd, rd->channel, 0);
        else
                rd->cq = ibv_create_cq(rd->cm_id->verbs,
                                       qp_depth, rd, rd->channel, 0);
        if (rd->cq == NULL) {
                log_err("fio: ibv_create_cq failed: %m\n");

        if (ibv_req_notify_cq(rd->cq, 0) != 0) {
                log_err("fio: ibv_req_notify_cq failed: %m\n");

        /* create queue pair */
        memset(&init_attr, 0, sizeof(init_attr));
        init_attr.cap.max_send_wr = qp_depth;
        init_attr.cap.max_recv_wr = qp_depth;
        init_attr.cap.max_recv_sge = 1;
        init_attr.cap.max_send_sge = 1;
        init_attr.qp_type = IBV_QPT_RC;
        init_attr.send_cq = rd->cq;
        init_attr.recv_cq = rd->cq;

        if (rd->is_client == 0) {
                if (rdma_create_qp(rd->child_cm_id, rd->pd, &init_attr) != 0) {
                        log_err("fio: rdma_create_qp failed: %m\n");
                }
                rd->qp = rd->child_cm_id->qp;
        } else {
                if (rdma_create_qp(rd->cm_id, rd->pd, &init_attr) != 0) {
                        log_err("fio: rdma_create_qp failed: %m\n");
                }
                rd->qp = rd->cm_id->qp;
        }

        /* error-unwind path */
        ibv_destroy_cq(rd->cq);
        ibv_destroy_comp_channel(rd->channel);
        ibv_dealloc_pd(rd->pd);
static int fio_rdmaio_setup_control_msg_buffers(struct thread_data *td)
{
        struct rdmaio_data *rd = td->io_ops_data;

        rd->recv_mr = ibv_reg_mr(rd->pd, &rd->recv_buf, sizeof(rd->recv_buf),
                                 IBV_ACCESS_LOCAL_WRITE);
        if (rd->recv_mr == NULL) {
                log_err("fio: recv_buf reg_mr failed: %m\n");
                return 1;
        }

        rd->send_mr = ibv_reg_mr(rd->pd, &rd->send_buf, sizeof(rd->send_buf),
                                 0);
        if (rd->send_mr == NULL) {
                log_err("fio: send_buf reg_mr failed: %m\n");
                ibv_dereg_mr(rd->recv_mr);
                return 1;
        }

        /* setup work requests */
        /* recv wr */
        rd->recv_sgl.addr = (uint64_t) (unsigned long)&rd->recv_buf;
        rd->recv_sgl.length = sizeof(rd->recv_buf);
        rd->recv_sgl.lkey = rd->recv_mr->lkey;
        rd->rq_wr.sg_list = &rd->recv_sgl;
        rd->rq_wr.num_sge = 1;
        rd->rq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

        /* send wr */
        rd->send_sgl.addr = (uint64_t) (unsigned long)&rd->send_buf;
        rd->send_sgl.length = sizeof(rd->send_buf);
        rd->send_sgl.lkey = rd->send_mr->lkey;

        rd->sq_wr.opcode = IBV_WR_SEND;
        rd->sq_wr.send_flags = IBV_SEND_SIGNALED;
        rd->sq_wr.sg_list = &rd->send_sgl;
        rd->sq_wr.num_sge = 1;
        rd->sq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

        return 0;
static int get_next_channel_event(struct thread_data *td,
                                  struct rdma_event_channel *channel,
                                  enum rdma_cm_event_type wait_event)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct rdma_cm_event *event;

        ret = rdma_get_cm_event(channel, &event);
        if (ret) {
                log_err("fio: rdma_get_cm_event: %d\n", ret);
                return 1;
        }

        if (event->event != wait_event) {
                log_err("fio: event is %s instead of %s\n",
                        rdma_event_str(event->event),
                        rdma_event_str(wait_event));
                return 1;
        }

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                rd->child_cm_id = event->id;
                break;
        default:
                break;
        }

        rdma_ack_cm_event(event);
static int fio_rdmaio_prep(struct thread_data *td, struct io_u *io_u)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct rdma_io_u_data *r_io_u_d;

        r_io_u_d = io_u->engine_data;

        switch (rd->rdma_protocol) {
        case FIO_RDMA_MEM_WRITE:
        case FIO_RDMA_MEM_READ:
                r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
                r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
                r_io_u_d->sq_wr.wr_id = r_io_u_d->wr_id;
                r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
                r_io_u_d->sq_wr.sg_list = &r_io_u_d->rdma_sgl;
                r_io_u_d->sq_wr.num_sge = 1;
                break;
        case FIO_RDMA_CHA_SEND:
                r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
                r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
                r_io_u_d->rdma_sgl.length = io_u->buflen;
                r_io_u_d->sq_wr.wr_id = r_io_u_d->wr_id;
                r_io_u_d->sq_wr.opcode = IBV_WR_SEND;
                r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
                r_io_u_d->sq_wr.sg_list = &r_io_u_d->rdma_sgl;
                r_io_u_d->sq_wr.num_sge = 1;
                break;
        case FIO_RDMA_CHA_RECV:
                r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
                r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
                r_io_u_d->rdma_sgl.length = io_u->buflen;
                r_io_u_d->rq_wr.wr_id = r_io_u_d->wr_id;
                r_io_u_d->rq_wr.sg_list = &r_io_u_d->rdma_sgl;
                r_io_u_d->rq_wr.num_sge = 1;
                break;
        default:
                log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
                break;
        }
static struct io_u *fio_rdmaio_event(struct thread_data *td, int event)
{
        struct rdmaio_data *rd = td->io_ops_data;

        io_u = rd->io_us_completed[0];
        for (i = 0; i < rd->io_u_completed_nr - 1; i++)
                rd->io_us_completed[i] = rd->io_us_completed[i + 1];

        rd->io_u_completed_nr--;

        dprint_io_u(io_u, "fio_rdmaio_event");

        return io_u;
static int fio_rdmaio_getevents(struct thread_data *td, unsigned int min,
                                unsigned int max, const struct timespec *t)
{
        struct rdmaio_data *rd = td->io_ops_data;
        enum ibv_wc_opcode comp_opcode;
        struct ibv_cq *ev_cq;

        comp_opcode = IBV_WC_RDMA_WRITE;

        switch (rd->rdma_protocol) {
        case FIO_RDMA_MEM_WRITE:
                comp_opcode = IBV_WC_RDMA_WRITE;
                break;
        case FIO_RDMA_MEM_READ:
                comp_opcode = IBV_WC_RDMA_READ;
                break;
        case FIO_RDMA_CHA_SEND:
                comp_opcode = IBV_WC_SEND;
                break;
        case FIO_RDMA_CHA_RECV:
                comp_opcode = IBV_WC_RECV;
                break;
        default:
                log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
                break;
        }

        if (rd->cq_event_num > 0) {     /* events left over from a previous call */

        if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
                log_err("fio: Failed to get cq event!\n");

        if (ev_cq != rd->cq) {
                log_err("fio: Unknown CQ!\n");

        if (ibv_req_notify_cq(rd->cq, 0) != 0) {
                log_err("fio: Failed to set notify!\n");

        ret = cq_event_handler(td, comp_opcode);

        ibv_ack_cq_events(rd->cq, ret);

        rd->cq_event_num -= r;
static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us,
                           unsigned int nr)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct ibv_send_wr *bad_wr;
        enum ibv_wc_opcode comp_opcode;
        comp_opcode = IBV_WC_RDMA_WRITE;
        struct rdma_io_u_data *r_io_u_d;

        for (i = 0; i < nr; i++) {
                /* RDMA_WRITE or RDMA_READ */
                switch (rd->rdma_protocol) {
                case FIO_RDMA_MEM_WRITE:
                        /* compose work request */
                        r_io_u_d = io_us[i]->engine_data;
                        index = __rand(&rd->rand_state) % rd->rmt_nr;
                        r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_WRITE;
                        r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
                        r_io_u_d->sq_wr.wr.rdma.remote_addr =
                                rd->rmt_us[index].buf;
                        r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
                        break;
                case FIO_RDMA_MEM_READ:
                        /* compose work request */
                        r_io_u_d = io_us[i]->engine_data;
                        index = __rand(&rd->rand_state) % rd->rmt_nr;
                        r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_READ;
                        r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
                        r_io_u_d->sq_wr.wr.rdma.remote_addr =
                                rd->rmt_us[index].buf;
                        r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
                        break;
                case FIO_RDMA_CHA_SEND:
                        r_io_u_d = io_us[i]->engine_data;
                        r_io_u_d->sq_wr.opcode = IBV_WR_SEND;
                        r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
                        break;
                default:
                        log_err("fio: unknown rdma protocol - %d\n",
                                rd->rdma_protocol);
                        break;
                }

                if (ibv_post_send(rd->qp, &r_io_u_d->sq_wr, &bad_wr) != 0) {
                        log_err("fio: ibv_post_send fail: %m\n");
                        break;
                }

                dprint_io_u(io_us[i], "fio_rdmaio_send");
        }

        /* wait for completion:
         *      rdma_poll_wait(td, comp_opcode);
         */

        return i;
static int fio_rdmaio_recv(struct thread_data *td, struct io_u **io_us,
                           unsigned int nr)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct ibv_recv_wr *bad_wr;
        struct rdma_io_u_data *r_io_u_d;

        if (rd->rdma_protocol == FIO_RDMA_CHA_RECV) {
                /* post io_u into recv queue */
                for (i = 0; i < nr; i++) {
                        r_io_u_d = io_us[i]->engine_data;
                        if (ibv_post_recv(rd->qp, &r_io_u_d->rq_wr,
                                          &bad_wr) != 0) {
                                log_err("fio: ibv_post_recv fail: %m\n");
                                return 1;
                        }
                }
        } else if ((rd->rdma_protocol == FIO_RDMA_MEM_READ)
                   || (rd->rdma_protocol == FIO_RDMA_MEM_WRITE)) {
                /* re-post the rq_wr */
                if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
                        log_err("fio: ibv_post_recv fail: %m\n");
                        return 1;
                }

                rdma_poll_wait(td, IBV_WC_RECV);

                dprint(FD_IO, "fio: recv FINISH message\n");
static enum fio_q_status fio_rdmaio_queue(struct thread_data *td,
                                          struct io_u *io_u)
{
        struct rdmaio_data *rd = td->io_ops_data;

        fio_ro_check(td, io_u);

        if (rd->io_u_queued_nr == (int)td->o.iodepth)
                return FIO_Q_BUSY;

        rd->io_us_queued[rd->io_u_queued_nr] = io_u;
        rd->io_u_queued_nr++;

        dprint_io_u(io_u, "fio_rdmaio_queue");

        return FIO_Q_QUEUED;
static void fio_rdmaio_queued(struct thread_data *td, struct io_u **io_us,
                              unsigned int nr)
{
        struct rdmaio_data *rd = td->io_ops_data;

        if (!fio_fill_issue_time(td))
                return;

        fio_gettime(&now, NULL);

        for (i = 0; i < nr; i++) {
                struct io_u *io_u = io_us[i];

                /* queued -> flight */
                rd->io_us_flight[rd->io_u_flight_nr] = io_u;
                rd->io_u_flight_nr++;

                memcpy(&io_u->issue_time, &now, sizeof(now));
                io_u_queued(td, io_u);
        }
static int fio_rdmaio_commit(struct thread_data *td)
{
        struct rdmaio_data *rd = td->io_ops_data;

        if (!rd->io_us_queued)
                return 0;

        io_us = rd->io_us_queued;
        do {
                /* RDMA_WRITE or RDMA_READ */
                if (rd->is_client)
                        ret = fio_rdmaio_send(td, io_us, rd->io_u_queued_nr);
                else if (!rd->is_client)
                        ret = fio_rdmaio_recv(td, io_us, rd->io_u_queued_nr);
                else
                        ret = 0;        /* must be a SYNC */

                if (ret > 0) {
                        fio_rdmaio_queued(td, io_us, ret);
                        io_u_mark_submit(td, ret);
                        rd->io_u_queued_nr -= ret;
                }
        } while (rd->io_u_queued_nr);
static int fio_rdmaio_connect(struct thread_data *td, struct fio_file *f)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct rdma_conn_param conn_param;
        struct ibv_send_wr *bad_wr;

        memset(&conn_param, 0, sizeof(conn_param));
        conn_param.responder_resources = 1;
        conn_param.initiator_depth = 1;
        conn_param.retry_count = 10;

        if (rdma_connect(rd->cm_id, &conn_param) != 0) {
                log_err("fio: rdma_connect fail: %m\n");
                return 1;
        }

        if (get_next_channel_event
            (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
                log_err("fio: wait for RDMA_CM_EVENT_ESTABLISHED\n");
                return 1;
        }

        /* send task request */
        rd->send_buf.mode = htonl(rd->rdma_protocol);
        rd->send_buf.nr = htonl(td->o.iodepth);

        if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
                log_err("fio: ibv_post_send fail: %m\n");
                return 1;
        }

        if (rdma_poll_wait(td, IBV_WC_SEND) < 0)
                return 1;

        /* wait for remote MR info from server side */
        if (rdma_poll_wait(td, IBV_WC_RECV) < 0)
                return 1;

        /* In a SEND/RECV test, it is good practice to set the iodepth of
         * the RECV side deeper than that of the SEND side, to avoid RNR
         * (receiver not ready) errors: the SEND side may post many
         * unsolicited messages before the RECV side has committed enough
         * receive buffers to its receive queue, which would trigger RNR.
         * The SEND side therefore pauses here while the RECV side posts
         * sufficient buffers.
         */
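        /* a brief pause (a sketch: the 500 ms value is a tuning choice,
         * not a protocol requirement) */
        usleep(500000);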
static int fio_rdmaio_accept(struct thread_data *td, struct fio_file *f)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct rdma_conn_param conn_param;
        struct ibv_send_wr *bad_wr;

        /* rdma_accept() - then wait for accept success */
        memset(&conn_param, 0, sizeof(conn_param));
        conn_param.responder_resources = 1;
        conn_param.initiator_depth = 1;

        if (rdma_accept(rd->child_cm_id, &conn_param) != 0) {
                log_err("fio: rdma_accept: %m\n");
                return 1;
        }

        if (get_next_channel_event
            (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
                log_err("fio: wait for RDMA_CM_EVENT_ESTABLISHED\n");
                return 1;
        }

        /* wait for request */
        ret = rdma_poll_wait(td, IBV_WC_RECV) < 0;

        if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
                log_err("fio: ibv_post_send fail: %m\n");
                return 1;
        }

        if (rdma_poll_wait(td, IBV_WC_SEND) < 0)
                return 1;

        return ret;
static int fio_rdmaio_open_file(struct thread_data *td, struct fio_file *f)
{
        if (td_read(td))
                return fio_rdmaio_accept(td, f);
        else
                return fio_rdmaio_connect(td, f);
static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct ibv_send_wr *bad_wr;

        /* unregister rdma buffer */

        /*
         * In the memory-semantic tests, the client sends a close
         * notification to the server side.
         * Refer to: http://linux.die.net/man/7/rdma_cm
         */
        if ((rd->is_client == 1) && ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE)
                                     || (rd->rdma_protocol ==
                                         FIO_RDMA_MEM_READ))) {
                if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
                        log_err("fio: ibv_post_send fail: %m\n");
                        return 1;
                }

                dprint(FD_IO, "fio: close notification sent successfully\n");
                rdma_poll_wait(td, IBV_WC_SEND);
        }

        if (rd->is_client == 1)
                rdma_disconnect(rd->cm_id);
        else {
                rdma_disconnect(rd->child_cm_id);
                rdma_disconnect(rd->cm_id);
        }

        if (get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_DISCONNECTED) != 0) {
                log_err("fio: wait for RDMA_CM_EVENT_DISCONNECTED\n");
        }

        ibv_destroy_cq(rd->cq);
        ibv_destroy_qp(rd->qp);

        if (rd->is_client == 1)
                rdma_destroy_id(rd->cm_id);
        else {
                rdma_destroy_id(rd->child_cm_id);
                rdma_destroy_id(rd->cm_id);
        }

        ibv_destroy_comp_channel(rd->channel);
        ibv_dealloc_pd(rd->pd);

        return 0;
static int aton(struct thread_data *td, const char *host,
                struct sockaddr_in *addr)
{
        if (inet_aton(host, &addr->sin_addr) != 1) {
                struct hostent *hent;

                hent = gethostbyname(host);
                if (!hent) {
                        td_verror(td, errno, "gethostbyname");
                        return 1;
                }

                memcpy(&addr->sin_addr, hent->h_addr, 4);
        }
        return 0;
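        /* note: gethostbyname() reports failures via h_errno rather than
         * errno, so the td_verror() above can pick up a stale error code;
         * the function is also IPv4-only and obsolete, and getaddrinfo()
         * would be the modern replacement */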
static int fio_rdmaio_setup_connect(struct thread_data *td, const char *host,
                                    unsigned short port)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct rdmaio_options *o = td->eo;
        struct sockaddr_storage addrb;
        struct ibv_recv_wr *bad_wr;

        rd->addr.sin_family = AF_INET;
        rd->addr.sin_port = htons(port);

        err = aton(td, host, &rd->addr);

        /* resolve addr */
        if (strcmp(o->bindname, "") != 0) {
                addrb.ss_family = AF_INET;
                err = aton(td, o->bindname, (struct sockaddr_in *)&addrb);
                err = rdma_resolve_addr(rd->cm_id, (struct sockaddr *)&addrb,
                                        (struct sockaddr *)&rd->addr, 2000);
        } else {
                err = rdma_resolve_addr(rd->cm_id, NULL,
                                        (struct sockaddr *)&rd->addr, 2000);
        }

        if (err != 0) {
                log_err("fio: rdma_resolve_addr: %d\n", err);
                return 1;
        }

        err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ADDR_RESOLVED);
        if (err != 0) {
                log_err("fio: get_next_channel_event: %d\n", err);
                return 1;
        }

        /* resolve route */
        err = rdma_resolve_route(rd->cm_id, 2000);
        if (err != 0) {
                log_err("fio: rdma_resolve_route: %d\n", err);
                return 1;
        }

        err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ROUTE_RESOLVED);
        if (err != 0) {
                log_err("fio: get_next_channel_event: %d\n", err);
                return 1;
        }

        /* create qp and buffer */
        if (fio_rdmaio_setup_qp(td) != 0)
                return 1;

        if (fio_rdmaio_setup_control_msg_buffers(td) != 0)
                return 1;

        /* post recv buf */
        err = ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr);
        if (err != 0) {
                log_err("fio: ibv_post_recv fail: %d\n", err);
                return 1;
        }

        return 0;
static int fio_rdmaio_setup_listen(struct thread_data *td, short port)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct rdmaio_options *o = td->eo;
        struct ibv_recv_wr *bad_wr;
        int state = td->runstate;

        td_set_runstate(td, TD_SETTING_UP);

        rd->addr.sin_family = AF_INET;
        rd->addr.sin_port = htons(port);

        if (strcmp(o->bindname, "") == 0)
                rd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
        else if (aton(td, o->bindname, &rd->addr) != 0)
                return 1;

        /* rdma_listen */
        if (rdma_bind_addr(rd->cm_id, (struct sockaddr *)&rd->addr) != 0) {
                log_err("fio: rdma_bind_addr fail: %m\n");
                return 1;
        }

        if (rdma_listen(rd->cm_id, 3) != 0) {
                log_err("fio: rdma_listen fail: %m\n");
                return 1;
        }

        log_info("fio: waiting for connection\n");

        /* wait for CONNECT_REQUEST */
        if (get_next_channel_event
            (td, rd->cm_channel, RDMA_CM_EVENT_CONNECT_REQUEST) != 0) {
                log_err("fio: wait for RDMA_CM_EVENT_CONNECT_REQUEST\n");
                return 1;
        }

        if (fio_rdmaio_setup_qp(td) != 0)
                return 1;

        if (fio_rdmaio_setup_control_msg_buffers(td) != 0)
                return 1;

        /* post recv buf */
        if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
                log_err("fio: ibv_post_recv fail: %m\n");
                return 1;
        }

        td_set_runstate(td, state);
static int check_set_rlimits(struct thread_data *td)
{
#ifdef CONFIG_RLIMIT_MEMLOCK
        struct rlimit rl;

        /* check RLIMIT_MEMLOCK */
        if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
                log_err("fio: getrlimit fail: %d(%s)\n",
                        errno, strerror(errno));
                return 1;
        }

        if ((rl.rlim_cur != RLIM_INFINITY)
            && (rl.rlim_cur < td->orig_buffer_size)) {
                log_err("fio: soft RLIMIT_MEMLOCK is: %" PRId64 "\n",
                        (int64_t) rl.rlim_cur);
                log_err("fio: total block size is:    %zd\n",
                        td->orig_buffer_size);
                /* try to set a larger RLIMIT_MEMLOCK */
                rl.rlim_cur = rl.rlim_max;
                if (setrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
                        log_err("fio: setrlimit fail: %d(%s)\n",
                                errno, strerror(errno));
                        log_err("fio: you may try to enlarge MEMLOCK as root:\n");
                        log_err("#    ulimit -l unlimited\n");
                        return 1;
                }
        }
#endif
static int compat_options(struct thread_data *td)
{
        // The original RDMA engine had an ugly '/' separator
        // on the filename for its options. This function
        // retains backwards compatibility with it. Note that we do
        // not support setting the bindname option in this legacy mode.
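        //
        // e.g. filename=<host>/<port>[/<verb>], say 192.168.0.1/10000/rdma_write
        // (the address is an illustrative placeholder)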
        struct rdmaio_options *o = td->eo;
        char *modep, *portp;
        char *filename = td->o.filename;

        portp = strchr(filename, '/');

        *portp = '\0';
        portp++;

        o->port = strtol(portp, NULL, 10);
        if (!o->port || o->port > 65535)
                goto bad_host;

        modep = strchr(portp, '/');
        if (modep != NULL) {
                if (!strncmp("rdma_write", modep, strlen(modep)) ||
                    !strncmp("RDMA_WRITE", modep, strlen(modep)))
                        o->verb = FIO_RDMA_MEM_WRITE;
                else if (!strncmp("rdma_read", modep, strlen(modep)) ||
                         !strncmp("RDMA_READ", modep, strlen(modep)))
                        o->verb = FIO_RDMA_MEM_READ;
                else if (!strncmp("send", modep, strlen(modep)) ||
                         !strncmp("SEND", modep, strlen(modep)))
                        o->verb = FIO_RDMA_CHA_SEND;
                else
                        goto bad_host;
        } else
                o->verb = FIO_RDMA_MEM_WRITE;

        return 0;

bad_host:
        log_err("fio: bad rdma host/port/protocol: %s\n", td->o.filename);
        return 1;
static int fio_rdmaio_init(struct thread_data *td)
{
        struct rdmaio_data *rd = td->io_ops_data;
        struct rdmaio_options *o = td->eo;
        unsigned int max_bs;

        if (td_rw(td)) {
                log_err("fio: rdma connections must be read OR write\n");
                return 1;
        }
        if (td_random(td)) {
                log_err("fio: RDMA network IO can't be random\n");
                return 1;
        }

        if (compat_options(td))
                return 1;

        if (!o->port) {
                log_err("fio: no port has been specified, which is required "
                        "for the rdma engine\n");
                return 1;
        }

        if (check_set_rlimits(td))
                return 1;
        rd->rdma_protocol = o->verb;
        rd->cq_event_num = 0;

        rd->cm_channel = rdma_create_event_channel();
        if (!rd->cm_channel) {
                log_err("fio: rdma_create_event_channel fail: %m\n");
                return 1;
        }

        ret = rdma_create_id(rd->cm_channel, &rd->cm_id, rd, RDMA_PS_TCP);
        if (ret) {
                log_err("fio: rdma_create_id fail: %m\n");
                return 1;
        }

        if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
            (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
                rd->rmt_us =
                        malloc(FIO_RDMA_MAX_IO_DEPTH * sizeof(struct remote_u));
                memset(rd->rmt_us, 0,
                       FIO_RDMA_MAX_IO_DEPTH * sizeof(struct remote_u));
        }

        rd->io_us_queued = malloc(td->o.iodepth * sizeof(struct io_u *));
        memset(rd->io_us_queued, 0, td->o.iodepth * sizeof(struct io_u *));
        rd->io_u_queued_nr = 0;

        rd->io_us_flight = malloc(td->o.iodepth * sizeof(struct io_u *));
        memset(rd->io_us_flight, 0, td->o.iodepth * sizeof(struct io_u *));
        rd->io_u_flight_nr = 0;

        rd->io_us_completed = malloc(td->o.iodepth * sizeof(struct io_u *));
        memset(rd->io_us_completed, 0, td->o.iodepth * sizeof(struct io_u *));
        rd->io_u_completed_nr = 0;
        if (td_read(td)) {      /* READ as the server */
                rd->is_client = 0;
                td->flags |= TD_F_NO_PROGRESS;
                /* the server's rdma_buf_len is set up once the request arrives */
                ret = fio_rdmaio_setup_listen(td, o->port);
        } else {                /* WRITE as the client */
                rd->is_client = 1;
                ret = fio_rdmaio_setup_connect(td, td->o.filename, o->port);
        }

        max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
        rd->send_buf.max_bs = htonl(max_bs);

        /* register each io_u in the free list */
        for (i = 0; i < td->io_u_freelist.nr; i++) {
                struct io_u *io_u = td->io_u_freelist.io_us[i];

                io_u->engine_data = malloc(sizeof(struct rdma_io_u_data));
                memset(io_u->engine_data, 0, sizeof(struct rdma_io_u_data));
                ((struct rdma_io_u_data *)io_u->engine_data)->wr_id = i;

                io_u->mr = ibv_reg_mr(rd->pd, io_u->buf, max_bs,
                                      IBV_ACCESS_LOCAL_WRITE |
                                      IBV_ACCESS_REMOTE_READ |
                                      IBV_ACCESS_REMOTE_WRITE);
                if (io_u->mr == NULL) {
                        log_err("fio: ibv_reg_mr io_u failed: %m\n");
                        return 1;
                }

                rd->send_buf.rmt_us[i].buf =
                        cpu_to_be64((uint64_t) (unsigned long)io_u->buf);
                rd->send_buf.rmt_us[i].rkey = htonl(io_u->mr->rkey);
                rd->send_buf.rmt_us[i].size = htonl(max_bs);

                /* log_info("fio: Send rkey %x addr %" PRIx64 " len %d to client\n",
                            io_u->mr->rkey, io_u->buf, max_bs); */
        }

        rd->send_buf.nr = htonl(i);

        return ret;
static void fio_rdmaio_cleanup(struct thread_data *td)
{
        struct rdmaio_data *rd = td->io_ops_data;

        if (rd)
                free(rd);
static int fio_rdmaio_setup(struct thread_data *td)
{
        struct rdmaio_data *rd;

        if (!td->files_index) {
                add_file(td, td->o.filename ?: "rdma", 0, 0);
                td->o.nr_files = td->o.nr_files ?: 1;
        }

        if (!td->io_ops_data) {
                rd = malloc(sizeof(*rd));

                memset(rd, 0, sizeof(*rd));
                init_rand_seed(&rd->rand_state, (unsigned int) GOLDEN_RATIO_PRIME, 0);
                td->io_ops_data = rd;
        }

        return 0;
static struct ioengine_ops ioengine_rw = {
        .name                   = "rdma",
        .version                = FIO_IOOPS_VERSION,
        .setup                  = fio_rdmaio_setup,
        .init                   = fio_rdmaio_init,
        .prep                   = fio_rdmaio_prep,
        .queue                  = fio_rdmaio_queue,
        .commit                 = fio_rdmaio_commit,
        .getevents              = fio_rdmaio_getevents,
        .event                  = fio_rdmaio_event,
        .cleanup                = fio_rdmaio_cleanup,
        .open_file              = fio_rdmaio_open_file,
        .close_file             = fio_rdmaio_close_file,
        .flags                  = FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO,
        .options                = options,
        .option_struct_size     = sizeof(struct rdmaio_options),
};
static void fio_init fio_rdmaio_register(void)
{
        register_ioengine(&ioengine_rw);
}

static void fio_exit fio_rdmaio_unregister(void)
{
        unregister_ioengine(&ioengine_rw);
}