/*
 * rdma engine
 *
 * RDMA IO engine using the OFED library.
 * Supports both RDMA memory semantics and channel semantics
 * in InfiniBand, RoCE and iWarp environments.
 *
 * This engine is currently disabled. To enable it, execute:
 *
 * $ export EXTFLAGS="-DFIO_HAVE_RDMA"
 * $ export EXTLIBS="-libverbs -lrdmacm"
 *
 * before running make. You'll need OFED as well:
 *
 * http://www.openfabrics.org/downloads/OFED/
 *
 */
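/*
 * Example invocation (an illustrative sketch only; see fio_rdmaio_init()
 * for how the filename option is actually parsed):
 *
 *   server:  fio --ioengine=rdma --rw=read  --filename=[host]/[port]/rdma_write ...
 *   client:  fio --ioengine=rdma --rw=write --filename=[host]/[port]/rdma_write ...
 *
 * The reading side listens and accepts; the writing side connects.
 */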
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>

#include <byteswap.h>
#include <pthread.h>
#include <inttypes.h>

#include "../fio.h"

#ifdef FIO_HAVE_RDMA

#include <rdma/rdma_cma.h>
#include <infiniband/arch.h>

#define FIO_RDMA_MAX_IO_DEPTH 128

enum rdma_io_mode {
	FIO_RDMA_UNKNOWN = 0,
	FIO_RDMA_MEM_WRITE,
	FIO_RDMA_MEM_READ,
	FIO_RDMA_CHA_SEND,
	FIO_RDMA_CHA_RECV
};

struct remote_u {
	uint64_t buf;
	uint32_t rkey;
	uint32_t size;
};

struct rdma_info_blk {
	uint32_t mode;		/* channel semantic or memory semantic */
	uint32_t nr;		/* client: io depth
				   server: number of records for memory
				   semantic */
	struct remote_u rmt_us[FIO_RDMA_MAX_IO_DEPTH];
};

struct rdma_io_u_data {
	uint64_t wr_id;
	struct ibv_send_wr sq_wr;
	struct ibv_recv_wr rq_wr;
	struct ibv_sge rdma_sgl;
};

struct rdmaio_data {
	int is_client;
	enum rdma_io_mode rdma_protocol;
	char host[64];
	struct sockaddr_in addr;

	struct ibv_recv_wr rq_wr;
	struct ibv_sge recv_sgl;
	struct rdma_info_blk recv_buf;
	struct ibv_mr *recv_mr;

	struct ibv_send_wr sq_wr;
	struct ibv_sge send_sgl;
	struct rdma_info_blk send_buf;
	struct ibv_mr *send_mr;

	struct ibv_comp_channel *channel;
	struct ibv_cq *cq;
	struct ibv_pd *pd;
	struct ibv_qp *qp;

	pthread_t cmthread;
	struct rdma_event_channel *cm_channel;
	struct rdma_cm_id *cm_id;
	struct rdma_cm_id *child_cm_id;

	int cq_event_num;

	struct remote_u *rmt_us;
	int rmt_nr;
	struct io_u **io_us_queued;
	int io_u_queued_nr;
	struct io_u **io_us_flight;
	int io_u_flight_nr;
	struct io_u **io_us_completed;
	int io_u_completed_nr;
};

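/*
 * Client-side handler for a completed control-message receive. For the
 * memory-semantic modes, unpack the server's table of remote buffer
 * addresses, rkeys and sizes into rd->rmt_us.
 */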
static int client_recv(struct thread_data *td, struct ibv_wc *wc)
{
	struct rdmaio_data *rd = td->io_ops->data;

	if (wc->byte_len != sizeof(rd->recv_buf)) {
		log_err("fio: Received bogus data, size %d\n", wc->byte_len);
		return 1;
	}

	/* store mr info for MEMORY semantic */
	if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
	    (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
		int i = 0;

		rd->rmt_nr = ntohl(rd->recv_buf.nr);

		for (i = 0; i < rd->rmt_nr; i++) {
			rd->rmt_us[i].buf = ntohll(rd->recv_buf.rmt_us[i].buf);
			rd->rmt_us[i].rkey = ntohl(rd->recv_buf.rmt_us[i].rkey);
			rd->rmt_us[i].size = ntohl(rd->recv_buf.rmt_us[i].size);

			dprint(FD_IO,
			       "fio: Received rkey %x addr %" PRIx64
			       " len %d from peer\n", rd->rmt_us[i].rkey,
			       rd->rmt_us[i].buf, rd->rmt_us[i].size);
		}
	}

	return 0;
}

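/*
 * Server-side handler for a completed control-message receive: record the
 * I/O mode requested by the client.
 */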
static int server_recv(struct thread_data *td, struct ibv_wc *wc)
{
	struct rdmaio_data *rd = td->io_ops->data;

	if (wc->wr_id == FIO_RDMA_MAX_IO_DEPTH) {
		rd->rdma_protocol = ntohl(rd->recv_buf.mode);

		/* CHANNEL semantic: the server takes the receiving role */
		if (rd->rdma_protocol == FIO_RDMA_CHA_SEND)
			rd->rdma_protocol = FIO_RDMA_CHA_RECV;
	}

	return 0;
}

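/*
 * Drain the completion queue, matching each work completion to its
 * in-flight io_u and moving it to the completed list. Returns the number
 * of completions reaped, or -1 on error.
 */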
static int cq_event_handler(struct thread_data *td, enum ibv_wc_opcode opcode)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_wc wc;
	struct rdma_io_u_data *r_io_u_d;
	int ret;
	int compevnum = 0;
	int i;

	while ((ret = ibv_poll_cq(rd->cq, 1, &wc)) == 1) {
		ret = 0;
		compevnum++;

		if (wc.status) {
			log_err("fio: cq completion status %d(%s)\n",
				wc.status, ibv_wc_status_str(wc.status));
			return -1;
		}

		switch (wc.opcode) {

		case IBV_WC_RECV:
			if (rd->is_client == 1)
				client_recv(td, &wc);
			else
				server_recv(td, &wc);

			if (wc.wr_id == FIO_RDMA_MAX_IO_DEPTH)
				break;

			for (i = 0; i < rd->io_u_flight_nr; i++) {
				r_io_u_d = rd->io_us_flight[i]->engine_data;

				if (wc.wr_id == r_io_u_d->rq_wr.wr_id) {
					rd->io_us_flight[i]->resid =
					    rd->io_us_flight[i]->buflen
					    - wc.byte_len;

					rd->io_us_flight[i]->error = 0;

					rd->io_us_completed[rd->
							    io_u_completed_nr]
					    = rd->io_us_flight[i];
					rd->io_u_completed_nr++;
					break;
				}
			}
			if (i == rd->io_u_flight_nr)
				log_err("fio: recv wr %" PRIu64 " not found\n",
					wc.wr_id);
			else {
				/* move the last one into the vacated slot */
				rd->io_us_flight[i] =
				    rd->io_us_flight[rd->io_u_flight_nr - 1];
				rd->io_u_flight_nr--;
			}

			break;

		case IBV_WC_SEND:
		case IBV_WC_RDMA_WRITE:
		case IBV_WC_RDMA_READ:
			if (wc.wr_id == FIO_RDMA_MAX_IO_DEPTH)
				break;

			for (i = 0; i < rd->io_u_flight_nr; i++) {
				r_io_u_d = rd->io_us_flight[i]->engine_data;

				if (wc.wr_id == r_io_u_d->sq_wr.wr_id) {
					rd->io_us_completed[rd->
							    io_u_completed_nr]
					    = rd->io_us_flight[i];
					rd->io_u_completed_nr++;
					break;
				}
			}
			if (i == rd->io_u_flight_nr)
				log_err("fio: send wr %" PRIu64 " not found\n",
					wc.wr_id);
			else {
				/* move the last one into the vacated slot */
				rd->io_us_flight[i] =
				    rd->io_us_flight[rd->io_u_flight_nr - 1];
				rd->io_u_flight_nr--;
			}

			break;

		default:
			log_info("fio: unknown completion event %d\n",
				 wc.opcode);
			return -1;
		}
		rd->cq_event_num++;
	}
	if (ret) {
		log_err("fio: poll error %d\n", ret);
		return 1;
	}

	return compevnum;
}

/*
 * Return -1 on error, or the (positive) number of events reaped.
 */
static int rdma_poll_wait(struct thread_data *td, enum ibv_wc_opcode opcode)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

	if (rd->cq_event_num > 0) {	/* previous left */
		rd->cq_event_num--;
		return 0;
	}

again:
	if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
		log_err("fio: Failed to get cq event!\n");
		return -1;
	}
	if (ev_cq != rd->cq) {
		log_err("fio: Unknown CQ!\n");
		return -1;
	}
	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: Failed to set notify!\n");
		return -1;
	}

	ret = cq_event_handler(td, opcode);
	if (ret < 1)
		goto again;

	ibv_ack_cq_events(rd->cq, ret);

	rd->cq_event_num--;

	return ret;
}

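/*
 * Allocate the protection domain, completion channel, CQ and an RC queue
 * pair on the verbs context of the established connection (the child
 * cm_id on the server side, the cm_id itself on the client side).
 */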
static int fio_rdmaio_setup_qp(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_qp_init_attr init_attr;
	int qp_depth = td->o.iodepth * 2;	/* 2 times of io depth */

	if (rd->is_client == 0)
		rd->pd = ibv_alloc_pd(rd->child_cm_id->verbs);
	else
		rd->pd = ibv_alloc_pd(rd->cm_id->verbs);
	if (rd->pd == NULL) {
		log_err("fio: ibv_alloc_pd fail\n");
		return 1;
	}

	if (rd->is_client == 0)
		rd->channel = ibv_create_comp_channel(rd->child_cm_id->verbs);
	else
		rd->channel = ibv_create_comp_channel(rd->cm_id->verbs);
	if (rd->channel == NULL) {
		log_err("fio: ibv_create_comp_channel fail\n");
		goto err1;
	}

	if (qp_depth < 16)
		qp_depth = 16;

	if (rd->is_client == 0)
		rd->cq = ibv_create_cq(rd->child_cm_id->verbs,
				       qp_depth, rd, rd->channel, 0);
	else
		rd->cq = ibv_create_cq(rd->cm_id->verbs,
				       qp_depth, rd, rd->channel, 0);
	if (rd->cq == NULL) {
		log_err("fio: ibv_create_cq failed\n");
		goto err2;
	}

	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: ibv_req_notify_cq failed\n");
		goto err3;
	}

	/* create queue pair */
	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.cap.max_send_wr = qp_depth;
	init_attr.cap.max_recv_wr = qp_depth;
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1;
	init_attr.qp_type = IBV_QPT_RC;
	init_attr.send_cq = rd->cq;
	init_attr.recv_cq = rd->cq;

	if (rd->is_client == 0) {
		if (rdma_create_qp(rd->child_cm_id, rd->pd, &init_attr) != 0) {
			log_err("fio: rdma_create_qp failed\n");
			goto err3;
		}
		rd->qp = rd->child_cm_id->qp;
	} else {
		if (rdma_create_qp(rd->cm_id, rd->pd, &init_attr) != 0) {
			log_err("fio: rdma_create_qp failed\n");
			goto err3;
		}
		rd->qp = rd->cm_id->qp;
	}

	return 0;

err3:
	ibv_destroy_cq(rd->cq);
err2:
	ibv_destroy_comp_channel(rd->channel);
err1:
	ibv_dealloc_pd(rd->pd);

	return 1;
}

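/*
 * Register the two control-message buffers and prebuild their work
 * requests. The wr_id FIO_RDMA_MAX_IO_DEPTH is reserved for these
 * control-path completions, so they can be told apart from data io_us.
 */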
static int fio_rdmaio_setup_control_msg_buffers(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;

	rd->recv_mr = ibv_reg_mr(rd->pd, &rd->recv_buf, sizeof(rd->recv_buf),
				 IBV_ACCESS_LOCAL_WRITE);
	if (rd->recv_mr == NULL) {
		log_err("fio: recv_buf reg_mr failed\n");
		return 1;
	}

	rd->send_mr = ibv_reg_mr(rd->pd, &rd->send_buf, sizeof(rd->send_buf),
				 0);
	if (rd->send_mr == NULL) {
		log_err("fio: send_buf reg_mr failed\n");
		ibv_dereg_mr(rd->recv_mr);
		return 1;
	}

	/* setup work request */
	/* recv wq */
	rd->recv_sgl.addr = (uint64_t) (unsigned long)&rd->recv_buf;
	rd->recv_sgl.length = sizeof(rd->recv_buf);
	rd->recv_sgl.lkey = rd->recv_mr->lkey;
	rd->rq_wr.sg_list = &rd->recv_sgl;
	rd->rq_wr.num_sge = 1;
	rd->rq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

	/* send wq */
	rd->send_sgl.addr = (uint64_t) (unsigned long)&rd->send_buf;
	rd->send_sgl.length = sizeof(rd->send_buf);
	rd->send_sgl.lkey = rd->send_mr->lkey;

	rd->sq_wr.opcode = IBV_WR_SEND;
	rd->sq_wr.send_flags = IBV_SEND_SIGNALED;
	rd->sq_wr.sg_list = &rd->send_sgl;
	rd->sq_wr.num_sge = 1;
	rd->sq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

	return 0;
}

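/*
 * Block on the CM event channel until the next event arrives and verify
 * that it is the one we are waiting for. A CONNECT_REQUEST also hands us
 * the child cm_id used on the server side.
 */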
static int get_next_channel_event(struct thread_data *td,
				  struct rdma_event_channel *channel,
				  enum rdma_cm_event_type wait_event)
{
	struct rdmaio_data *rd = td->io_ops->data;

	int ret;
	struct rdma_cm_event *event;

	ret = rdma_get_cm_event(channel, &event);
	if (ret) {
		log_err("fio: rdma_get_cm_event\n");
		return 1;
	}

	if (event->event != wait_event) {
		log_err("fio: event is %s instead of %s\n",
			rdma_event_str(event->event),
			rdma_event_str(wait_event));
		return 1;
	}

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		rd->child_cm_id = event->id;
		break;
	default:
		break;
	}

	rdma_ack_cm_event(event);

	return 0;
}

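/*
 * Fill in the per-io_u work request according to the chosen protocol:
 * a send-queue WR for the memory-semantic modes and for SEND, a
 * receive-queue WR for RECV.
 */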
static int fio_rdmaio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_io_u_data *r_io_u_d;

	r_io_u_d = io_u->engine_data;

	switch (rd->rdma_protocol) {
	case FIO_RDMA_MEM_WRITE:
	case FIO_RDMA_MEM_READ:
		r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
		r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
		r_io_u_d->sq_wr.wr_id = r_io_u_d->wr_id;
		r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
		r_io_u_d->sq_wr.sg_list = &r_io_u_d->rdma_sgl;
		r_io_u_d->sq_wr.num_sge = 1;
		break;
	case FIO_RDMA_CHA_SEND:
		r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
		r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
		r_io_u_d->rdma_sgl.length = io_u->buflen;
		r_io_u_d->sq_wr.wr_id = r_io_u_d->wr_id;
		r_io_u_d->sq_wr.opcode = IBV_WR_SEND;
		r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
		r_io_u_d->sq_wr.sg_list = &r_io_u_d->rdma_sgl;
		r_io_u_d->sq_wr.num_sge = 1;
		break;
	case FIO_RDMA_CHA_RECV:
		r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
		r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
		r_io_u_d->rdma_sgl.length = io_u->buflen;
		r_io_u_d->rq_wr.wr_id = r_io_u_d->wr_id;
		r_io_u_d->rq_wr.sg_list = &r_io_u_d->rdma_sgl;
		r_io_u_d->rq_wr.num_sge = 1;
		break;
	default:
		log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
		break;
	}

	return 0;
}

static struct io_u *fio_rdmaio_event(struct thread_data *td, int event)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct io_u *io_u;
	int i;

	io_u = rd->io_us_completed[0];
	for (i = 0; i < rd->io_u_completed_nr - 1; i++)
		rd->io_us_completed[i] = rd->io_us_completed[i + 1];

	rd->io_u_completed_nr--;

	dprint_io_u(io_u, "fio_rdmaio_event");

	return io_u;
}

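/*
 * Reap at least min completions. The CQ is re-armed and drained until
 * enough events have been gathered; leftovers are tracked in
 * cq_event_num for later calls.
 */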
static int fio_rdmaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, struct timespec *t)
{
	struct rdmaio_data *rd = td->io_ops->data;
	enum ibv_wc_opcode comp_opcode = IBV_WC_RDMA_WRITE;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret, r = 0;

	switch (rd->rdma_protocol) {
	case FIO_RDMA_MEM_WRITE:
		comp_opcode = IBV_WC_RDMA_WRITE;
		break;
	case FIO_RDMA_MEM_READ:
		comp_opcode = IBV_WC_RDMA_READ;
		break;
	case FIO_RDMA_CHA_SEND:
		comp_opcode = IBV_WC_SEND;
		break;
	case FIO_RDMA_CHA_RECV:
		comp_opcode = IBV_WC_RECV;
		break;
	default:
		log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
		break;
	}

	if (rd->cq_event_num > 0) {	/* previous left */
		rd->cq_event_num--;
		return 0;
	}

again:
	if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
		log_err("fio: Failed to get cq event!\n");
		return -1;
	}
	if (ev_cq != rd->cq) {
		log_err("fio: Unknown CQ!\n");
		return -1;
	}
	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: Failed to set notify!\n");
		return -1;
	}

	ret = cq_event_handler(td, comp_opcode);
	if (ret < 1)
		goto again;

	ibv_ack_cq_events(rd->cq, ret);

	r += ret;
	if (r < min)
		goto again;

	rd->cq_event_num -= r;

	return r;
}

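/*
 * Post one send-queue work request per io_u. For the memory-semantic
 * modes a remote buffer is picked at random from the table announced by
 * the server; for the channel semantic a plain SEND is issued.
 */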
static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us,
			   unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_send_wr *bad_wr;
	enum ibv_wc_opcode comp_opcode = IBV_WC_RDMA_WRITE;
	struct rdma_io_u_data *r_io_u_d = NULL;
	int i, index;

	for (i = 0; i < nr; i++) {
		/* RDMA_WRITE or RDMA_READ */
		switch (rd->rdma_protocol) {
		case FIO_RDMA_MEM_WRITE:
			/* compose work request */
			r_io_u_d = io_us[i]->engine_data;
			index = rand() % rd->rmt_nr;
			r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_WRITE;
			r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
			r_io_u_d->sq_wr.wr.rdma.remote_addr =
			    rd->rmt_us[index].buf;
			r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
			break;
		case FIO_RDMA_MEM_READ:
			/* compose work request */
			r_io_u_d = io_us[i]->engine_data;
			index = rand() % rd->rmt_nr;
			r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_READ;
			r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
			r_io_u_d->sq_wr.wr.rdma.remote_addr =
			    rd->rmt_us[index].buf;
			r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
			break;
		case FIO_RDMA_CHA_SEND:
			r_io_u_d = io_us[i]->engine_data;
			r_io_u_d->sq_wr.opcode = IBV_WR_SEND;
			r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
			break;
		default:
			log_err("fio: unknown rdma protocol - %d\n",
				rd->rdma_protocol);
			break;
		}

		if (ibv_post_send(rd->qp, &r_io_u_d->sq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_send fail\n");
			return -1;
		}

		dprint_io_u(io_us[i], "fio_rdmaio_send");
	}

	/* wait for completion
	   rdma_poll_wait(td, comp_opcode); */

	return i;
}

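/*
 * Receive-side counterpart: for the channel semantic, post one receive
 * work request per io_u. For the memory-semantic modes the server only
 * re-posts its control receive and waits for the client's FINISH message.
 */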
static int fio_rdmaio_recv(struct thread_data *td, struct io_u **io_us,
			   unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_recv_wr *bad_wr;
	struct rdma_io_u_data *r_io_u_d;
	int i;

	i = 0;
	if (rd->rdma_protocol == FIO_RDMA_CHA_RECV) {
		/* post io_u into recv queue */
		for (i = 0; i < nr; i++) {
			r_io_u_d = io_us[i]->engine_data;
			if (ibv_post_recv(rd->qp, &r_io_u_d->rq_wr, &bad_wr) !=
			    0) {
				log_err("fio: ibv_post_recv fail\n");
				return 1;
			}
		}
	} else if ((rd->rdma_protocol == FIO_RDMA_MEM_READ)
		   || (rd->rdma_protocol == FIO_RDMA_MEM_WRITE)) {
		/* re-post the rq_wr */
		if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_recv fail\n");
			return 1;
		}

		rdma_poll_wait(td, IBV_WC_RECV);

		dprint(FD_IO, "fio: recv FINISH message\n");
		exit(0);
	}

	return i;
}

static int fio_rdmaio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct rdmaio_data *rd = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (rd->io_u_queued_nr == (int)td->o.iodepth)
		return FIO_Q_BUSY;

	rd->io_us_queued[rd->io_u_queued_nr] = io_u;
	rd->io_u_queued_nr++;

	dprint_io_u(io_u, "fio_rdmaio_queue");

	return FIO_Q_QUEUED;
}

static void fio_rdmaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct timeval now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		/* queued -> flight */
		rd->io_us_flight[rd->io_u_flight_nr] = io_u;
		rd->io_u_flight_nr++;

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}

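/*
 * Submit everything on the queued list: the client side issues sends,
 * the server side posts receives. Successfully submitted io_us move from
 * the queued list to the in-flight list.
 */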
static int fio_rdmaio_commit(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct io_u **io_us;
	int ret;

	if (!rd->io_us_queued)
		return 0;

	io_us = rd->io_us_queued;
	do {
		/* RDMA_WRITE or RDMA_READ */
		if (rd->is_client)
			ret = fio_rdmaio_send(td, io_us, rd->io_u_queued_nr);
		else
			ret = fio_rdmaio_recv(td, io_us, rd->io_u_queued_nr);

		if (ret > 0) {
			fio_rdmaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);
			rd->io_u_queued_nr -= ret;
			io_us += ret;
			ret = 0;
		} else
			break;
	} while (rd->io_u_queued_nr);

	return ret;
}

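/*
 * Client side: connect to the server, then exchange the initial control
 * message announcing the requested protocol and io depth.
 */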
static int fio_rdmaio_connect(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_conn_param conn_param;
	struct ibv_send_wr *bad_wr;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 10;

	if (rdma_connect(rd->cm_id, &conn_param) != 0) {
		log_err("fio: rdma_connect fail\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_ESTABLISHED\n");
		return 1;
	}

	/* send task request */
	rd->send_buf.mode = htonl(rd->rdma_protocol);
	rd->send_buf.nr = htonl(td->o.iodepth);

	if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_send fail\n");
		return 1;
	}

	rdma_poll_wait(td, IBV_WC_SEND);

	/* wait for remote MR info from server side */
	rdma_poll_wait(td, IBV_WC_RECV);

	return 0;
}

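/*
 * Server side: accept the pending connection request, wait for the
 * client's task request, then answer with our control message (which
 * carries the remote buffer table for the memory-semantic modes).
 */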
static int fio_rdmaio_accept(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_conn_param conn_param;
	struct ibv_send_wr *bad_wr;

	/* rdma_accept() - then wait for accept success */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;

	if (rdma_accept(rd->child_cm_id, &conn_param) != 0) {
		log_err("fio: rdma_accept fail\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_ESTABLISHED\n");
		return 1;
	}

	/* wait for request */
	rdma_poll_wait(td, IBV_WC_RECV);

	if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_send fail\n");
		return 1;
	}

	rdma_poll_wait(td, IBV_WC_SEND);

	return 0;
}

static int fio_rdmaio_open_file(struct thread_data *td, struct fio_file *f)
{
	if (td_read(td))
		return fio_rdmaio_accept(td, f);
	else
		return fio_rdmaio_connect(td, f);
}

static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_send_wr *bad_wr;

	/* unregister rdma buffer */

	/*
	 * Client sends notification to the server side
	 */
	/* refer to: http://linux.die.net/man/7/rdma_cm */
	if ((rd->is_client == 1) && ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE)
				     || (rd->rdma_protocol ==
					 FIO_RDMA_MEM_READ))) {
		if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_send fail\n");
			return 1;
		}

		dprint(FD_IO, "fio: close information sent successfully\n");
		rdma_poll_wait(td, IBV_WC_SEND);
	}

	if (rd->is_client == 1)
		rdma_disconnect(rd->cm_id);
	else {
		rdma_disconnect(rd->child_cm_id);
/*		rdma_disconnect(rd->cm_id); */
	}

/*	if (get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_DISCONNECTED) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_DISCONNECTED\n");
		return 1;
	} */

	ibv_destroy_qp(rd->qp);
	ibv_destroy_cq(rd->cq);

	if (rd->is_client == 1)
		rdma_destroy_id(rd->cm_id);
	else {
		rdma_destroy_id(rd->child_cm_id);
		rdma_destroy_id(rd->cm_id);
	}

	ibv_destroy_comp_channel(rd->channel);
	ibv_dealloc_pd(rd->pd);

	return 0;
}

static int fio_rdmaio_setup_connect(struct thread_data *td, const char *host,
				    unsigned short port)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_recv_wr *bad_wr;

	rd->addr.sin_family = AF_INET;
	rd->addr.sin_port = htons(port);

	if (inet_aton(host, &rd->addr.sin_addr) != 1) {
		struct hostent *hent;

		hent = gethostbyname(host);
		if (!hent) {
			td_verror(td, errno, "gethostbyname");
			return 1;
		}

		memcpy(&rd->addr.sin_addr, hent->h_addr, 4);
	}

	/* resolve addr */
	if (rdma_resolve_addr(rd->cm_id, NULL,
			      (struct sockaddr *)&rd->addr, 2000) != 0) {
		log_err("fio: rdma_resolve_addr\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ADDR_RESOLVED)
	    != 0) {
		log_err("fio: get_next_channel_event\n");
		return 1;
	}

	/* resolve route */
	if (rdma_resolve_route(rd->cm_id, 2000) != 0) {
		log_err("fio: rdma_resolve_route\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ROUTE_RESOLVED) != 0) {
		log_err("fio: get_next_channel_event\n");
		return 1;
	}

	/* create qp and buffer */
	if (fio_rdmaio_setup_qp(td) != 0)
		return 1;

	if (fio_rdmaio_setup_control_msg_buffers(td) != 0)
		return 1;

	/* post recv buf */
	if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_recv fail\n");
		return 1;
	}

	return 0;
}

static int fio_rdmaio_setup_listen(struct thread_data *td, short port)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_recv_wr *bad_wr;

	rd->addr.sin_family = AF_INET;
	rd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
	rd->addr.sin_port = htons(port);

	/* rdma_listen */
	if (rdma_bind_addr(rd->cm_id, (struct sockaddr *)&rd->addr) != 0) {
		log_err("fio: rdma_bind_addr fail\n");
		return 1;
	}

	if (rdma_listen(rd->cm_id, 3) != 0) {
		log_err("fio: rdma_listen fail\n");
		return 1;
	}

	/* wait for CONNECT_REQUEST */
	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_CONNECT_REQUEST) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_CONNECT_REQUEST\n");
		return 1;
	}

	if (fio_rdmaio_setup_qp(td) != 0)
		return 1;

	if (fio_rdmaio_setup_control_msg_buffers(td) != 0)
		return 1;

	/* post recv buf */
	if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_recv fail\n");
		return 1;
	}

	return 0;
}

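/*
 * Parse the job options, raise RLIMIT_MEMLOCK if needed, create the CM
 * id, set up the connection (listen or connect according to the data
 * direction) and register every io_u buffer with the protection domain.
 */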
static int fio_rdmaio_init(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	unsigned int max_bs;
	unsigned int port;
	char host[64], buf[128];
	char *sep, *portp, *modep;
	struct flist_head *entry;
	int ret, i = 0;
	struct rlimit rl;

	if (td_rw(td)) {
		log_err("fio: rdma connections must be read OR write\n");
		return 1;
	}
	if (td_random(td)) {
		log_err("fio: RDMA network IO can't be random\n");
		return 1;
	}

	/* check RLIMIT_MEMLOCK */
	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		log_err("fio: getrlimit fail: %d(%s)\n",
			errno, strerror(errno));
		return 1;
	}

	/* soft limit */
	if ((rl.rlim_cur != RLIM_INFINITY)
	    && (rl.rlim_cur < td->orig_buffer_size)) {
		log_err("fio: soft RLIMIT_MEMLOCK is: %ld\n",
			(long) rl.rlim_cur);
		log_err("fio: total block size is:    %llu\n",
			(unsigned long long) td->orig_buffer_size);
		/* try to set larger RLIMIT_MEMLOCK */
		rl.rlim_cur = rl.rlim_max;
		if (setrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
			log_err("fio: setrlimit fail: %d(%s)\n",
				errno, strerror(errno));
			log_err("fio: you may try to enlarge MEMLOCK as root:\n");
			log_err("# ulimit -l unlimited\n");
			return 1;
		}
	}

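	/*
	 * The filename option carries the connection parameters as
	 * "host/port[/protocol]", e.g. (illustrative values)
	 * "192.168.0.1/5000/rdma_write". The protocol is one of
	 * rdma_write, rdma_read or send; it defaults to rdma_write.
	 */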
	strcpy(buf, td->o.filename);

	sep = strchr(buf, '/');
	if (!sep)
		goto bad_host;

	*sep = '\0';
	sep++;
	strcpy(host, buf);
	if (!strlen(host))
		goto bad_host;

	modep = NULL;
	portp = sep;
	sep = strchr(portp, '/');
	if (sep) {
		*sep = '\0';
		modep = sep + 1;
	}

	port = strtol(portp, NULL, 10);
	if (!port || port > 65535)
		goto bad_host;

	if (modep) {
		if (!strncmp("rdma_write", modep, strlen(modep)) ||
		    !strncmp("RDMA_WRITE", modep, strlen(modep)))
			rd->rdma_protocol = FIO_RDMA_MEM_WRITE;
		else if (!strncmp("rdma_read", modep, strlen(modep)) ||
			 !strncmp("RDMA_READ", modep, strlen(modep)))
			rd->rdma_protocol = FIO_RDMA_MEM_READ;
		else if (!strncmp("send", modep, strlen(modep)) ||
			 !strncmp("SEND", modep, strlen(modep)))
			rd->rdma_protocol = FIO_RDMA_CHA_SEND;
		else
			goto bad_host;
	} else
		rd->rdma_protocol = FIO_RDMA_MEM_WRITE;

	rd->cq_event_num = 0;

	rd->cm_channel = rdma_create_event_channel();
	if (!rd->cm_channel) {
		log_err("fio: rdma_create_event_channel fail\n");
		return 1;
	}

	ret = rdma_create_id(rd->cm_channel, &rd->cm_id, rd, RDMA_PS_TCP);
	if (ret) {
		log_err("fio: rdma_create_id fail\n");
		return 1;
	}

	if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
	    (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
		rd->rmt_us =
			malloc(FIO_RDMA_MAX_IO_DEPTH * sizeof(struct remote_u));
		memset(rd->rmt_us, 0,
		       FIO_RDMA_MAX_IO_DEPTH * sizeof(struct remote_u));
		rd->rmt_nr = 0;
	}

	rd->io_us_queued = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(rd->io_us_queued, 0, td->o.iodepth * sizeof(struct io_u *));
	rd->io_u_queued_nr = 0;

	rd->io_us_flight = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(rd->io_us_flight, 0, td->o.iodepth * sizeof(struct io_u *));
	rd->io_u_flight_nr = 0;

	rd->io_us_completed = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(rd->io_us_completed, 0, td->o.iodepth * sizeof(struct io_u *));
	rd->io_u_completed_nr = 0;

	if (td_read(td)) {	/* READ as the server */
		rd->is_client = 0;
		/* the server sets up its buffers once the request arrives */
		ret = fio_rdmaio_setup_listen(td, port);
	} else {		/* WRITE as the client */
		rd->is_client = 1;
		ret = fio_rdmaio_setup_connect(td, host, port);
	}

	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
	/* register each io_u in the free list */
	flist_for_each(entry, &td->io_u_freelist) {
		struct io_u *io_u = flist_entry(entry, struct io_u, list);

		io_u->engine_data = malloc(sizeof(struct rdma_io_u_data));
		memset(io_u->engine_data, 0, sizeof(struct rdma_io_u_data));
		((struct rdma_io_u_data *)io_u->engine_data)->wr_id = i;

		io_u->mr = ibv_reg_mr(rd->pd, io_u->buf, max_bs,
				      IBV_ACCESS_LOCAL_WRITE |
				      IBV_ACCESS_REMOTE_READ |
				      IBV_ACCESS_REMOTE_WRITE);
		if (io_u->mr == NULL) {
			log_err("fio: ibv_reg_mr io_u failed\n");
			return 1;
		}

		rd->send_buf.rmt_us[i].buf =
			htonll((uint64_t) (unsigned long)io_u->buf);
		rd->send_buf.rmt_us[i].rkey = htonl(io_u->mr->rkey);
		rd->send_buf.rmt_us[i].size = htonl(max_bs);

/*		log_info("fio: Send rkey %x addr %" PRIx64 " len %d to client\n",
			 io_u->mr->rkey, io_u->buf, max_bs); */
		i++;
	}

	rd->send_buf.nr = htonl(i);

	return ret;
bad_host:
	log_err("fio: bad rdma host/port/protocol: %s\n", td->o.filename);
	return 1;
}

static void fio_rdmaio_cleanup(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;

	if (rd)
		free(rd);
}

static int fio_rdmaio_setup(struct thread_data *td)
{
	struct rdmaio_data *rd;

	if (!td->io_ops->data) {
		rd = malloc(sizeof(*rd));

		memset(rd, 0, sizeof(*rd));
		td->io_ops->data = rd;
	}

	return 0;
}

static struct ioengine_ops ioengine_rw = {
	.name		= "rdma",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_rdmaio_setup,
	.init		= fio_rdmaio_init,
	.prep		= fio_rdmaio_prep,
	.queue		= fio_rdmaio_queue,
	.commit		= fio_rdmaio_commit,
	.getevents	= fio_rdmaio_getevents,
	.event		= fio_rdmaio_event,
	.cleanup	= fio_rdmaio_cleanup,
	.open_file	= fio_rdmaio_open_file,
	.close_file	= fio_rdmaio_close_file,
	.flags		= FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO,
};

#else /* FIO_HAVE_RDMA */

static int fio_rdmaio_open_file(struct thread_data *td, struct fio_file *f)
{
	return 0;
}

static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f)
{
	return 0;
}

static int fio_rdmaio_queue(struct thread_data *td, struct io_u *io_u)
{
	return FIO_Q_COMPLETED;
}

static int fio_rdmaio_init(struct thread_data fio_unused * td)
{
	log_err("fio: rdma(librdmacm libibverbs) not available\n");
	log_err("     You haven't compiled the rdma ioengine into fio.\n");
	log_err("     If you want to try the rdma ioengine,\n");
	log_err("     make sure OFED is installed,\n");
	log_err("     $ ofed_info\n");
	log_err("     then try to make fio as follows:\n");
	log_err("     $ export EXTFLAGS=\"-DFIO_HAVE_RDMA\"\n");
	log_err("     $ export EXTLIBS=\"-libverbs -lrdmacm\"\n");
	log_err("     $ make clean && make\n");
	return 1;
}

static struct ioengine_ops ioengine_rw = {
	.name		= "rdma",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_rdmaio_init,
	.queue		= fio_rdmaio_queue,
	.open_file	= fio_rdmaio_open_file,
	.close_file	= fio_rdmaio_close_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO,
};

#endif

static void fio_init fio_rdmaio_register(void)
{
	register_ioengine(&ioengine_rw);
}

static void fio_exit fio_rdmaio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
}