[fio.git] / engines/rdma.c
/*
 * RDMA I/O engine
 *
 * RDMA I/O engine based on the IB verbs and RDMA/CM user space libraries.
 * Supports both RDMA memory semantics and channel semantics
 * for the InfiniBand, RoCE and iWARP protocols.
 *
 * You will need the Linux RDMA software installed, either
 * from your Linux distributor or directly from openfabrics.org:
 *
 * http://www.openfabrics.org/downloads/OFED/
 *
 * Exchange steps of the RDMA ioengine control messages:
 * 1. client side sends test mode (RDMA_WRITE/RDMA_READ/SEND)
 *    to server side.
 * 2. server side parses test mode, and sends back confirmation
 *    to client side. In an RDMA WRITE/READ test, this confirmation
 *    includes memory information, such as rkey and address.
 * 3. client side initiates the test loop.
 * 4. In an RDMA WRITE/READ test, the client side sends a completion
 *    notification to the server side. The server side then sets its
 *    td->done to true.
 */
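/*
 * Usage sketch, inferred from the filename parsing in fio_rdmaio_init()
 * and the role selection in fio_rdmaio_open_file() below (treat the exact
 * job option spelling as an assumption, not gospel):
 *
 *   server side (a read job binds, listens and accepts):
 *     fio --ioengine=rdma --rw=read --filename=<bind_ip>/<port> ...
 *
 *   client side (a write job resolves and connects):
 *     fio --ioengine=rdma --rw=write --filename=<host>/<port>/rdma_write ...
 *
 * The optional third filename component selects the protocol and may be
 * rdma_write (the default), rdma_read or send.
 */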
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>

#include <byteswap.h>
#include <pthread.h>
#include <inttypes.h>

#include "../fio.h"
#include "../hash.h"

#include <rdma/rdma_cma.h>
#include <infiniband/arch.h>

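/*
 * FIO_RDMA_MAX_IO_DEPTH bounds the rmt_us[] table carried in the control
 * message below, and it doubles as the reserved wr_id of the control
 * message work requests; per-io_u wr_ids are array indexes and therefore
 * always stay below it.
 */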
#define FIO_RDMA_MAX_IO_DEPTH 512

enum rdma_io_mode {
	FIO_RDMA_UNKNOWN = 0,
	FIO_RDMA_MEM_WRITE,
	FIO_RDMA_MEM_READ,
	FIO_RDMA_CHA_SEND,
	FIO_RDMA_CHA_RECV
};

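/*
 * remote_u describes one remotely accessible buffer: the peer's virtual
 * address, its rkey and its registered length. rdma_info_blk is the
 * fixed-size control message exchanged at setup time; all integers in it
 * travel in network byte order (note the htonl()/ntohl() conversions).
 */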
struct remote_u {
	uint64_t buf;
	uint32_t rkey;
	uint32_t size;
};

struct rdma_info_blk {
	uint32_t mode;		/* channel semantic or memory semantic */
	uint32_t nr;		/* client: io depth
				   server: number of records for memory semantic */
	struct remote_u rmt_us[FIO_RDMA_MAX_IO_DEPTH];
};

struct rdma_io_u_data {
	uint64_t wr_id;
	struct ibv_send_wr sq_wr;
	struct ibv_recv_wr rq_wr;
	struct ibv_sge rdma_sgl;
};

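/*
 * Per-thread engine state. io_us move through three stages: queued
 * (accepted by ->queue), flight (posted to the QP) and completed (reaped
 * from the CQ, waiting for ->event to hand them back to fio).
 */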
struct rdmaio_data {
	int is_client;
	enum rdma_io_mode rdma_protocol;
	char host[64];
	struct sockaddr_in addr;

	struct ibv_recv_wr rq_wr;
	struct ibv_sge recv_sgl;
	struct rdma_info_blk recv_buf;
	struct ibv_mr *recv_mr;

	struct ibv_send_wr sq_wr;
	struct ibv_sge send_sgl;
	struct rdma_info_blk send_buf;
	struct ibv_mr *send_mr;

	struct ibv_comp_channel *channel;
	struct ibv_cq *cq;
	struct ibv_pd *pd;
	struct ibv_qp *qp;

	pthread_t cmthread;
	struct rdma_event_channel *cm_channel;
	struct rdma_cm_id *cm_id;
	struct rdma_cm_id *child_cm_id;

	int cq_event_num;

	struct remote_u *rmt_us;
	int rmt_nr;
	struct io_u **io_us_queued;
	int io_u_queued_nr;
	struct io_u **io_us_flight;
	int io_u_flight_nr;
	struct io_u **io_us_completed;
	int io_u_completed_nr;

	struct frand_state rand_state;
};

static int client_recv(struct thread_data *td, struct ibv_wc *wc)
{
	struct rdmaio_data *rd = td->io_ops->data;

	if (wc->byte_len != sizeof(rd->recv_buf)) {
		log_err("Received bogus data, size %d\n", wc->byte_len);
		return 1;
	}

	/* store mr info for MEMORY semantic */
	if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
	    (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
		int i = 0;

		rd->rmt_nr = ntohl(rd->recv_buf.nr);

		for (i = 0; i < rd->rmt_nr; i++) {
			rd->rmt_us[i].buf = ntohll(rd->recv_buf.rmt_us[i].buf);
			rd->rmt_us[i].rkey = ntohl(rd->recv_buf.rmt_us[i].rkey);
			rd->rmt_us[i].size = ntohl(rd->recv_buf.rmt_us[i].size);

			dprint(FD_IO,
			       "fio: Received rkey %x addr %" PRIx64
			       " len %d from peer\n", rd->rmt_us[i].rkey,
			       rd->rmt_us[i].buf, rd->rmt_us[i].size);
		}
	}

	return 0;
}

static int server_recv(struct thread_data *td, struct ibv_wc *wc)
{
	struct rdmaio_data *rd = td->io_ops->data;

	if (wc->wr_id == FIO_RDMA_MAX_IO_DEPTH) {
		rd->rdma_protocol = ntohl(rd->recv_buf.mode);

		/* CHANNEL semantic, do nothing */
		if (rd->rdma_protocol == FIO_RDMA_CHA_SEND)
			rd->rdma_protocol = FIO_RDMA_CHA_RECV;
	}

	return 0;
}

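/*
 * Drain the CQ one entry at a time with ibv_poll_cq(). Each completion's
 * wr_id is matched against the flight list and the matching io_u is moved
 * to the completed list; wr_id == FIO_RDMA_MAX_IO_DEPTH marks a control
 * message and is handled by client_recv()/server_recv() instead.
 */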
static int cq_event_handler(struct thread_data *td, enum ibv_wc_opcode opcode)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_wc wc;
	struct rdma_io_u_data *r_io_u_d;
	int ret;
	int compevnum = 0;
	int i;

	while ((ret = ibv_poll_cq(rd->cq, 1, &wc)) == 1) {
		ret = 0;
		compevnum++;

		if (wc.status) {
			log_err("fio: cq completion status %d(%s)\n",
				wc.status, ibv_wc_status_str(wc.status));
			return -1;
		}

		switch (wc.opcode) {

		case IBV_WC_RECV:
			if (rd->is_client == 1)
				client_recv(td, &wc);
			else
				server_recv(td, &wc);

			if (wc.wr_id == FIO_RDMA_MAX_IO_DEPTH)
				break;

			for (i = 0; i < rd->io_u_flight_nr; i++) {
				r_io_u_d = rd->io_us_flight[i]->engine_data;

				if (wc.wr_id == r_io_u_d->rq_wr.wr_id) {
					rd->io_us_flight[i]->resid =
					    rd->io_us_flight[i]->buflen
					    - wc.byte_len;

					rd->io_us_flight[i]->error = 0;

					rd->io_us_completed[rd->io_u_completed_nr]
					    = rd->io_us_flight[i];
					rd->io_u_completed_nr++;
					break;
				}
			}
			if (i == rd->io_u_flight_nr)
				log_err("fio: recv wr %" PRId64 " not found\n",
					wc.wr_id);
			else {
				/* move the last entry into the vacated slot */
				rd->io_us_flight[i] =
				    rd->io_us_flight[rd->io_u_flight_nr - 1];
				rd->io_u_flight_nr--;
			}

			break;

		case IBV_WC_SEND:
		case IBV_WC_RDMA_WRITE:
		case IBV_WC_RDMA_READ:
			if (wc.wr_id == FIO_RDMA_MAX_IO_DEPTH)
				break;

			for (i = 0; i < rd->io_u_flight_nr; i++) {
				r_io_u_d = rd->io_us_flight[i]->engine_data;

				if (wc.wr_id == r_io_u_d->sq_wr.wr_id) {
					rd->io_us_completed[rd->io_u_completed_nr]
					    = rd->io_us_flight[i];
					rd->io_u_completed_nr++;
					break;
				}
			}
			if (i == rd->io_u_flight_nr)
				log_err("fio: send wr %" PRId64 " not found\n",
					wc.wr_id);
			else {
				/* move the last entry into the vacated slot */
				rd->io_us_flight[i] =
				    rd->io_us_flight[rd->io_u_flight_nr - 1];
				rd->io_u_flight_nr--;
			}

			break;

		default:
			log_info("fio: unknown completion event %d\n",
				 wc.opcode);
			return -1;
		}
		rd->cq_event_num++;
	}
	if (ret) {
		log_err("fio: poll error %d\n", ret);
		return 1;
	}

	return compevnum;
}

/*
 * Return -1 on error, or the (positive) number of events reaped.
 */
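/*
 * This follows the standard verbs completion-channel pattern: block in
 * ibv_get_cq_event(), re-arm notification with ibv_req_notify_cq(), poll
 * the CQ until it is empty, then ack the channel events in one batch.
 */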
static int rdma_poll_wait(struct thread_data *td, enum ibv_wc_opcode opcode)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

	if (rd->cq_event_num > 0) {	/* previous left */
		rd->cq_event_num--;
		return 0;
	}

again:
	if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
		log_err("fio: Failed to get cq event!\n");
		return -1;
	}
	if (ev_cq != rd->cq) {
		log_err("fio: Unknown CQ!\n");
		return -1;
	}
	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: Failed to set notify!\n");
		return -1;
	}

	ret = cq_event_handler(td, opcode);
	if (ret < 1)
		goto again;

	ibv_ack_cq_events(rd->cq, ret);

	rd->cq_event_num--;

	return ret;
}

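/*
 * Allocate the PD, completion channel, CQ and QP. The server operates on
 * the child cm_id produced by the CONNECT_REQUEST event, the client on
 * its own cm_id. The CQ is sized at twice the io depth (16 minimum) and
 * is shared by the send and receive queues.
 */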
static int fio_rdmaio_setup_qp(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_qp_init_attr init_attr;
	int qp_depth = td->o.iodepth * 2;	/* twice the io depth */

	if (rd->is_client == 0)
		rd->pd = ibv_alloc_pd(rd->child_cm_id->verbs);
	else
		rd->pd = ibv_alloc_pd(rd->cm_id->verbs);

	if (rd->pd == NULL) {
		log_err("fio: ibv_alloc_pd fail\n");
		return 1;
	}

	if (rd->is_client == 0)
		rd->channel = ibv_create_comp_channel(rd->child_cm_id->verbs);
	else
		rd->channel = ibv_create_comp_channel(rd->cm_id->verbs);
	if (rd->channel == NULL) {
		log_err("fio: ibv_create_comp_channel fail\n");
		goto err1;
	}

	if (qp_depth < 16)
		qp_depth = 16;

	if (rd->is_client == 0)
		rd->cq = ibv_create_cq(rd->child_cm_id->verbs,
				       qp_depth, rd, rd->channel, 0);
	else
		rd->cq = ibv_create_cq(rd->cm_id->verbs,
				       qp_depth, rd, rd->channel, 0);
	if (rd->cq == NULL) {
		log_err("fio: ibv_create_cq failed\n");
		goto err2;
	}

	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: ibv_req_notify_cq failed\n");
		goto err3;
	}

	/* create queue pair */
	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.cap.max_send_wr = qp_depth;
	init_attr.cap.max_recv_wr = qp_depth;
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1;
	init_attr.qp_type = IBV_QPT_RC;
	init_attr.send_cq = rd->cq;
	init_attr.recv_cq = rd->cq;

	if (rd->is_client == 0) {
		if (rdma_create_qp(rd->child_cm_id, rd->pd, &init_attr) != 0) {
			log_err("fio: rdma_create_qp failed\n");
			goto err3;
		}
		rd->qp = rd->child_cm_id->qp;
	} else {
		if (rdma_create_qp(rd->cm_id, rd->pd, &init_attr) != 0) {
			log_err("fio: rdma_create_qp failed\n");
			goto err3;
		}
		rd->qp = rd->cm_id->qp;
	}

	return 0;

err3:
	ibv_destroy_cq(rd->cq);
err2:
	ibv_destroy_comp_channel(rd->channel);
err1:
	ibv_dealloc_pd(rd->pd);

	return 1;
}

static int fio_rdmaio_setup_control_msg_buffers(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;

	rd->recv_mr = ibv_reg_mr(rd->pd, &rd->recv_buf, sizeof(rd->recv_buf),
				 IBV_ACCESS_LOCAL_WRITE);
	if (rd->recv_mr == NULL) {
		log_err("fio: recv_buf reg_mr failed\n");
		return 1;
	}

	rd->send_mr = ibv_reg_mr(rd->pd, &rd->send_buf, sizeof(rd->send_buf),
				 0);
	if (rd->send_mr == NULL) {
		log_err("fio: send_buf reg_mr failed\n");
		ibv_dereg_mr(rd->recv_mr);
		return 1;
	}

	/* setup work request */
	/* recv wq */
	rd->recv_sgl.addr = (uint64_t) (unsigned long)&rd->recv_buf;
	rd->recv_sgl.length = sizeof(rd->recv_buf);
	rd->recv_sgl.lkey = rd->recv_mr->lkey;
	rd->rq_wr.sg_list = &rd->recv_sgl;
	rd->rq_wr.num_sge = 1;
	rd->rq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

	/* send wq */
	rd->send_sgl.addr = (uint64_t) (unsigned long)&rd->send_buf;
	rd->send_sgl.length = sizeof(rd->send_buf);
	rd->send_sgl.lkey = rd->send_mr->lkey;

	rd->sq_wr.opcode = IBV_WR_SEND;
	rd->sq_wr.send_flags = IBV_SEND_SIGNALED;
	rd->sq_wr.sg_list = &rd->send_sgl;
	rd->sq_wr.num_sge = 1;
	rd->sq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;

	return 0;
}

static int get_next_channel_event(struct thread_data *td,
				  struct rdma_event_channel *channel,
				  enum rdma_cm_event_type wait_event)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_cm_event *event;
	int ret;

	ret = rdma_get_cm_event(channel, &event);
	if (ret) {
		log_err("fio: rdma_get_cm_event: %d\n", ret);
		return 1;
	}

	if (event->event != wait_event) {
		log_err("fio: event is %s instead of %s\n",
			rdma_event_str(event->event),
			rdma_event_str(wait_event));
		/* every retrieved event must be acked, even on mismatch */
		rdma_ack_cm_event(event);
		return 1;
	}

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		rd->child_cm_id = event->id;
		break;
	default:
		break;
	}

	rdma_ack_cm_event(event);

	return 0;
}

static int fio_rdmaio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_io_u_data *r_io_u_d;

	r_io_u_d = io_u->engine_data;

	switch (rd->rdma_protocol) {
	case FIO_RDMA_MEM_WRITE:
	case FIO_RDMA_MEM_READ:
		r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
		r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
		r_io_u_d->sq_wr.wr_id = r_io_u_d->wr_id;
		r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
		r_io_u_d->sq_wr.sg_list = &r_io_u_d->rdma_sgl;
		r_io_u_d->sq_wr.num_sge = 1;
		break;
	case FIO_RDMA_CHA_SEND:
		r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
		r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
		r_io_u_d->rdma_sgl.length = io_u->buflen;
		r_io_u_d->sq_wr.wr_id = r_io_u_d->wr_id;
		r_io_u_d->sq_wr.opcode = IBV_WR_SEND;
		r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
		r_io_u_d->sq_wr.sg_list = &r_io_u_d->rdma_sgl;
		r_io_u_d->sq_wr.num_sge = 1;
		break;
	case FIO_RDMA_CHA_RECV:
		r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
		r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
		r_io_u_d->rdma_sgl.length = io_u->buflen;
		r_io_u_d->rq_wr.wr_id = r_io_u_d->wr_id;
		r_io_u_d->rq_wr.sg_list = &r_io_u_d->rdma_sgl;
		r_io_u_d->rq_wr.num_sge = 1;
		break;
	default:
		log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
		break;
	}

	return 0;
}

static struct io_u *fio_rdmaio_event(struct thread_data *td, int event)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct io_u *io_u;
	int i;

	io_u = rd->io_us_completed[0];
	for (i = 0; i < rd->io_u_completed_nr - 1; i++)
		rd->io_us_completed[i] = rd->io_us_completed[i + 1];

	rd->io_u_completed_nr--;

	dprint_io_u(io_u, "fio_rdmaio_event");

	return io_u;
}

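/*
 * Like rdma_poll_wait(), but keeps harvesting completions until at least
 * 'min' events have accumulated. The expected completion opcode follows
 * from the protocol in use.
 */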
static int fio_rdmaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, struct timespec *t)
{
	struct rdmaio_data *rd = td->io_ops->data;
	enum ibv_wc_opcode comp_opcode = IBV_WC_RDMA_WRITE;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret, r = 0;

	switch (rd->rdma_protocol) {
	case FIO_RDMA_MEM_WRITE:
		comp_opcode = IBV_WC_RDMA_WRITE;
		break;
	case FIO_RDMA_MEM_READ:
		comp_opcode = IBV_WC_RDMA_READ;
		break;
	case FIO_RDMA_CHA_SEND:
		comp_opcode = IBV_WC_SEND;
		break;
	case FIO_RDMA_CHA_RECV:
		comp_opcode = IBV_WC_RECV;
		break;
	default:
		log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
		break;
	}

	if (rd->cq_event_num > 0) {	/* previous left */
		rd->cq_event_num--;
		return 0;
	}

again:
	if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
		log_err("fio: Failed to get cq event!\n");
		return -1;
	}
	if (ev_cq != rd->cq) {
		log_err("fio: Unknown CQ!\n");
		return -1;
	}
	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: Failed to set notify!\n");
		return -1;
	}

	ret = cq_event_handler(td, comp_opcode);
	if (ret < 1)
		goto again;

	ibv_ack_cq_events(rd->cq, ret);

	r += ret;
	if (r < min)
		goto again;

	rd->cq_event_num -= r;

	return r;
}

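/*
 * Post one work request per io_u. In the memory-semantic modes each io_u
 * targets a randomly chosen remote buffer out of the rmt_us table
 * announced by the server.
 */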
static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us,
			   unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_send_wr *bad_wr;
#if 0
	enum ibv_wc_opcode comp_opcode;
	comp_opcode = IBV_WC_RDMA_WRITE;
#endif
	int i;
	long index;
	struct rdma_io_u_data *r_io_u_d;

	r_io_u_d = NULL;

	for (i = 0; i < nr; i++) {
		/* RDMA_WRITE or RDMA_READ */
		switch (rd->rdma_protocol) {
		case FIO_RDMA_MEM_WRITE:
			/* compose work request */
			r_io_u_d = io_us[i]->engine_data;
			index = __rand(&rd->rand_state) % rd->rmt_nr;
			r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_WRITE;
			r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
			r_io_u_d->sq_wr.wr.rdma.remote_addr =
			    rd->rmt_us[index].buf;
			r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
			break;
		case FIO_RDMA_MEM_READ:
			/* compose work request */
			r_io_u_d = io_us[i]->engine_data;
			index = __rand(&rd->rand_state) % rd->rmt_nr;
			r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_READ;
			r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
			r_io_u_d->sq_wr.wr.rdma.remote_addr =
			    rd->rmt_us[index].buf;
			r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
			break;
		case FIO_RDMA_CHA_SEND:
			r_io_u_d = io_us[i]->engine_data;
			r_io_u_d->sq_wr.opcode = IBV_WR_SEND;
			r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
			break;
		default:
			log_err("fio: unknown rdma protocol - %d\n",
				rd->rdma_protocol);
			break;
		}

		if (ibv_post_send(rd->qp, &r_io_u_d->sq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_send fail\n");
			return -1;
		}

		dprint_io_u(io_us[i], "fio_rdmaio_send");
	}

	/* wait for completion
	   rdma_poll_wait(td, comp_opcode); */

	return i;
}

static int fio_rdmaio_recv(struct thread_data *td, struct io_u **io_us,
			   unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_recv_wr *bad_wr;
	struct rdma_io_u_data *r_io_u_d;
	int i;

	i = 0;
	if (rd->rdma_protocol == FIO_RDMA_CHA_RECV) {
		/* post io_u into recv queue */
		for (i = 0; i < nr; i++) {
			r_io_u_d = io_us[i]->engine_data;
			if (ibv_post_recv(rd->qp, &r_io_u_d->rq_wr, &bad_wr) !=
			    0) {
				log_err("fio: ibv_post_recv fail\n");
				return 1;
			}
		}
	} else if ((rd->rdma_protocol == FIO_RDMA_MEM_READ)
		   || (rd->rdma_protocol == FIO_RDMA_MEM_WRITE)) {
		/* re-post the rq_wr */
		if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_recv fail\n");
			return 1;
		}

		rdma_poll_wait(td, IBV_WC_RECV);

		dprint(FD_IO, "fio: recv FINISH message\n");
		td->done = 1;
		return 0;
	}

	return i;
}

static int fio_rdmaio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct rdmaio_data *rd = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (rd->io_u_queued_nr == (int)td->o.iodepth)
		return FIO_Q_BUSY;

	rd->io_us_queued[rd->io_u_queued_nr] = io_u;
	rd->io_u_queued_nr++;

	dprint_io_u(io_u, "fio_rdmaio_queue");

	return FIO_Q_QUEUED;
}

static void fio_rdmaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct timeval now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		/* queued -> flight */
		rd->io_us_flight[rd->io_u_flight_nr] = io_u;
		rd->io_u_flight_nr++;

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}

static int fio_rdmaio_commit(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct io_u **io_us;
	int ret;

	if (!rd->io_us_queued)
		return 0;

	io_us = rd->io_us_queued;
	do {
		/* RDMA_WRITE or RDMA_READ */
		if (rd->is_client)
			ret = fio_rdmaio_send(td, io_us, rd->io_u_queued_nr);
		else if (!rd->is_client)
			ret = fio_rdmaio_recv(td, io_us, rd->io_u_queued_nr);
		else
			ret = 0;	/* must be a SYNC */

		if (ret > 0) {
			fio_rdmaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);
			rd->io_u_queued_nr -= ret;
			io_us += ret;
			ret = 0;
		} else
			break;
	} while (rd->io_u_queued_nr);

	return ret;
}

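/*
 * Client-side handshake: connect, wait for ESTABLISHED, send the task
 * request (mode and io depth), then wait for the server's reply carrying
 * the remote MR table.
 */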
static int fio_rdmaio_connect(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_conn_param conn_param;
	struct ibv_send_wr *bad_wr;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 10;

	if (rdma_connect(rd->cm_id, &conn_param) != 0) {
		log_err("fio: rdma_connect fail\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_ESTABLISHED\n");
		return 1;
	}

	/* send task request */
	rd->send_buf.mode = htonl(rd->rdma_protocol);
	rd->send_buf.nr = htonl(td->o.iodepth);

	if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_send fail");
		return 1;
	}

	rdma_poll_wait(td, IBV_WC_SEND);

	/* wait for remote MR info from server side */
	rdma_poll_wait(td, IBV_WC_RECV);

	/* In a SEND/RECV test it is good practice to set the iodepth of
	 * the RECV side deeper than that of the SEND side, to avoid RNR
	 * (receiver not ready) errors. The SEND side may issue so many
	 * unsolicited messages before the RECV side has committed
	 * sufficient recv buffers into the recv queue that an RNR error
	 * results. Here the SEND side pauses for a while, during which
	 * the RECV side commits sufficient recv buffers.
	 */
	usleep(500000);

	return 0;
}

static int fio_rdmaio_accept(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_conn_param conn_param;
	struct ibv_send_wr *bad_wr;

	/* rdma_accept() - then wait for accept success */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;

	if (rdma_accept(rd->child_cm_id, &conn_param) != 0) {
		log_err("fio: rdma_accept\n");
		return 1;
	}

	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_ESTABLISHED\n");
		return 1;
	}

	/* wait for request */
	rdma_poll_wait(td, IBV_WC_RECV);

	if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_send fail");
		return 1;
	}

	rdma_poll_wait(td, IBV_WC_SEND);

	return 0;
}

static int fio_rdmaio_open_file(struct thread_data *td, struct fio_file *f)
{
	if (td_read(td))
		return fio_rdmaio_accept(td, f);
	else
		return fio_rdmaio_connect(td, f);
}

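/*
 * Teardown: in the memory-semantic modes the client first sends one last
 * control message so the server's fio_rdmaio_recv() can set td->done,
 * then both sides disconnect and destroy their verbs resources.
 */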
static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_send_wr *bad_wr;

	/* unregister rdma buffer */

	/*
	 * Client sends notification to the server side
	 */
	/* refer to: http://linux.die.net/man/7/rdma_cm */
	if ((rd->is_client == 1) && ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE)
				     || (rd->rdma_protocol ==
					 FIO_RDMA_MEM_READ))) {
		if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_send fail");
			return 1;
		}

		dprint(FD_IO, "fio: close information sent successfully\n");
		rdma_poll_wait(td, IBV_WC_SEND);
	}

	if (rd->is_client == 1)
		rdma_disconnect(rd->cm_id);
	else {
		rdma_disconnect(rd->child_cm_id);
#if 0
		rdma_disconnect(rd->cm_id);
#endif
	}

#if 0
	if (get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_DISCONNECTED) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_DISCONNECTED\n");
		return 1;
	}
#endif

	ibv_destroy_cq(rd->cq);
	ibv_destroy_qp(rd->qp);

	if (rd->is_client == 1)
		rdma_destroy_id(rd->cm_id);
	else {
		rdma_destroy_id(rd->child_cm_id);
		rdma_destroy_id(rd->cm_id);
	}

	ibv_destroy_comp_channel(rd->channel);
	ibv_dealloc_pd(rd->pd);

	return 0;
}

static int fio_rdmaio_setup_connect(struct thread_data *td, const char *host,
				    unsigned short port)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_recv_wr *bad_wr;
	int err;

	rd->addr.sin_family = AF_INET;
	rd->addr.sin_port = htons(port);

	if (inet_aton(host, &rd->addr.sin_addr) != 1) {
		struct hostent *hent;

		hent = gethostbyname(host);
		if (!hent) {
			td_verror(td, errno, "gethostbyname");
			return 1;
		}

		memcpy(&rd->addr.sin_addr, hent->h_addr, 4);
	}

	/* resolve addr */
	err = rdma_resolve_addr(rd->cm_id, NULL, (struct sockaddr *)&rd->addr, 2000);
	if (err != 0) {
		log_err("fio: rdma_resolve_addr: %d\n", err);
		return 1;
	}

	err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ADDR_RESOLVED);
	if (err != 0) {
		log_err("fio: get_next_channel_event: %d\n", err);
		return 1;
	}

	/* resolve route */
	err = rdma_resolve_route(rd->cm_id, 2000);
	if (err != 0) {
		log_err("fio: rdma_resolve_route: %d\n", err);
		return 1;
	}

	err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ROUTE_RESOLVED);
	if (err != 0) {
		log_err("fio: get_next_channel_event: %d\n", err);
		return 1;
	}

	/* create qp and buffer */
	if (fio_rdmaio_setup_qp(td) != 0)
		return 1;

	if (fio_rdmaio_setup_control_msg_buffers(td) != 0)
		return 1;

	/* post recv buf */
	err = ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr);
	if (err != 0) {
		log_err("fio: ibv_post_recv fail: %d\n", err);
		return 1;
	}

	return 0;
}

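/*
 * Server side: bind, listen and wait for the first CONNECT_REQUEST,
 * which supplies the child cm_id used for all further verbs setup.
 */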
static int fio_rdmaio_setup_listen(struct thread_data *td, short port)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_recv_wr *bad_wr;

	rd->addr.sin_family = AF_INET;
	rd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
	rd->addr.sin_port = htons(port);

	/* rdma_listen */
	if (rdma_bind_addr(rd->cm_id, (struct sockaddr *)&rd->addr) != 0) {
		log_err("fio: rdma_bind_addr fail\n");
		return 1;
	}

	if (rdma_listen(rd->cm_id, 3) != 0) {
		log_err("fio: rdma_listen fail\n");
		return 1;
	}

	/* wait for CONNECT_REQUEST */
	if (get_next_channel_event
	    (td, rd->cm_channel, RDMA_CM_EVENT_CONNECT_REQUEST) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_CONNECT_REQUEST\n");
		return 1;
	}

	if (fio_rdmaio_setup_qp(td) != 0)
		return 1;

	if (fio_rdmaio_setup_control_msg_buffers(td) != 0)
		return 1;

	/* post recv buf */
	if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_recv fail\n");
		return 1;
	}

	return 0;
}

static int fio_rdmaio_init(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct flist_head *entry;
	unsigned int max_bs;
	unsigned int port;
	char host[64], buf[128];
	char *sep, *portp, *modep;
	int ret, i = 0;
	struct rlimit rl;

	if (td_rw(td)) {
		log_err("fio: rdma connections must be read OR write\n");
		return 1;
	}
	if (td_random(td)) {
		log_err("fio: RDMA network IO can't be random\n");
		return 1;
	}

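	/*
	 * ibv_reg_mr() pins its buffers, so the locked-memory rlimit must
	 * cover the whole io buffer; try raising the soft limit to the
	 * hard limit before giving up.
	 */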
	/* check RLIMIT_MEMLOCK */
	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		log_err("fio: getrlimit fail: %d(%s)\n",
			errno, strerror(errno));
		return 1;
	}

	/* soft limit */
	if ((rl.rlim_cur != RLIM_INFINITY)
	    && (rl.rlim_cur < td->orig_buffer_size)) {
		log_err("fio: soft RLIMIT_MEMLOCK is: %" PRId64 "\n",
			rl.rlim_cur);
		log_err("fio: total block size is: %zd\n",
			td->orig_buffer_size);
		/* try to set larger RLIMIT_MEMLOCK */
		rl.rlim_cur = rl.rlim_max;
		if (setrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
			log_err("fio: setrlimit fail: %d(%s)\n",
				errno, strerror(errno));
			log_err("fio: you may try to enlarge MEMLOCK as root:\n");
			log_err("# ulimit -l unlimited\n");
			return 1;
		}
	}

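	/* filename is parsed as "host/port[/protocol]"; see the header comment */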
	strcpy(buf, td->o.filename);

	sep = strchr(buf, '/');
	if (!sep)
		goto bad_host;

	*sep = '\0';
	sep++;
	strcpy(host, buf);
	if (!strlen(host))
		goto bad_host;

	modep = NULL;
	portp = sep;
	sep = strchr(portp, '/');
	if (sep) {
		*sep = '\0';
		modep = sep + 1;
	}

	port = strtol(portp, NULL, 10);
	if (!port || port > 65535)
		goto bad_host;

	if (modep) {
		if (!strncmp("rdma_write", modep, strlen(modep)) ||
		    !strncmp("RDMA_WRITE", modep, strlen(modep)))
			rd->rdma_protocol = FIO_RDMA_MEM_WRITE;
		else if (!strncmp("rdma_read", modep, strlen(modep)) ||
			 !strncmp("RDMA_READ", modep, strlen(modep)))
			rd->rdma_protocol = FIO_RDMA_MEM_READ;
		else if (!strncmp("send", modep, strlen(modep)) ||
			 !strncmp("SEND", modep, strlen(modep)))
			rd->rdma_protocol = FIO_RDMA_CHA_SEND;
		else
			goto bad_host;
	} else
		rd->rdma_protocol = FIO_RDMA_MEM_WRITE;

	rd->cq_event_num = 0;

	rd->cm_channel = rdma_create_event_channel();
	if (!rd->cm_channel) {
		log_err("fio: rdma_create_event_channel fail\n");
		return 1;
	}

	ret = rdma_create_id(rd->cm_channel, &rd->cm_id, rd, RDMA_PS_TCP);
	if (ret) {
		log_err("fio: rdma_create_id fail\n");
		return 1;
	}

	if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
	    (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
		rd->rmt_us =
			malloc(FIO_RDMA_MAX_IO_DEPTH * sizeof(struct remote_u));
		memset(rd->rmt_us, 0,
		       FIO_RDMA_MAX_IO_DEPTH * sizeof(struct remote_u));
		rd->rmt_nr = 0;
	}

	rd->io_us_queued = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(rd->io_us_queued, 0, td->o.iodepth * sizeof(struct io_u *));
	rd->io_u_queued_nr = 0;

	rd->io_us_flight = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(rd->io_us_flight, 0, td->o.iodepth * sizeof(struct io_u *));
	rd->io_u_flight_nr = 0;

	rd->io_us_completed = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(rd->io_us_completed, 0, td->o.iodepth * sizeof(struct io_u *));
	rd->io_u_completed_nr = 0;

	if (td_read(td)) {	/* READ as the server */
		rd->is_client = 0;
		/* server rd->rdma_buf_len will be set up after the request arrives */
		ret = fio_rdmaio_setup_listen(td, port);
	} else {		/* WRITE as the client */
		rd->is_client = 1;
		ret = fio_rdmaio_setup_connect(td, host, port);
	}

	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
	/* register each io_u in the free list */
	flist_for_each(entry, &td->io_u_freelist) {
		struct io_u *io_u = flist_entry(entry, struct io_u, list);

		io_u->engine_data = malloc(sizeof(struct rdma_io_u_data));
		memset(io_u->engine_data, 0, sizeof(struct rdma_io_u_data));
		((struct rdma_io_u_data *)io_u->engine_data)->wr_id = i;

		io_u->mr = ibv_reg_mr(rd->pd, io_u->buf, max_bs,
				      IBV_ACCESS_LOCAL_WRITE |
				      IBV_ACCESS_REMOTE_READ |
				      IBV_ACCESS_REMOTE_WRITE);
		if (io_u->mr == NULL) {
			log_err("fio: ibv_reg_mr io_u failed\n");
			return 1;
		}

		rd->send_buf.rmt_us[i].buf =
		    htonll((uint64_t) (unsigned long)io_u->buf);
		rd->send_buf.rmt_us[i].rkey = htonl(io_u->mr->rkey);
		rd->send_buf.rmt_us[i].size = htonl(max_bs);

#if 0
		log_info("fio: Send rkey %x addr %" PRIx64 " len %d to client\n",
			 io_u->mr->rkey, io_u->buf, max_bs);
#endif
		i++;
	}

	rd->send_buf.nr = htonl(i);

	return ret;
bad_host:
	log_err("fio: bad rdma host/port/protocol: %s\n", td->o.filename);
	return 1;
}

static void fio_rdmaio_cleanup(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;

	if (rd)
		free(rd);
}

static int fio_rdmaio_setup(struct thread_data *td)
{
	struct rdmaio_data *rd;

	if (!td->io_ops->data) {
		rd = malloc(sizeof(*rd));

		memset(rd, 0, sizeof(*rd));
		init_rand_seed(&rd->rand_state, (unsigned int) GOLDEN_RATIO_PRIME);
		td->io_ops->data = rd;
	}

	return 0;
}

1204static struct ioengine_ops ioengine_rw = {
b6cf38f0
YR
1205 .name = "rdma",
1206 .version = FIO_IOOPS_VERSION,
1207 .setup = fio_rdmaio_setup,
1208 .init = fio_rdmaio_init,
1209 .prep = fio_rdmaio_prep,
1210 .queue = fio_rdmaio_queue,
1211 .commit = fio_rdmaio_commit,
1212 .getevents = fio_rdmaio_getevents,
1213 .event = fio_rdmaio_event,
1214 .cleanup = fio_rdmaio_cleanup,
1215 .open_file = fio_rdmaio_open_file,
1216 .close_file = fio_rdmaio_close_file,
1217 .flags = FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO,
21b8aee8 1218};
1219
static void fio_init fio_rdmaio_register(void)
{
	register_ioengine(&ioengine_rw);
}

static void fio_exit fio_rdmaio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
}