[fio.git] / engines / librpma_fio.h
/*
 * librpma_fio: librpma_apm and librpma_gpspm engines' common header.
 *
 * Copyright 2021, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License,
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef LIBRPMA_FIO_H
#define LIBRPMA_FIO_H 1

#include "../fio.h"
#include "../optgroup.h"

#include <librpma.h>

/* servers' and clients' common */

#define librpma_td_verror(td, err, func) \
	td_vmsg((td), (err), rpma_err_2str(err), (func))

/* ceil(a / b) = (a + b - 1) / b */
#define LIBRPMA_FIO_CEIL(a, b) (((a) + (b) - 1) / (b))
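/*
 * For example, LIBRPMA_FIO_CEIL(10, 4) expands to (10 + 4 - 1) / 4 == 3,
 * i.e. ceil(10 / 4), while for exact multiples the added (b - 1) does not
 * change the result: LIBRPMA_FIO_CEIL(8, 4) == 2.
 */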

/* common option structure for server and client */
struct librpma_fio_options_values {
	/*
	 * FIO considers .off1 == 0 absent so the first meaningful field has to
	 * have padding ahead of it.
	 */
	void *pad;
	char *server_ip;
	/* base server listening port */
	char *port;
	/* Direct Write to PMem is possible */
	unsigned int direct_write_to_pmem;
	/* set to 0 to wait for completion instead of busy-wait polling for it */
	unsigned int busy_wait_polling;
};

extern struct fio_option librpma_fio_options[];
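/*
 * Illustrative sketch only (not part of this header): entries of
 * librpma_fio_options[] are expected to bind option names to the fields above
 * via offsetof(), which is why offset 0 (the pad field) must stay unused.
 * The option name and group below are assumptions made for the example:
 *
 *	{
 *		.name	= "serverip",
 *		.lname	= "librpma server IP address",
 *		.type	= FIO_OPT_STR_STORE,
 *		.off1	= offsetof(struct librpma_fio_options_values, server_ip),
 *		.help	= "IP address the server is listening on",
 *		.def	= "",
 *		.category = FIO_OPT_C_ENGINE,
 *		.group	= FIO_OPT_G_LIBRPMA,
 *	},
 */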
49
50/*
51 * Limited by the maximum length of the private data
52 * for rdma_connect() in case of RDMA_PS_TCP (28 bytes).
53 */
54#define LIBRPMA_FIO_DESCRIPTOR_MAX_SIZE 24
55
56struct librpma_fio_workspace {
57 uint16_t max_msg_num; /* # of RQ slots */
58 uint8_t direct_write_to_pmem; /* Direct Write to PMem is possible */
59 uint8_t mr_desc_size; /* size of mr_desc in descriptor[] */
60 /* buffer containing mr_desc */
61 char descriptor[LIBRPMA_FIO_DESCRIPTOR_MAX_SIZE];
62};
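/*
 * Note (simple arithmetic, assuming the compiler inserts no padding here):
 * sizeof(struct librpma_fio_workspace) == 2 + 1 + 1 + 24 == 28 bytes,
 * which matches the RDMA_PS_TCP private data limit quoted above, so the
 * whole workspace description can presumably be carried in the
 * connection's private data.
 */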

#define LIBRPMA_FIO_PORT_STR_LEN_MAX 12

int librpma_fio_td_port(const char *port_base_str, struct thread_data *td,
		char *port_out);
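/*
 * Expected call pattern (a sketch; how the per-thread port is derived from
 * port_base_str and the thread's number is implemented elsewhere and only
 * assumed here):
 *
 *	char port_td[LIBRPMA_FIO_PORT_STR_LEN_MAX];
 *
 *	if (librpma_fio_td_port(o->port, td, port_td))
 *		return -1;
 */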

struct librpma_fio_mem {
	/* memory buffer */
	char *mem_ptr;

	/* size of the mapped persistent memory */
	size_t size_mmap;
};

char *librpma_fio_allocate_dram(struct thread_data *td, size_t size,
		struct librpma_fio_mem *mem);

char *librpma_fio_allocate_pmem(struct thread_data *td, const char *filename,
		size_t size, struct librpma_fio_mem *mem);

void librpma_fio_free(struct librpma_fio_mem *mem);
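/*
 * Typical pairing (a sketch; choosing PMem vs DRAM based on whether a file
 * name is configured, and NULL being returned on failure, are assumptions
 * of this example):
 *
 *	struct librpma_fio_mem mem;
 *	char *ws_ptr;
 *
 *	ws_ptr = f->file_name ?
 *		librpma_fio_allocate_pmem(td, f->file_name, ws_size, &mem) :
 *		librpma_fio_allocate_dram(td, ws_size, &mem);
 *	if (ws_ptr == NULL)
 *		return -1;
 *	...
 *	librpma_fio_free(&mem);
 */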

/* clients' common */

typedef int (*librpma_fio_flush_t)(struct thread_data *td,
		struct io_u *first_io_u, struct io_u *last_io_u,
		unsigned long long int len);

/*
 * RETURN VALUE
 * - ( 1) - on success
 * - ( 0) - skip
 * - (-1) - on error
 */
typedef int (*librpma_fio_get_io_u_index_t)(struct rpma_completion *cmpl,
		unsigned int *io_u_index);
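/*
 * A possible callback of this type (a sketch only): the inline helpers below
 * post reads and writes with (void *)(uintptr_t)io_u->index as the completion
 * context, so an engine can recover the index from cmpl->op_context:
 *
 *	static int client_get_io_u_index(struct rpma_completion *cmpl,
 *			unsigned int *io_u_index)
 *	{
 *		memcpy(io_u_index, &cmpl->op_context, sizeof(*io_u_index));
 *
 *		return 1;
 *	}
 */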

struct librpma_fio_client_data {
	struct rpma_peer *peer;
	struct rpma_conn *conn;

	/* aligned td->orig_buffer */
	char *orig_buffer_aligned;

	/* io_us' base address memory registration (ccd->orig_buffer_aligned) */
	struct rpma_mr_local *orig_mr;

	struct librpma_fio_workspace *ws;

	/* a server's memory representation */
	struct rpma_mr_remote *server_mr;
	enum rpma_flush_type server_mr_flush_type;

	/* remote workspace description */
	size_t ws_size;

	/* in-memory queues */
	struct io_u **io_us_queued;
	int io_u_queued_nr;
	struct io_u **io_us_flight;
	int io_u_flight_nr;
	struct io_u **io_us_completed;
	int io_u_completed_nr;

	/* SQ control. Note: all of them have to be kept in sync. */
	uint32_t op_send_posted;
	uint32_t op_send_completed;
	uint32_t op_recv_completed;

	librpma_fio_flush_t flush;
	librpma_fio_get_io_u_index_t get_io_u_index;

	/* engine-specific client data */
	void *client_data;
};

int librpma_fio_client_init(struct thread_data *td,
		struct rpma_conn_cfg *cfg);
void librpma_fio_client_cleanup(struct thread_data *td);

int librpma_fio_file_nop(struct thread_data *td, struct fio_file *f);
int librpma_fio_client_get_file_size(struct thread_data *td,
		struct fio_file *f);

int librpma_fio_client_post_init(struct thread_data *td);

enum fio_q_status librpma_fio_client_queue(struct thread_data *td,
		struct io_u *io_u);

int librpma_fio_client_commit(struct thread_data *td);

int librpma_fio_client_getevents(struct thread_data *td, unsigned int min,
		unsigned int max, const struct timespec *t);

struct io_u *librpma_fio_client_event(struct thread_data *td, int event);

char *librpma_fio_client_errdetails(struct io_u *io_u);

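/*
 * The client helpers above are meant to be wired into an engine's
 * struct ioengine_ops, the way the librpma_apm and librpma_gpspm client
 * engines use them. A sketch follows; the engine name, the FIO_DISKLESSIO
 * flag and the client_init() wrapper (an engine-specific function expected
 * to build an rpma_conn_cfg and call librpma_fio_client_init()) are
 * assumptions of this example:
 *
 *	static int client_init(struct thread_data *td);
 *
 *	static struct ioengine_ops ioengine_client = {
 *		.name			= "librpma_example_client",
 *		.version		= FIO_IOOPS_VERSION,
 *		.init			= client_init,
 *		.post_init		= librpma_fio_client_post_init,
 *		.get_file_size		= librpma_fio_client_get_file_size,
 *		.open_file		= librpma_fio_file_nop,
 *		.queue			= librpma_fio_client_queue,
 *		.commit			= librpma_fio_client_commit,
 *		.getevents		= librpma_fio_client_getevents,
 *		.event			= librpma_fio_client_event,
 *		.errdetails		= librpma_fio_client_errdetails,
 *		.close_file		= librpma_fio_file_nop,
 *		.cleanup		= librpma_fio_client_cleanup,
 *		.flags			= FIO_DISKLESSIO,
 *		.options		= librpma_fio_options,
 *		.option_struct_size	= sizeof(struct librpma_fio_options_values),
 *	};
 */
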
static inline int librpma_fio_client_io_read(struct thread_data *td,
		struct io_u *io_u, int flags)
{
	struct librpma_fio_client_data *ccd = td->io_ops_data;
	/* offset of the io_u buffer within the locally registered memory */
	size_t dst_offset = (char *)(io_u->xfer_buf) - ccd->orig_buffer_aligned;
	/* offset within the remote (server's) memory region */
	size_t src_offset = io_u->offset;
	int ret;

	if ((ret = rpma_read(ccd->conn, ccd->orig_mr, dst_offset,
			ccd->server_mr, src_offset, io_u->xfer_buflen,
			flags, (void *)(uintptr_t)io_u->index))) {
		librpma_td_verror(td, ret, "rpma_read");
		return -1;
	}

	return 0;
}

static inline int librpma_fio_client_io_write(struct thread_data *td,
		struct io_u *io_u)
{
	struct librpma_fio_client_data *ccd = td->io_ops_data;
	size_t src_offset = (char *)(io_u->xfer_buf) - ccd->orig_buffer_aligned;
	size_t dst_offset = io_u->offset;
	int ret;

	if ((ret = rpma_write(ccd->conn, ccd->server_mr, dst_offset,
			ccd->orig_mr, src_offset, io_u->xfer_buflen,
			RPMA_F_COMPLETION_ON_ERROR,
			(void *)(uintptr_t)io_u->index))) {
		librpma_td_verror(td, ret, "rpma_write");
		return -1;
	}

	return 0;
}

static inline int librpma_fio_client_io_complete_all_sends(
		struct thread_data *td)
{
	struct librpma_fio_client_data *ccd = td->io_ops_data;
	struct rpma_completion cmpl;
	int ret;

	while (ccd->op_send_posted != ccd->op_send_completed) {
		/* get a completion */
		ret = rpma_conn_completion_get(ccd->conn, &cmpl);
		if (ret == RPMA_E_NO_COMPLETION) {
			/* lack of completion is not an error */
			continue;
		} else if (ret != 0) {
			/* an error occurred */
			librpma_td_verror(td, ret, "rpma_conn_completion_get");
			break;
		}

		if (cmpl.op_status != IBV_WC_SUCCESS)
			return -1;

		if (cmpl.op == RPMA_OP_SEND)
			++ccd->op_send_completed;
		else {
			log_err(
				"A completion other than RPMA_OP_SEND was received while cleaning up the CQ from SENDs\n");
			return -1;
		}
	}

	/*
	 * All posted SENDs are completed and the RECVs for them (the
	 * responses) are completed. This is the initial state, so the
	 * counters are reset.
	 */
	if (ccd->op_send_posted == ccd->op_send_completed &&
			ccd->op_send_completed == ccd->op_recv_completed) {
		ccd->op_send_posted = 0;
		ccd->op_send_completed = 0;
		ccd->op_recv_completed = 0;
	}

	return 0;
}

/* servers' common */

typedef int (*librpma_fio_prepare_connection_t)(
		struct thread_data *td,
		struct rpma_conn_req *conn_req);

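/*
 * A hypothetical prepare_connection callback (a sketch only): a
 * messaging-based engine can use this hook to post RECVs on the connection
 * request before it is accepted, e.g. via rpma_conn_req_recv(). The msg_mr
 * registration, the MAX_MSG_SIZE constant and the exact librpma call used
 * here are assumptions of this example:
 *
 *	static int server_prepare_connection(struct thread_data *td,
 *			struct rpma_conn_req *conn_req)
 *	{
 *		int ret;
 *
 *		ret = rpma_conn_req_recv(conn_req, msg_mr, 0, MAX_MSG_SIZE,
 *				NULL);
 *		if (ret)
 *			librpma_td_verror(td, ret, "rpma_conn_req_recv");
 *
 *		return ret;
 *	}
 */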
struct librpma_fio_server_data {
	struct rpma_peer *peer;

	/* resources of an incoming connection */
	struct rpma_conn *conn;

	char *ws_ptr;
	struct rpma_mr_local *ws_mr;
	struct librpma_fio_mem mem;

	/* engine-specific server data */
	void *server_data;

	librpma_fio_prepare_connection_t prepare_connection;
};

int librpma_fio_server_init(struct thread_data *td);

void librpma_fio_server_cleanup(struct thread_data *td);

int librpma_fio_server_open_file(struct thread_data *td,
		struct fio_file *f, struct rpma_conn_cfg *cfg);

int librpma_fio_server_close_file(struct thread_data *td,
		struct fio_file *f);

#endif /* LIBRPMA_FIO_H */