/*
 * librpma_fio: librpma_apm and librpma_gpspm engines' common header.
 *
 * Copyright 2021-2022, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License,
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
15 | ||
#ifndef LIBRPMA_FIO_H
#define LIBRPMA_FIO_H 1

/* fio core and option-group definitions (relative to the engines/ dir) */
#include "../fio.h"
#include "../optgroup.h"

#include <librpma.h>
/* servers' and clients' common */

/*
 * Report an librpma error via fio's td_vmsg(): the numeric error code is
 * translated to its string form with rpma_err_2str() and attributed to
 * the failing function name.
 *
 * NOTE: every macro-argument use is parenthesized (err was previously
 * passed bare into rpma_err_2str()); err is still evaluated twice, so do
 * not pass an expression with side effects.
 */
#define librpma_td_verror(td, err, func) \
	td_vmsg((td), (err), rpma_err_2str((err)), (func))

/* ceil(a / b) = (a + b - 1) / b */
#define LIBRPMA_FIO_CEIL(a, b) (((a) + (b) - 1) / (b))
31 | ||
/* common option structure for server and client */
struct librpma_fio_options_values {
	/*
	 * FIO considers .off1 == 0 absent so the first meaningful field has to
	 * have padding ahead of it.
	 */
	void *pad;
	/* address of the server — presumably an IP/hostname string; see the option table */
	char *server_ip;
	/* base server listening port */
	char *port;
	/* Direct Write to PMem is possible */
	unsigned int direct_write_to_pmem;
	/* Set to 0 to wait for completion instead of busy-wait polling completion. */
	unsigned int busy_wait_polling;
};
47 | ||
/* option table shared by the librpma_apm and librpma_gpspm engines (defined elsewhere) */
extern struct fio_option librpma_fio_options[];

/*
 * Limited by the maximum length of the private data
 * for rdma_connect() in case of RDMA_PS_TCP (28 bytes).
 */
#define LIBRPMA_FIO_DESCRIPTOR_MAX_SIZE 24

/*
 * Server workspace description — presumably exchanged with the client as
 * connection private data (hence the rdma_connect() size cap above).
 */
struct librpma_fio_workspace {
	uint16_t max_msg_num;		/* # of RQ slots */
	uint8_t direct_write_to_pmem;	/* Direct Write to PMem is possible */
	uint8_t mr_desc_size;		/* size of mr_desc in descriptor[] */
	/* buffer containing mr_desc */
	char descriptor[LIBRPMA_FIO_DESCRIPTOR_MAX_SIZE];
};
63 | ||
/* maximum length (incl. NUL) of a port string written by librpma_fio_td_port() */
#define LIBRPMA_FIO_PORT_STR_LEN_MAX 12

/*
 * Compose this thread's port into port_out (buffer of at least
 * LIBRPMA_FIO_PORT_STR_LEN_MAX bytes) — presumably port_base_str offset by
 * the thread/subjob number; confirm in the implementation.
 */
int librpma_fio_td_port(const char *port_base_str, struct thread_data *td,
		char *port_out);
68 | ||
/* a workspace allocation — DRAM or file-backed PMem (see the allocators below) */
struct librpma_fio_mem {
	/* memory buffer */
	char *mem_ptr;

	/* size of the mapped persistent memory */
	size_t size_mmap;

#ifdef CONFIG_LIBPMEM2_INSTALLED
	/* libpmem2 structure used for mapping PMem */
	struct pmem2_map *map;
#endif
};
81 | ||
/*
 * Allocate size bytes of DRAM described by *mem — presumably returns the
 * usable buffer pointer or NULL on failure; confirm in the implementation.
 */
char *librpma_fio_allocate_dram(struct thread_data *td, size_t size,
		struct librpma_fio_mem *mem);

/* Map size bytes of PMem backed by file f; same return convention as above. */
char *librpma_fio_allocate_pmem(struct thread_data *td, struct fio_file *f,
		size_t size, struct librpma_fio_mem *mem);

/* Release whatever librpma_fio_allocate_{dram,pmem}() acquired in *mem. */
void librpma_fio_free(struct librpma_fio_mem *mem);
89 | ||
/* clients' common */

/* engine-specific flush of the [first_io_u, last_io_u] range (len bytes) */
typedef int (*librpma_fio_flush_t)(struct thread_data *td,
		struct io_u *first_io_u, struct io_u *last_io_u,
		unsigned long long int len);

/*
 * Extract the io_u index carried by a work completion into *io_u_index.
 *
 * RETURN VALUE
 * - ( 1) - on success
 * - ( 0) - skip
 * - (-1) - on error
 */
typedef int (*librpma_fio_get_io_u_index_t)(struct ibv_wc *wc,
		unsigned int *io_u_index);
104 | ||
/* per-thread state shared by both client engines (td->io_ops_data) */
struct librpma_fio_client_data {
	struct rpma_peer *peer;
	struct rpma_conn *conn;
	struct rpma_cq *cq;

	/* aligned td->orig_buffer */
	char *orig_buffer_aligned;

	/* io_us' base address memory registration (cd->orig_buffer_aligned) */
	struct rpma_mr_local *orig_mr;

	struct librpma_fio_workspace *ws;

	/* a server's memory representation */
	struct rpma_mr_remote *server_mr;
	enum rpma_flush_type server_mr_flush_type;

	/* remote workspace description */
	size_t ws_size;

	/* in-memory queues (queued -> in flight -> completed) */
	struct io_u **io_us_queued;
	int io_u_queued_nr;
	struct io_u **io_us_flight;
	int io_u_flight_nr;
	struct io_u **io_us_completed;
	int io_u_completed_nr;

	/* SQ control. Note: all of them have to be kept in sync. */
	uint32_t op_send_posted;
	uint32_t op_send_completed;
	uint32_t op_recv_completed;

	/* engine-provided operations (see the typedefs above) */
	librpma_fio_flush_t flush;
	librpma_fio_get_io_u_index_t get_io_u_index;

	/* engine-specific client data */
	void *client_data;
};
144 | ||
/*
 * Common client-side implementations — names mirror fio's ioengine_ops
 * callbacks (init/queue/commit/getevents/event/errdetails).
 */
int librpma_fio_client_init(struct thread_data *td,
		struct rpma_conn_cfg *cfg);
void librpma_fio_client_cleanup(struct thread_data *td);

/* no-op file hook for engines that have no real file to open/close */
int librpma_fio_file_nop(struct thread_data *td, struct fio_file *f);
int librpma_fio_client_get_file_size(struct thread_data *td,
		struct fio_file *f);

int librpma_fio_client_post_init(struct thread_data *td);

enum fio_q_status librpma_fio_client_queue(struct thread_data *td,
		struct io_u *io_u);

int librpma_fio_client_commit(struct thread_data *td);

int librpma_fio_client_getevents(struct thread_data *td, unsigned int min,
		unsigned int max, const struct timespec *t);

struct io_u *librpma_fio_client_event(struct thread_data *td, int event);

char *librpma_fio_client_errdetails(struct io_u *io_u);
166 | ||
167 | static inline int librpma_fio_client_io_read(struct thread_data *td, | |
168 | struct io_u *io_u, int flags) | |
169 | { | |
170 | struct librpma_fio_client_data *ccd = td->io_ops_data; | |
171 | size_t dst_offset = (char *)(io_u->xfer_buf) - ccd->orig_buffer_aligned; | |
172 | size_t src_offset = io_u->offset; | |
173 | int ret; | |
174 | ||
175 | if ((ret = rpma_read(ccd->conn, ccd->orig_mr, dst_offset, | |
176 | ccd->server_mr, src_offset, io_u->xfer_buflen, | |
177 | flags, (void *)(uintptr_t)io_u->index))) { | |
178 | librpma_td_verror(td, ret, "rpma_read"); | |
179 | return -1; | |
180 | } | |
181 | ||
182 | return 0; | |
183 | } | |
184 | ||
185 | static inline int librpma_fio_client_io_write(struct thread_data *td, | |
186 | struct io_u *io_u) | |
187 | { | |
188 | struct librpma_fio_client_data *ccd = td->io_ops_data; | |
189 | size_t src_offset = (char *)(io_u->xfer_buf) - ccd->orig_buffer_aligned; | |
190 | size_t dst_offset = io_u->offset; | |
191 | int ret; | |
192 | ||
193 | if ((ret = rpma_write(ccd->conn, ccd->server_mr, dst_offset, | |
194 | ccd->orig_mr, src_offset, io_u->xfer_buflen, | |
195 | RPMA_F_COMPLETION_ON_ERROR, | |
196 | (void *)(uintptr_t)io_u->index))) { | |
197 | librpma_td_verror(td, ret, "rpma_write"); | |
198 | return -1; | |
199 | } | |
200 | ||
201 | return 0; | |
202 | } | |
203 | ||
/*
 * Busy-poll the CQ until every posted SEND has completed, then — once the
 * SEND and RECV counters agree — reset the SQ control counters to their
 * initial (all-zero) state.
 *
 * Returns 0 on success (or on a CQ polling error, see note below) and -1
 * when a completion is failed or is not an IBV_WC_SEND.
 */
static inline int librpma_fio_client_io_complete_all_sends(
		struct thread_data *td)
{
	struct librpma_fio_client_data *ccd = td->io_ops_data;
	struct ibv_wc wc;
	int ret;

	while (ccd->op_send_posted != ccd->op_send_completed) {
		/* get a completion */
		ret = rpma_cq_get_wc(ccd->cq, 1, &wc, NULL);
		if (ret == RPMA_E_NO_COMPLETION) {
			/* lack of completion is not an error */
			continue;
		} else if (ret != 0) {
			/* an error occurred */
			librpma_td_verror(td, ret, "rpma_cq_get_wc");
			/*
			 * NOTE(review): breaking here still returns 0, so a
			 * CQ polling error is invisible to the caller —
			 * looks like deliberate best-effort cleanup; verify.
			 */
			break;
		}

		/* a failed completion aborts the drain */
		if (wc.status != IBV_WC_SUCCESS)
			return -1;

		if (wc.opcode == IBV_WC_SEND)
			++ccd->op_send_completed;
		else {
			log_err(
				"A completion other than IBV_WC_SEND got during cleaning up the CQ from SENDs\n");
			return -1;
		}
	}

	/*
	 * All posted SENDs are completed and RECVs for them (responses) are
	 * completed. This is the initial situation so the counters are reset.
	 */
	if (ccd->op_send_posted == ccd->op_send_completed &&
			ccd->op_send_completed == ccd->op_recv_completed) {
		ccd->op_send_posted = 0;
		ccd->op_send_completed = 0;
		ccd->op_recv_completed = 0;
	}

	return 0;
}
248 | ||
/* servers' common */

/*
 * Engine-specific hook invoked with a pending connection request —
 * presumably to post initial RECVs / configure the request before it is
 * accepted; confirm against the engines' sources.
 */
typedef int (*librpma_fio_prepare_connection_t)(
		struct thread_data *td,
		struct rpma_conn_req *conn_req);

/* per-thread server state (td->io_ops_data) */
struct librpma_fio_server_data {
	struct rpma_peer *peer;

	/* resources of an incoming connection */
	struct rpma_conn *conn;
	struct rpma_cq *cq;

	/* the exposed workspace: its base address, registration and backing memory */
	char *ws_ptr;
	struct rpma_mr_local *ws_mr;
	struct librpma_fio_mem mem;

	/* engine-specific server data */
	void *server_data;

	librpma_fio_prepare_connection_t prepare_connection;
};
271 | ||
/* common server-side implementations of the fio ioengine callbacks */
int librpma_fio_server_init(struct thread_data *td);

void librpma_fio_server_cleanup(struct thread_data *td);

int librpma_fio_server_open_file(struct thread_data *td,
		struct fio_file *f, struct rpma_conn_cfg *cfg);

int librpma_fio_server_close_file(struct thread_data *td,
		struct fio_file *f);

#endif /* LIBRPMA_FIO_H */