Commit | Line | Data |
---|---|---|
6a98d71d JW |
1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* | |
3 | * RDMA Transport Layer | |
4 | * | |
5 | * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved. | |
6 | * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved. | |
7 | * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved. | |
8 | */ | |
9 | ||
10 | #undef pr_fmt | |
11 | #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt | |
12 | ||
13 | #include <linux/module.h> | |
14 | #include <linux/rculist.h> | |
09e0dbbe | 15 | #include <linux/random.h> |
6a98d71d JW |
16 | |
17 | #include "rtrs-clt.h" | |
18 | #include "rtrs-log.h" | |
5a93929d | 19 | #include "rtrs-clt-trace.h" |
6a98d71d JW |
20 | |
21 | #define RTRS_CONNECT_TIMEOUT_MS 30000 | |
22 | /* | |
23 | * Wait a bit before trying to reconnect after a failure | |
24 | * in order to give the server time to finish its cleanup, which
25 | * otherwise leads to "false positive" failed reconnect attempts
26 | */ | |
27 | #define RTRS_RECONNECT_BACKOFF 1000 | |
09e0dbbe DK |
28 | /* |
29 | * Wait for an additional random time between 0 and 8 seconds
30 | * before starting to reconnect, to avoid clients reconnecting
31 | * all at once in case of a major network outage
32 | */ | |
33 | #define RTRS_RECONNECT_SEED 8 | |
6a98d71d | 34 | |
03e9b33a | 35 | #define FIRST_CONN 0x01 |
6fc45596 JW |
36 | /* limit to 128 * 4k = 512k max IO */ |
37 | #define RTRS_MAX_SEGMENTS 128 | |
03e9b33a | 38 | |
6a98d71d JW |
39 | MODULE_DESCRIPTION("RDMA Transport Client"); |
40 | MODULE_LICENSE("GPL"); | |
41 | ||
42 | static const struct rtrs_rdma_dev_pd_ops dev_pd_ops; | |
43 | static struct rtrs_rdma_dev_pd dev_pd = { | |
44 | .ops = &dev_pd_ops | |
45 | }; | |
46 | ||
47 | static struct workqueue_struct *rtrs_wq; | |
48 | static struct class *rtrs_clt_dev_class; | |
49 | ||
f3433d79 | 50 | static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt) |
6a98d71d | 51 | { |
caa84d95 | 52 | struct rtrs_clt_path *clt_path; |
6a98d71d JW |
53 | bool connected = false; |
54 | ||
55 | rcu_read_lock(); | |
caa84d95 | 56 | list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) |
57eb9382 GJ |
57 | if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) { |
58 | connected = true; | |
59 | break; | |
60 | } | |
6a98d71d JW |
61 | rcu_read_unlock(); |
62 | ||
63 | return connected; | |
64 | } | |
65 | ||
66 | static struct rtrs_permit * | |
f3433d79 | 67 | __rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type) |
6a98d71d JW |
68 | { |
69 | size_t max_depth = clt->queue_depth; | |
70 | struct rtrs_permit *permit; | |
71 | int bit; | |
72 | ||
73 | /* | |
74 | * Adapted from null_blk get_tag(). Callers from different cpus may | |
75 | * grab the same bit, since find_first_zero_bit is not atomic. | |
76 | * But then the test_and_set_bit_lock will fail for all the | |
77 | * callers but one, so that they will loop again. | |
78 | * This way an explicit spinlock is not required. | |
79 | */ | |
80 | do { | |
81 | bit = find_first_zero_bit(clt->permits_map, max_depth); | |
4693d6b7 | 82 | if (bit >= max_depth) |
6a98d71d | 83 | return NULL; |
4693d6b7 | 84 | } while (test_and_set_bit_lock(bit, clt->permits_map)); |
6a98d71d JW |
85 | |
86 | permit = get_permit(clt, bit); | |
87 | WARN_ON(permit->mem_id != bit); | |
88 | permit->cpu_id = raw_smp_processor_id(); | |
89 | permit->con_type = con_type; | |
90 | ||
91 | return permit; | |
92 | } | |
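
A minimal stand-alone sketch of the lockless tag-grab pattern described in
the comment above (grab_tag is a hypothetical name, not part of rtrs):

	/* Returns a free tag index, or -1 if the map is full. */
	static int grab_tag(unsigned long *map, unsigned long depth)
	{
		unsigned long bit;

		do {
			bit = find_first_zero_bit(map, depth);
			if (bit >= depth)
				return -1;
			/*
			 * Racing CPUs may find the same free bit, but
			 * test_and_set_bit_lock() is atomic: exactly one
			 * caller wins, the losers loop and pick another bit.
			 */
		} while (test_and_set_bit_lock(bit, map));

		return bit;
	}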
93 | ||
f3433d79 | 94 | static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt, |
6a98d71d JW |
95 | struct rtrs_permit *permit) |
96 | { | |
97 | clear_bit_unlock(permit->mem_id, clt->permits_map); | |
98 | } | |
99 | ||
100 | /** | |
101 | * rtrs_clt_get_permit() - allocates permit for future RDMA operation | |
102 | * @clt: Current session | |
103 | * @con_type: Type of connection to use with the permit | |
104 | * @can_wait: Wait type | |
105 | * | |
106 | * Description: | |
107 | * Allocates a permit for the following RDMA operation. The permit is
108 | * used to preallocate all resources and to propagate memory pressure
109 | * upwards earlier.
110 | * | |
111 | * Context: | |
9f455eea | 112 | * Can sleep if @can_wait == RTRS_PERMIT_WAIT
6a98d71d | 113 | */ |
f3433d79 | 114 | struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt, |
6a98d71d | 115 | enum rtrs_clt_con_type con_type, |
9f455eea | 116 | enum wait_type can_wait) |
6a98d71d JW |
117 | { |
118 | struct rtrs_permit *permit; | |
119 | DEFINE_WAIT(wait); | |
120 | ||
121 | permit = __rtrs_get_permit(clt, con_type); | |
4693d6b7 | 122 | if (permit || !can_wait) |
6a98d71d JW |
123 | return permit; |
124 | ||
125 | do { | |
126 | prepare_to_wait(&clt->permits_wait, &wait, | |
127 | TASK_UNINTERRUPTIBLE); | |
128 | permit = __rtrs_get_permit(clt, con_type); | |
4693d6b7 | 129 | if (permit) |
6a98d71d JW |
130 | break; |
131 | ||
132 | io_schedule(); | |
133 | } while (1); | |
134 | ||
135 | finish_wait(&clt->permits_wait, &wait); | |
136 | ||
137 | return permit; | |
138 | } | |
139 | EXPORT_SYMBOL(rtrs_clt_get_permit); | |
140 | ||
141 | /** | |
142 | * rtrs_clt_put_permit() - puts allocated permit | |
143 | * @clt: Current session | |
144 | * @permit: Permit to be freed | |
145 | * | |
146 | * Context: | |
147 | * Does not matter | |
148 | */ | |
f3433d79 VT |
149 | void rtrs_clt_put_permit(struct rtrs_clt_sess *clt, |
150 | struct rtrs_permit *permit) | |
6a98d71d JW |
151 | { |
152 | if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map))) | |
153 | return; | |
154 | ||
155 | __rtrs_put_permit(clt, permit); | |
156 | ||
157 | /* | |
158 | * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list | |
159 | * before calling schedule(). So if rtrs_clt_get_permit() is sleeping | |
160 | * it must have added itself to &clt->permits_wait before | |
161 | * __rtrs_put_permit() finished. | |
162 | * Hence it is safe to guard wake_up() with a waitqueue_active() test. | |
163 | */ | |
164 | if (waitqueue_active(&clt->permits_wait)) | |
165 | wake_up(&clt->permits_wait); | |
166 | } | |
167 | EXPORT_SYMBOL(rtrs_clt_put_permit); | |
168 | ||
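
The waitqueue_active() test above is safe only because waiter and waker are
strictly ordered; a condensed sketch of that pairing, with names shortened:

	/*
	 *   waiter (rtrs_clt_get_permit)     waker (rtrs_clt_put_permit)
	 *   ----------------------------     ---------------------------
	 *   prepare_to_wait(&wq, ...);       clear_bit_unlock(bit, map);
	 *   permit = __rtrs_get_permit();    if (waitqueue_active(&wq))
	 *   if (!permit)                             wake_up(&wq);
	 *           io_schedule();
	 *   finish_wait(&wq, ...);
	 *
	 * prepare_to_wait() enqueues the waiter *before* the permit is
	 * re-checked, so a waker that frees a permit afterwards can never
	 * observe an empty queue while a waiter still needs the wake-up.
	 */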
6a98d71d JW |
169 | /** |
170 | * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit | |
caa84d95 | 171 | * @clt_path: client path pointer |
6a98d71d JW |
172 | * @permit: permit for the allocation of the RDMA buffer |
173 | * Note: | |
174 | * IO connections start from 1.
175 | * Connection 0 is for user messages.
176 | */ | |
177 | static | |
caa84d95 | 178 | struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path, |
6a98d71d JW |
179 | struct rtrs_permit *permit) |
180 | { | |
181 | int id = 0; | |
182 | ||
4693d6b7 | 183 | if (permit->con_type == RTRS_IO_CON) |
caa84d95 | 184 | id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1; |
6a98d71d | 185 | |
caa84d95 | 186 | return to_clt_con(clt_path->s.con[id]); |
6a98d71d JW |
187 | } |
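
As a worked example of the mapping above (hypothetical values): with
s.irq_con_num == 5 there are four IO connections, cid 1..4, and user messages
always use cid 0:

	/*
	 *   cpu_id 0 -> cid 1      cpu_id 4 -> cid 1
	 *   cpu_id 1 -> cid 2      cpu_id 5 -> cid 2
	 *   cpu_id 2 -> cid 3      cpu_id 6 -> cid 3
	 *   cpu_id 3 -> cid 4      non-IO permits -> cid 0
	 */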
188 | ||
189 | /** | |
7a8732a6 | 190 | * rtrs_clt_change_state() - change the session state through session state |
6a98d71d JW |
191 | * machine. |
192 | * | |
caa84d95 | 193 | * @clt_path: client path to change the state of. |
6a98d71d JW |
194 | * @new_state: state to change to. |
195 | * | |
7a8732a6 | 196 | * Returns true if the session's state is changed to the new state, otherwise returns false.
6a98d71d JW |
197 | * |
198 | * Locks: | |
199 | * state_wq lock must be held.
200 | */ | |
caa84d95 | 201 | static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path, |
6a98d71d JW |
202 | enum rtrs_clt_state new_state) |
203 | { | |
204 | enum rtrs_clt_state old_state; | |
205 | bool changed = false; | |
206 | ||
caa84d95 | 207 | lockdep_assert_held(&clt_path->state_wq.lock); |
6a98d71d | 208 | |
caa84d95 | 209 | old_state = clt_path->state; |
6a98d71d JW |
210 | switch (new_state) { |
211 | case RTRS_CLT_CONNECTING: | |
212 | switch (old_state) { | |
213 | case RTRS_CLT_RECONNECTING: | |
214 | changed = true; | |
215 | fallthrough; | |
216 | default: | |
217 | break; | |
218 | } | |
219 | break; | |
220 | case RTRS_CLT_RECONNECTING: | |
221 | switch (old_state) { | |
222 | case RTRS_CLT_CONNECTED: | |
223 | case RTRS_CLT_CONNECTING_ERR: | |
224 | case RTRS_CLT_CLOSED: | |
225 | changed = true; | |
226 | fallthrough; | |
227 | default: | |
228 | break; | |
229 | } | |
230 | break; | |
231 | case RTRS_CLT_CONNECTED: | |
232 | switch (old_state) { | |
233 | case RTRS_CLT_CONNECTING: | |
234 | changed = true; | |
235 | fallthrough; | |
236 | default: | |
237 | break; | |
238 | } | |
239 | break; | |
240 | case RTRS_CLT_CONNECTING_ERR: | |
241 | switch (old_state) { | |
242 | case RTRS_CLT_CONNECTING: | |
243 | changed = true; | |
244 | fallthrough; | |
245 | default: | |
246 | break; | |
247 | } | |
248 | break; | |
249 | case RTRS_CLT_CLOSING: | |
250 | switch (old_state) { | |
251 | case RTRS_CLT_CONNECTING: | |
252 | case RTRS_CLT_CONNECTING_ERR: | |
253 | case RTRS_CLT_RECONNECTING: | |
254 | case RTRS_CLT_CONNECTED: | |
255 | changed = true; | |
256 | fallthrough; | |
257 | default: | |
258 | break; | |
259 | } | |
260 | break; | |
261 | case RTRS_CLT_CLOSED: | |
262 | switch (old_state) { | |
263 | case RTRS_CLT_CLOSING: | |
264 | changed = true; | |
265 | fallthrough; | |
266 | default: | |
267 | break; | |
268 | } | |
269 | break; | |
270 | case RTRS_CLT_DEAD: | |
271 | switch (old_state) { | |
272 | case RTRS_CLT_CLOSED: | |
273 | changed = true; | |
274 | fallthrough; | |
275 | default: | |
276 | break; | |
277 | } | |
278 | break; | |
279 | default: | |
280 | break; | |
281 | } | |
282 | if (changed) { | |
caa84d95 VT |
283 | clt_path->state = new_state; |
284 | wake_up_locked(&clt_path->state_wq); | |
6a98d71d JW |
285 | } |
286 | ||
287 | return changed; | |
288 | } | |
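
The nested switches above encode a small transition table; an equivalent
data-driven form (a sketch only, assuming the enum order of rtrs-clt.h with
RTRS_CLT_DEAD as the last state) could look like:

	/* clt_state_allowed[new][old] is true iff the transition is valid. */
	static const bool
	clt_state_allowed[RTRS_CLT_DEAD + 1][RTRS_CLT_DEAD + 1] = {
		[RTRS_CLT_CONNECTING][RTRS_CLT_RECONNECTING]     = true,
		[RTRS_CLT_RECONNECTING][RTRS_CLT_CONNECTED]      = true,
		[RTRS_CLT_RECONNECTING][RTRS_CLT_CONNECTING_ERR] = true,
		[RTRS_CLT_RECONNECTING][RTRS_CLT_CLOSED]         = true,
		[RTRS_CLT_CONNECTED][RTRS_CLT_CONNECTING]        = true,
		[RTRS_CLT_CONNECTING_ERR][RTRS_CLT_CONNECTING]   = true,
		[RTRS_CLT_CLOSING][RTRS_CLT_CONNECTING]          = true,
		[RTRS_CLT_CLOSING][RTRS_CLT_CONNECTING_ERR]      = true,
		[RTRS_CLT_CLOSING][RTRS_CLT_RECONNECTING]        = true,
		[RTRS_CLT_CLOSING][RTRS_CLT_CONNECTED]           = true,
		[RTRS_CLT_CLOSED][RTRS_CLT_CLOSING]              = true,
		[RTRS_CLT_DEAD][RTRS_CLT_CLOSED]                 = true,
	};
	/* i.e. changed = clt_state_allowed[new_state][old_state]; */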
289 | ||
caa84d95 | 290 | static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path, |
6a98d71d JW |
291 | enum rtrs_clt_state old_state, |
292 | enum rtrs_clt_state new_state) | |
293 | { | |
294 | bool changed = false; | |
295 | ||
caa84d95 VT |
296 | spin_lock_irq(&clt_path->state_wq.lock); |
297 | if (clt_path->state == old_state) | |
298 | changed = rtrs_clt_change_state(clt_path, new_state); | |
299 | spin_unlock_irq(&clt_path->state_wq.lock); | |
6a98d71d JW |
300 | |
301 | return changed; | |
302 | } | |
303 | ||
c1289d5d | 304 | static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path); |
6a98d71d JW |
305 | static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con) |
306 | { | |
caa84d95 | 307 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d | 308 | |
5a93929d SP |
309 | trace_rtrs_rdma_error_recovery(clt_path); |
310 | ||
caa84d95 | 311 | if (rtrs_clt_change_state_from_to(clt_path, |
6a98d71d JW |
312 | RTRS_CLT_CONNECTED, |
313 | RTRS_CLT_RECONNECTING)) { | |
c1289d5d | 314 | queue_work(rtrs_wq, &clt_path->err_recovery_work); |
6a98d71d JW |
315 | } else { |
316 | /* | |
317 | * An error can happen just when establishing a new connection,
318 | * so notify the waiter with the error state; the waiter is
319 | * responsible for cleaning up the rest and reconnecting if needed.
320 | */ | |
caa84d95 | 321 | rtrs_clt_change_state_from_to(clt_path, |
6a98d71d JW |
322 | RTRS_CLT_CONNECTING, |
323 | RTRS_CLT_CONNECTING_ERR); | |
324 | } | |
325 | } | |
326 | ||
327 | static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc) | |
328 | { | |
3b89e92c | 329 | struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); |
6a98d71d | 330 | |
4693d6b7 | 331 | if (wc->status != IB_WC_SUCCESS) { |
d9372794 | 332 | rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n", |
6a98d71d JW |
333 | ib_wc_status_msg(wc->status)); |
334 | rtrs_rdma_error_recovery(con); | |
335 | } | |
336 | } | |
337 | ||
338 | static struct ib_cqe fast_reg_cqe = { | |
339 | .done = rtrs_clt_fast_reg_done | |
340 | }; | |
341 | ||
342 | static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, | |
343 | bool notify, bool can_wait); | |
344 | ||
345 | static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) | |
346 | { | |
347 | struct rtrs_clt_io_req *req = | |
348 | container_of(wc->wr_cqe, typeof(*req), inv_cqe); | |
3b89e92c | 349 | struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); |
6a98d71d | 350 | |
4693d6b7 | 351 | if (wc->status != IB_WC_SUCCESS) { |
d9372794 | 352 | rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n", |
6a98d71d JW |
353 | ib_wc_status_msg(wc->status)); |
354 | rtrs_rdma_error_recovery(con); | |
355 | } | |
356 | req->need_inv = false; | |
4693d6b7 | 357 | if (req->need_inv_comp) |
6a98d71d JW |
358 | complete(&req->inv_comp); |
359 | else | |
360 | /* Complete request from INV callback */ | |
361 | complete_rdma_req(req, req->inv_errno, true, false); | |
362 | } | |
363 | ||
364 | static int rtrs_inv_rkey(struct rtrs_clt_io_req *req) | |
365 | { | |
366 | struct rtrs_clt_con *con = req->con; | |
367 | struct ib_send_wr wr = { | |
368 | .opcode = IB_WR_LOCAL_INV, | |
369 | .wr_cqe = &req->inv_cqe, | |
370 | .send_flags = IB_SEND_SIGNALED, | |
371 | .ex.invalidate_rkey = req->mr->rkey, | |
372 | }; | |
373 | req->inv_cqe.done = rtrs_clt_inv_rkey_done; | |
374 | ||
375 | return ib_post_send(con->c.qp, &wr, NULL); | |
376 | } | |
377 | ||
378 | static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, | |
379 | bool notify, bool can_wait) | |
380 | { | |
381 | struct rtrs_clt_con *con = req->con; | |
caa84d95 | 382 | struct rtrs_clt_path *clt_path; |
6a98d71d JW |
383 | int err; |
384 | ||
385 | if (WARN_ON(!req->in_use)) | |
386 | return; | |
387 | if (WARN_ON(!req->con)) | |
388 | return; | |
caa84d95 | 389 | clt_path = to_clt_path(con->c.path); |
6a98d71d JW |
390 | |
391 | if (req->sg_cnt) { | |
4693d6b7 | 392 | if (req->dir == DMA_FROM_DEVICE && req->need_inv) { |
6a98d71d JW |
393 | /* |
394 | * We are here to invalidate read requests
395 | * ourselves. In the normal scenario the server
396 | * should send an INV for every read request, but
397 | * since we got here, one of two things happened:
398 | *
399 | * 1. this is a failover, when errno != 0
400 | * and can_wait == 1,
401 | *
402 | * 2. something went badly wrong and the
403 | * server forgot to send the INV, so we
404 | * should do it ourselves.
405 | */ | |
406 | ||
4693d6b7 | 407 | if (can_wait) { |
6a98d71d JW |
408 | req->need_inv_comp = true; |
409 | } else { | |
410 | /* This should be IO path, so always notify */ | |
411 | WARN_ON(!notify); | |
412 | /* Save errno for INV callback */ | |
413 | req->inv_errno = errno; | |
414 | } | |
415 | ||
2ece9ec6 | 416 | refcount_inc(&req->ref); |
6a98d71d | 417 | err = rtrs_inv_rkey(req); |
4693d6b7 | 418 | if (err) { |
d9372794 | 419 | rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n", |
6a98d71d | 420 | req->mr->rkey, err); |
4693d6b7 | 421 | } else if (can_wait) { |
6a98d71d JW |
422 | wait_for_completion(&req->inv_comp); |
423 | } else { | |
424 | /* | |
425 | * Something went wrong, so request will be | |
426 | * completed from INV callback. | |
427 | */ | |
428 | WARN_ON_ONCE(1); | |
429 | ||
430 | return; | |
431 | } | |
2ece9ec6 JW |
432 | if (!refcount_dec_and_test(&req->ref)) |
433 | return; | |
6a98d71d | 434 | } |
caa84d95 | 435 | ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, |
6a98d71d JW |
436 | req->sg_cnt, req->dir); |
437 | } | |
2ece9ec6 JW |
438 | if (!refcount_dec_and_test(&req->ref)) |
439 | return; | |
0d8f2cfa | 440 | if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) |
caa84d95 | 441 | atomic_dec(&clt_path->stats->inflight); |
6a98d71d JW |
442 | |
443 | req->in_use = false; | |
444 | req->con = NULL; | |
445 | ||
2f37b017 | 446 | if (errno) { |
d9372794 | 447 | rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n", |
caa84d95 VT |
448 | errno, kobject_name(&clt_path->kobj), clt_path->hca_name, |
449 | clt_path->hca_port, notify); | |
2f37b017 GK |
450 | } |
451 | ||
6a98d71d JW |
452 | if (notify) |
453 | req->conf(req->priv, errno); | |
454 | } | |
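
A sketch of the req->ref choreography above: the request starts with ref == 1
(set in rtrs_clt_init_req()); the invalidation path takes one extra reference
before posting the LOCAL_INV WR and drops it once the invalidation has
completed, so whichever side drops the last reference performs the final
teardown:

	/*
	 *   init:        ref = 1
	 *   before INV:  ref++        (rtrs_inv_rkey() posted)
	 *   INV done:    ref--        (callback or waiting path)
	 *   completion:  ref-- == 0 -> unmap, stats, req->conf()
	 */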
455 | ||
456 | static int rtrs_post_send_rdma(struct rtrs_clt_con *con, | |
457 | struct rtrs_clt_io_req *req, | |
458 | struct rtrs_rbuf *rbuf, u32 off, | |
459 | u32 imm, struct ib_send_wr *wr) | |
460 | { | |
caa84d95 | 461 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d JW |
462 | enum ib_send_flags flags; |
463 | struct ib_sge sge; | |
464 | ||
4693d6b7 | 465 | if (!req->sg_size) { |
d9372794 | 466 | rtrs_wrn(con->c.path, |
6a98d71d JW |
467 | "Doing RDMA Write failed, no data supplied\n"); |
468 | return -EINVAL; | |
469 | } | |
470 | ||
471 | /* user data and user message in the first list element */ | |
472 | sge.addr = req->iu->dma_addr; | |
473 | sge.length = req->sg_size; | |
caa84d95 | 474 | sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey; |
6a98d71d JW |
475 | |
476 | /* | |
477 | * From time to time we have to post signalled sends, | |
478 | * or the send queue will fill up and only a QP reset can help.
479 | */ | |
caa84d95 | 480 | flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? |
6a98d71d JW |
481 | 0 : IB_SEND_SIGNALED; |
482 | ||
caa84d95 VT |
483 | ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, |
484 | req->iu->dma_addr, | |
6a98d71d JW |
485 | req->sg_size, DMA_TO_DEVICE); |
486 | ||
487 | return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1, | |
488 | rbuf->rkey, rbuf->addr + off, | |
630e438f | 489 | imm, flags, wr, NULL); |
6a98d71d JW |
490 | } |
491 | ||
caa84d95 | 492 | static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id, |
6a98d71d JW |
493 | s16 errno, bool w_inval) |
494 | { | |
495 | struct rtrs_clt_io_req *req; | |
496 | ||
caa84d95 | 497 | if (WARN_ON(msg_id >= clt_path->queue_depth)) |
6a98d71d JW |
498 | return; |
499 | ||
caa84d95 | 500 | req = &clt_path->reqs[msg_id]; |
6a98d71d JW |
501 | /* Drop need_inv if the server responded with send-with-invalidate */
502 | req->need_inv &= !w_inval; | |
503 | complete_rdma_req(req, errno, true, false); | |
504 | } | |
505 | ||
506 | static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc) | |
507 | { | |
508 | struct rtrs_iu *iu; | |
509 | int err; | |
caa84d95 | 510 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d | 511 | |
caa84d95 | 512 | WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); |
6a98d71d JW |
513 | iu = container_of(wc->wr_cqe, struct rtrs_iu, |
514 | cqe); | |
515 | err = rtrs_iu_post_recv(&con->c, iu); | |
4693d6b7 | 516 | if (err) { |
d9372794 | 517 | rtrs_err(con->c.path, "post iu failed %d\n", err); |
6a98d71d JW |
518 | rtrs_rdma_error_recovery(con); |
519 | } | |
520 | } | |
521 | ||
522 | static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) | |
523 | { | |
caa84d95 | 524 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d JW |
525 | struct rtrs_msg_rkey_rsp *msg; |
526 | u32 imm_type, imm_payload; | |
527 | bool w_inval = false; | |
528 | struct rtrs_iu *iu; | |
529 | u32 buf_id; | |
530 | int err; | |
531 | ||
caa84d95 | 532 | WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); |
6a98d71d JW |
533 | |
534 | iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); | |
535 | ||
4693d6b7 | 536 | if (wc->byte_len < sizeof(*msg)) { |
d9372794 | 537 | rtrs_err(con->c.path, "rkey response is malformed: size %d\n", |
6a98d71d JW |
538 | wc->byte_len); |
539 | goto out; | |
540 | } | |
caa84d95 | 541 | ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, |
6a98d71d JW |
542 | iu->size, DMA_FROM_DEVICE); |
543 | msg = iu->buf; | |
4693d6b7 | 544 | if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) { |
caa84d95 VT |
545 | rtrs_err(clt_path->clt, |
546 | "rkey response is malformed: type %d\n", | |
6a98d71d JW |
547 | le16_to_cpu(msg->type)); |
548 | goto out; | |
549 | } | |
550 | buf_id = le16_to_cpu(msg->buf_id); | |
caa84d95 | 551 | if (WARN_ON(buf_id >= clt_path->queue_depth)) |
6a98d71d JW |
552 | goto out; |
553 | ||
554 | rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); | |
4693d6b7 GK |
555 | if (imm_type == RTRS_IO_RSP_IMM || |
556 | imm_type == RTRS_IO_RSP_W_INV_IMM) { | |
6a98d71d JW |
557 | u32 msg_id; |
558 | ||
559 | w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM); | |
560 | rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err); | |
561 | ||
562 | if (WARN_ON(buf_id != msg_id)) | |
563 | goto out; | |
caa84d95 VT |
564 | clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); |
565 | process_io_rsp(clt_path, msg_id, err, w_inval); | |
6a98d71d | 566 | } |
caa84d95 | 567 | ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr, |
6a98d71d JW |
568 | iu->size, DMA_FROM_DEVICE); |
569 | return rtrs_clt_recv_done(con, wc); | |
570 | out: | |
571 | rtrs_rdma_error_recovery(con); | |
572 | } | |
573 | ||
574 | static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc); | |
575 | ||
576 | static struct ib_cqe io_comp_cqe = { | |
577 | .done = rtrs_clt_rdma_done | |
578 | }; | |
579 | ||
580 | /* | |
581 | * Post x2 empty WRs: first is for this RDMA with IMM, | |
582 | * second is for RECV with INV, which happened earlier. | |
583 | */ | |
584 | static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe) | |
585 | { | |
586 | struct ib_recv_wr wr_arr[2], *wr; | |
587 | int i; | |
588 | ||
589 | memset(wr_arr, 0, sizeof(wr_arr)); | |
590 | for (i = 0; i < ARRAY_SIZE(wr_arr); i++) { | |
591 | wr = &wr_arr[i]; | |
592 | wr->wr_cqe = cqe; | |
593 | if (i) | |
594 | /* Chain backwards */ | |
595 | wr->next = &wr_arr[i - 1]; | |
596 | } | |
597 | ||
598 | return ib_post_recv(con->qp, wr, NULL); | |
599 | } | |
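
A note on the "Chain backwards" loop above: it leaves wr == &wr_arr[1] with
wr_arr[1].next == &wr_arr[0], so the single ib_post_recv() call posts both
work requests:

	/*
	 *   ib_post_recv(qp, wr, NULL)
	 *        |
	 *        v
	 *   wr_arr[1] --next--> wr_arr[0] --next--> NULL
	 */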
600 | ||
601 | static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) | |
602 | { | |
3b89e92c | 603 | struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); |
caa84d95 | 604 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d JW |
605 | u32 imm_type, imm_payload; |
606 | bool w_inval = false; | |
607 | int err; | |
608 | ||
4693d6b7 | 609 | if (wc->status != IB_WC_SUCCESS) { |
6a98d71d | 610 | if (wc->status != IB_WC_WR_FLUSH_ERR) { |
caa84d95 | 611 | rtrs_err(clt_path->clt, "RDMA failed: %s\n", |
6a98d71d JW |
612 | ib_wc_status_msg(wc->status)); |
613 | rtrs_rdma_error_recovery(con); | |
614 | } | |
615 | return; | |
616 | } | |
617 | rtrs_clt_update_wc_stats(con); | |
618 | ||
619 | switch (wc->opcode) { | |
620 | case IB_WC_RECV_RDMA_WITH_IMM: | |
621 | /* | |
622 | * post_recv() RDMA write completions of IO reqs (read/write) | |
623 | * and hb | |
624 | */ | |
625 | if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done)) | |
626 | return; | |
627 | rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), | |
628 | &imm_type, &imm_payload); | |
4693d6b7 GK |
629 | if (imm_type == RTRS_IO_RSP_IMM || |
630 | imm_type == RTRS_IO_RSP_W_INV_IMM) { | |
6a98d71d JW |
631 | u32 msg_id; |
632 | ||
633 | w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM); | |
634 | rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err); | |
635 | ||
caa84d95 | 636 | process_io_rsp(clt_path, msg_id, err, w_inval); |
6a98d71d JW |
637 | } else if (imm_type == RTRS_HB_MSG_IMM) { |
638 | WARN_ON(con->c.cid); | |
caa84d95 VT |
639 | rtrs_send_hb_ack(&clt_path->s); |
640 | if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) | |
6a98d71d JW |
641 | return rtrs_clt_recv_done(con, wc); |
642 | } else if (imm_type == RTRS_HB_ACK_IMM) { | |
643 | WARN_ON(con->c.cid); | |
caa84d95 VT |
644 | clt_path->s.hb_missed_cnt = 0; |
645 | clt_path->s.hb_cur_latency = | |
646 | ktime_sub(ktime_get(), clt_path->s.hb_last_sent); | |
647 | if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) | |
6a98d71d JW |
648 | return rtrs_clt_recv_done(con, wc); |
649 | } else { | |
d9372794 | 650 | rtrs_wrn(con->c.path, "Unknown IMM type %u\n", |
6a98d71d JW |
651 | imm_type); |
652 | } | |
653 | if (w_inval) | |
654 | /* | |
655 | * Post x2 empty WRs: first is for this RDMA with IMM, | |
656 | * second is for RECV with INV, which happened earlier. | |
657 | */ | |
658 | err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe); | |
659 | else | |
660 | err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); | |
4693d6b7 | 661 | if (err) { |
d9372794 | 662 | rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n", |
6a98d71d JW |
663 | err); |
664 | rtrs_rdma_error_recovery(con); | |
6a98d71d JW |
665 | } |
666 | break; | |
667 | case IB_WC_RECV: | |
668 | /* | |
669 | * Key invalidations from server side | |
670 | */ | |
671 | WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || | |
672 | wc->wc_flags & IB_WC_WITH_IMM)); | |
673 | WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); | |
caa84d95 | 674 | if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { |
6a98d71d JW |
675 | if (wc->wc_flags & IB_WC_WITH_INVALIDATE) |
676 | return rtrs_clt_recv_done(con, wc); | |
677 | ||
678 | return rtrs_clt_rkey_rsp_done(con, wc); | |
679 | } | |
680 | break; | |
681 | case IB_WC_RDMA_WRITE: | |
682 | /* | |
683 | * post_send() RDMA write completions of IO reqs (read/write) | |
e2d98504 | 684 | * and hb. |
6a98d71d JW |
685 | */ |
686 | break; | |
687 | ||
688 | default: | |
caa84d95 | 689 | rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode); |
6a98d71d JW |
690 | return; |
691 | } | |
692 | } | |
693 | ||
694 | static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) | |
695 | { | |
696 | int err, i; | |
caa84d95 | 697 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d JW |
698 | |
699 | for (i = 0; i < q_size; i++) { | |
caa84d95 | 700 | if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { |
6a98d71d JW |
701 | struct rtrs_iu *iu = &con->rsp_ius[i]; |
702 | ||
703 | err = rtrs_iu_post_recv(&con->c, iu); | |
704 | } else { | |
705 | err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); | |
706 | } | |
4693d6b7 | 707 | if (err) |
6a98d71d JW |
708 | return err; |
709 | } | |
710 | ||
711 | return 0; | |
712 | } | |
713 | ||
caa84d95 | 714 | static int post_recv_path(struct rtrs_clt_path *clt_path) |
6a98d71d JW |
715 | { |
716 | size_t q_size = 0; | |
717 | int err, cid; | |
718 | ||
caa84d95 | 719 | for (cid = 0; cid < clt_path->s.con_num; cid++) { |
6a98d71d JW |
720 | if (cid == 0) |
721 | q_size = SERVICE_CON_QUEUE_DEPTH; | |
722 | else | |
caa84d95 | 723 | q_size = clt_path->queue_depth; |
6a98d71d JW |
724 | |
725 | /* | |
726 | * x2 for RDMA read responses + FR key invalidations, | |
727 | * RDMA writes do not require any FR registrations. | |
728 | */ | |
729 | q_size *= 2; | |
730 | ||
caa84d95 | 731 | err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size); |
4693d6b7 | 732 | if (err) { |
caa84d95 VT |
733 | rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n", |
734 | err); | |
6a98d71d JW |
735 | return err; |
736 | } | |
737 | } | |
738 | ||
739 | return 0; | |
740 | } | |
741 | ||
742 | struct path_it { | |
743 | int i; | |
744 | struct list_head skip_list; | |
f3433d79 | 745 | struct rtrs_clt_sess *clt; |
caa84d95 | 746 | struct rtrs_clt_path *(*next_path)(struct path_it *it); |
6a98d71d JW |
747 | }; |
748 | ||
c14adff2 MHI |
749 | /* |
750 | * rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL | |
6a98d71d | 751 | * @head: the head for the list. |
c14adff2 | 752 | * @clt_path: The element to take the next clt_path from. |
6a98d71d | 753 | * |
c14adff2 | 754 | * The next clt path is returned in round-robin fashion, i.e. the head will be skipped,
6a98d71d JW |
755 | * but if the list is observed as empty, NULL will be returned.
756 | * | |
c14adff2 | 757 | * This function may safely run concurrently with the _rcu list-mutation |
6a98d71d JW |
758 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
759 | */ | |
c14adff2 MHI |
760 | static inline struct rtrs_clt_path * |
761 | rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path) | |
762 | { | |
763 | return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?: | |
764 | list_next_or_null_rcu(head, | |
765 | READ_ONCE((&clt_path->s.entry)->next), | |
766 | typeof(*clt_path), s.entry); | |
767 | } | |
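
Why the two chained list_next_or_null_rcu() attempts above: the first yields
NULL when clt_path is the last element (its next pointer is @head), in which
case the second call steps over the head and wraps the iteration around:

	/*
	 *   ... -> clt_path -> head -> first -> ...
	 *               |        |
	 *               |        '-- second attempt resumes here
	 *               '-- first attempt sees head and yields NULL
	 */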
6a98d71d JW |
768 | |
769 | /** | |
770 | * get_next_path_rr() - Returns path in round-robin fashion. | |
771 | * @it: the path pointer | |
772 | * | |
773 | * Related to @MP_POLICY_RR | |
774 | * | |
775 | * Locks: | |
776 | * rcu_read_lock() must be held.
777 | */ | |
caa84d95 | 778 | static struct rtrs_clt_path *get_next_path_rr(struct path_it *it) |
6a98d71d | 779 | { |
caa84d95 VT |
780 | struct rtrs_clt_path __rcu **ppcpu_path; |
781 | struct rtrs_clt_path *path; | |
f3433d79 | 782 | struct rtrs_clt_sess *clt; |
6a98d71d JW |
783 | |
784 | clt = it->clt; | |
785 | ||
786 | /* | |
787 | * Here we use two RCU objects: @paths_list and @pcpu_path | |
788 | * pointer. See rtrs_clt_remove_path_from_arr() for details | |
789 | * how that is handled. | |
790 | */ | |
791 | ||
792 | ppcpu_path = this_cpu_ptr(clt->pcpu_path); | |
793 | path = rcu_dereference(*ppcpu_path); | |
4693d6b7 | 794 | if (!path) |
6a98d71d JW |
795 | path = list_first_or_null_rcu(&clt->paths_list, |
796 | typeof(*path), s.entry); | |
797 | else | |
c14adff2 MHI |
798 | path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path); |
799 | ||
6a98d71d JW |
800 | rcu_assign_pointer(*ppcpu_path, path); |
801 | ||
802 | return path; | |
803 | } | |
804 | ||
805 | /** | |
806 | * get_next_path_min_inflight() - Returns path with minimal inflight count. | |
807 | * @it: the path pointer | |
808 | * | |
809 | * Related to @MP_POLICY_MIN_INFLIGHT | |
810 | * | |
811 | * Locks: | |
812 | * rcu_read_lock() must be held.
813 | */ | |
caa84d95 | 814 | static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it) |
6a98d71d | 815 | { |
caa84d95 | 816 | struct rtrs_clt_path *min_path = NULL; |
f3433d79 | 817 | struct rtrs_clt_sess *clt = it->clt; |
caa84d95 | 818 | struct rtrs_clt_path *clt_path; |
6a98d71d JW |
819 | int min_inflight = INT_MAX; |
820 | int inflight; | |
821 | ||
caa84d95 VT |
822 | list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { |
823 | if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) | |
41db63a7 MHI |
824 | continue; |
825 | ||
caa84d95 | 826 | if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) |
6a98d71d JW |
827 | continue; |
828 | ||
caa84d95 | 829 | inflight = atomic_read(&clt_path->stats->inflight); |
6a98d71d JW |
830 | |
831 | if (inflight < min_inflight) { | |
832 | min_inflight = inflight; | |
caa84d95 | 833 | min_path = clt_path; |
6a98d71d JW |
834 | } |
835 | } | |
836 | ||
837 | /* | |
838 | * add the path to the skip list, so that next time we can get | |
839 | * a different one | |
840 | */ | |
841 | if (min_path) | |
842 | list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list); | |
843 | ||
844 | return min_path; | |
845 | } | |
846 | ||
dc3b66a0 GK |
847 | /** |
848 | * get_next_path_min_latency() - Returns path with minimal latency. | |
849 | * @it: the path pointer | |
850 | * | |
851 | * Return: a path with the lowest latency or NULL if all paths are tried | |
852 | * | |
853 | * Locks: | |
854 | * rcu_read_lock() must be held.
855 | * | |
856 | * Related to @MP_POLICY_MIN_LATENCY | |
857 | * | |
858 | * This DOES skip an already-tried path. | |
859 | * There is a skip-list to skip a path if the path has been tried but failed.
860 | * It will try the minimum latency path and then the second minimum latency
861 | * path and so on. Finally it will return NULL if all paths are tried.
862 | * Therefore the caller MUST check whether the returned
863 | * path is NULL and, if so, trigger the IO error.
864 | */ | |
caa84d95 | 865 | static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it) |
dc3b66a0 | 866 | { |
caa84d95 | 867 | struct rtrs_clt_path *min_path = NULL; |
f3433d79 | 868 | struct rtrs_clt_sess *clt = it->clt; |
caa84d95 | 869 | struct rtrs_clt_path *clt_path; |
925cac63 | 870 | ktime_t min_latency = KTIME_MAX; |
dc3b66a0 GK |
871 | ktime_t latency; |
872 | ||
caa84d95 VT |
873 | list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { |
874 | if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) | |
dc3b66a0 GK |
875 | continue; |
876 | ||
caa84d95 | 877 | if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) |
dc3b66a0 GK |
878 | continue; |
879 | ||
caa84d95 | 880 | latency = clt_path->s.hb_cur_latency; |
dc3b66a0 GK |
881 | |
882 | if (latency < min_latency) { | |
883 | min_latency = latency; | |
caa84d95 | 884 | min_path = clt_path; |
dc3b66a0 GK |
885 | } |
886 | } | |
887 | ||
888 | /* | |
889 | * add the path to the skip list, so that next time we can get | |
890 | * a different one | |
891 | */ | |
892 | if (min_path) | |
893 | list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list); | |
894 | ||
895 | return min_path; | |
896 | } | |
897 | ||
f3433d79 | 898 | static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt) |
6a98d71d JW |
899 | { |
900 | INIT_LIST_HEAD(&it->skip_list); | |
901 | it->clt = clt; | |
902 | it->i = 0; | |
903 | ||
904 | if (clt->mp_policy == MP_POLICY_RR) | |
905 | it->next_path = get_next_path_rr; | |
dc3b66a0 | 906 | else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT) |
6a98d71d | 907 | it->next_path = get_next_path_min_inflight; |
dc3b66a0 GK |
908 | else |
909 | it->next_path = get_next_path_min_latency; | |
6a98d71d JW |
910 | } |
911 | ||
912 | static inline void path_it_deinit(struct path_it *it) | |
913 | { | |
914 | struct list_head *skip, *tmp; | |
915 | /* | |
b962fee5 | 916 | * The skip_list is used only for the MIN_INFLIGHT and MIN_LATENCY policies. |
6a98d71d JW |
917 | * We need to remove paths from it, so that the next IO can insert
918 | * paths (->mp_skip_entry) into a skip_list again.
919 | */ | |
920 | list_for_each_safe(skip, tmp, &it->skip_list) | |
921 | list_del_init(skip); | |
922 | } | |
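
A condensed sketch of how the iterator is typically driven (mirroring
rtrs_clt_failover_req() further below; do_io_on_path is a hypothetical
placeholder):

	rcu_read_lock();
	for (path_it_init(&it, clt);
	     (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num;
	     it.i++) {
		if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
			continue;
		err = do_io_on_path(clt_path);	/* hypothetical */
		if (!err)
			break;
	}
	path_it_deinit(&it);
	rcu_read_unlock();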
923 | ||
924 | /** | |
bf194997 | 925 | * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information |
6a98d71d JW |
926 | * about an inflight IO. |
927 | * The user buffer holding user control message (not data) is copied into | |
928 | * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will | |
929 | * also hold the control message of rtrs. | |
930 | * @req: an io request holding information about IO. | |
caa84d95 | 931 | * @clt_path: client path |
6a98d71d JW |
932 | * @conf: confirmation callback function to notify the upper layer.
933 | * @permit: permit for allocation of RDMA remote buffer | |
934 | * @priv: private pointer | |
935 | * @vec: kernel vector containing control message | |
936 | * @usr_len: length of the user message | |
937 | * @sg: scatter list for IO data
938 | * @sg_cnt: number of scatter list entries
939 | * @data_len: length of the IO data | |
940 | * @dir: direction of the IO. | |
941 | */ | |
942 | static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, | |
caa84d95 | 943 | struct rtrs_clt_path *clt_path, |
6a98d71d JW |
944 | void (*conf)(void *priv, int errno), |
945 | struct rtrs_permit *permit, void *priv, | |
946 | const struct kvec *vec, size_t usr_len, | |
947 | struct scatterlist *sg, size_t sg_cnt, | |
948 | size_t data_len, int dir) | |
949 | { | |
950 | struct iov_iter iter; | |
951 | size_t len; | |
952 | ||
953 | req->permit = permit; | |
954 | req->in_use = true; | |
955 | req->usr_len = usr_len; | |
956 | req->data_len = data_len; | |
957 | req->sglist = sg; | |
958 | req->sg_cnt = sg_cnt; | |
959 | req->priv = priv; | |
960 | req->dir = dir; | |
caa84d95 | 961 | req->con = rtrs_permit_to_clt_con(clt_path, permit); |
6a98d71d JW |
962 | req->conf = conf; |
963 | req->need_inv = false; | |
964 | req->need_inv_comp = false; | |
965 | req->inv_errno = 0; | |
2ece9ec6 | 966 | refcount_set(&req->ref, 1); |
caa84d95 | 967 | req->mp_policy = clt_path->clt->mp_policy; |
6a98d71d JW |
968 | |
969 | iov_iter_kvec(&iter, READ, vec, 1, usr_len); | |
970 | len = _copy_from_iter(req->iu->buf, usr_len, &iter); | |
971 | WARN_ON(len != usr_len); | |
972 | ||
973 | reinit_completion(&req->inv_comp); | |
974 | } | |
975 | ||
976 | static struct rtrs_clt_io_req * | |
caa84d95 | 977 | rtrs_clt_get_req(struct rtrs_clt_path *clt_path, |
6a98d71d JW |
978 | void (*conf)(void *priv, int errno), |
979 | struct rtrs_permit *permit, void *priv, | |
980 | const struct kvec *vec, size_t usr_len, | |
981 | struct scatterlist *sg, size_t sg_cnt, | |
982 | size_t data_len, int dir) | |
983 | { | |
984 | struct rtrs_clt_io_req *req; | |
985 | ||
caa84d95 VT |
986 | req = &clt_path->reqs[permit->mem_id]; |
987 | rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len, | |
6a98d71d JW |
988 | sg, sg_cnt, data_len, dir); |
989 | return req; | |
990 | } | |
991 | ||
992 | static struct rtrs_clt_io_req * | |
caa84d95 | 993 | rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path, |
6a98d71d JW |
994 | struct rtrs_clt_io_req *fail_req) |
995 | { | |
996 | struct rtrs_clt_io_req *req; | |
997 | struct kvec vec = { | |
998 | .iov_base = fail_req->iu->buf, | |
999 | .iov_len = fail_req->usr_len | |
1000 | }; | |
1001 | ||
caa84d95 VT |
1002 | req = &alive_path->reqs[fail_req->permit->mem_id]; |
1003 | rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit, | |
6a98d71d JW |
1004 | fail_req->priv, &vec, fail_req->usr_len, |
1005 | fail_req->sglist, fail_req->sg_cnt, | |
1006 | fail_req->data_len, fail_req->dir); | |
1007 | return req; | |
1008 | } | |
1009 | ||
1010 | static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, | |
630e438f | 1011 | struct rtrs_clt_io_req *req, |
2ece9ec6 | 1012 | struct rtrs_rbuf *rbuf, bool fr_en, |
b66905e0 JW |
1013 | u32 count, u32 size, u32 imm, |
1014 | struct ib_send_wr *wr, | |
630e438f | 1015 | struct ib_send_wr *tail) |
6a98d71d | 1016 | { |
caa84d95 | 1017 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d JW |
1018 | struct ib_sge *sge = req->sge; |
1019 | enum ib_send_flags flags; | |
1020 | struct scatterlist *sg; | |
1021 | size_t num_sge; | |
1022 | int i; | |
630e438f | 1023 | struct ib_send_wr *ptail = NULL; |
6a98d71d | 1024 | |
2ece9ec6 JW |
1025 | if (fr_en) { |
1026 | i = 0; | |
1027 | sge[i].addr = req->mr->iova; | |
1028 | sge[i].length = req->mr->length; | |
1029 | sge[i].lkey = req->mr->lkey; | |
1030 | i++; | |
1031 | num_sge = 2; | |
1032 | ptail = tail; | |
1033 | } else { | |
b66905e0 | 1034 | for_each_sg(req->sglist, sg, count, i) { |
2ece9ec6 JW |
1035 | sge[i].addr = sg_dma_address(sg); |
1036 | sge[i].length = sg_dma_len(sg); | |
caa84d95 | 1037 | sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; |
2ece9ec6 | 1038 | } |
b66905e0 | 1039 | num_sge = 1 + count; |
6a98d71d JW |
1040 | } |
1041 | sge[i].addr = req->iu->dma_addr; | |
1042 | sge[i].length = size; | |
caa84d95 | 1043 | sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; |
6a98d71d | 1044 | |
6a98d71d JW |
1045 | /* |
1046 | * From time to time we have to post signalled sends, | |
1047 | * or the send queue will fill up and only a QP reset can help.
1048 | */ | |
caa84d95 | 1049 | flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? |
6a98d71d JW |
1050 | 0 : IB_SEND_SIGNALED; |
1051 | ||
caa84d95 VT |
1052 | ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, |
1053 | req->iu->dma_addr, | |
6a98d71d JW |
1054 | size, DMA_TO_DEVICE); |
1055 | ||
1056 | return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge, | |
1057 | rbuf->rkey, rbuf->addr, imm, | |
630e438f | 1058 | flags, wr, ptail); |
6a98d71d JW |
1059 | } |
1060 | ||
2ece9ec6 JW |
1061 | static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count) |
1062 | { | |
1063 | int nr; | |
1064 | ||
1065 | /* Align the MR to a 4K page size to match the block virt boundary */ | |
1066 | nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K); | |
1067 | if (nr < 0) | |
1068 | return nr; | |
4693d6b7 | 1069 | if (nr < req->sg_cnt) |
2ece9ec6 JW |
1070 | return -EINVAL; |
1071 | ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); | |
1072 | ||
1073 | return nr; | |
1074 | } | |
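
A note on the ib_update_fast_reg_key() call above: ib_inc_rkey() increments
only the low 8-bit key portion of the rkey, so each registration of this MR
gets a fresh key and a stale rkey from a previous IO cannot alias the new
mapping:

	/* e.g. rkey 0x12345601 -> 0x12345602: same MR index, new key byte. */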
1075 | ||
6a98d71d JW |
1076 | static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) |
1077 | { | |
1078 | struct rtrs_clt_con *con = req->con; | |
d9372794 | 1079 | struct rtrs_path *s = con->c.path; |
caa84d95 | 1080 | struct rtrs_clt_path *clt_path = to_clt_path(s); |
6a98d71d JW |
1081 | struct rtrs_msg_rdma_write *msg; |
1082 | ||
1083 | struct rtrs_rbuf *rbuf; | |
1084 | int ret, count = 0; | |
1085 | u32 imm, buf_id; | |
2ece9ec6 JW |
1086 | struct ib_reg_wr rwr; |
1087 | struct ib_send_wr inv_wr; | |
1088 | struct ib_send_wr *wr = NULL; | |
1089 | bool fr_en = false; | |
6a98d71d JW |
1090 | |
1091 | const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; | |
1092 | ||
caa84d95 | 1093 | if (tsize > clt_path->chunk_size) { |
6a98d71d | 1094 | rtrs_wrn(s, "Write request failed, size too big %zu > %d\n", |
caa84d95 | 1095 | tsize, clt_path->chunk_size); |
6a98d71d JW |
1096 | return -EMSGSIZE; |
1097 | } | |
1098 | if (req->sg_cnt) { | |
caa84d95 | 1099 | count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist, |
6a98d71d | 1100 | req->sg_cnt, req->dir); |
4693d6b7 | 1101 | if (!count) { |
6a98d71d JW |
1102 | rtrs_wrn(s, "Write request failed, map failed\n"); |
1103 | return -EINVAL; | |
1104 | } | |
1105 | } | |
1106 | /* put rtrs msg after sg and user message */ | |
1107 | msg = req->iu->buf + req->usr_len; | |
1108 | msg->type = cpu_to_le16(RTRS_MSG_WRITE); | |
1109 | msg->usr_len = cpu_to_le16(req->usr_len); | |
1110 | ||
1111 | /* rtrs message on server side will be after user data and message */ | |
1112 | imm = req->permit->mem_off + req->data_len + req->usr_len; | |
1113 | imm = rtrs_to_io_req_imm(imm); | |
1114 | buf_id = req->permit->mem_id; | |
1115 | req->sg_size = tsize; | |
caa84d95 | 1116 | rbuf = &clt_path->rbufs[buf_id]; |
6a98d71d | 1117 | |
2ece9ec6 JW |
1118 | if (count) { |
1119 | ret = rtrs_map_sg_fr(req, count); | |
1120 | if (ret < 0) { | |
1121 | rtrs_err_rl(s, | |
1122 | "Write request failed, failed to map fast reg. data, err: %d\n", | |
1123 | ret); | |
caa84d95 | 1124 | ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, |
2ece9ec6 JW |
1125 | req->sg_cnt, req->dir); |
1126 | return ret; | |
1127 | } | |
1128 | inv_wr = (struct ib_send_wr) { | |
1129 | .opcode = IB_WR_LOCAL_INV, | |
1130 | .wr_cqe = &req->inv_cqe, | |
1131 | .send_flags = IB_SEND_SIGNALED, | |
1132 | .ex.invalidate_rkey = req->mr->rkey, | |
1133 | }; | |
1134 | req->inv_cqe.done = rtrs_clt_inv_rkey_done; | |
1135 | rwr = (struct ib_reg_wr) { | |
1136 | .wr.opcode = IB_WR_REG_MR, | |
1137 | .wr.wr_cqe = &fast_reg_cqe, | |
1138 | .mr = req->mr, | |
1139 | .key = req->mr->rkey, | |
1140 | .access = (IB_ACCESS_LOCAL_WRITE), | |
1141 | }; | |
1142 | wr = &rwr.wr; | |
1143 | fr_en = true; | |
1144 | refcount_inc(&req->ref); | |
1145 | } | |
6a98d71d JW |
1146 | /* |
1147 | * Update stats now; after the request is successfully sent it is
1148 | * no longer safe to touch it.
1149 | */ | |
1150 | rtrs_clt_update_all_stats(req, WRITE); | |
1151 | ||
b66905e0 | 1152 | ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count, |
630e438f | 1153 | req->usr_len + sizeof(*msg), |
2ece9ec6 | 1154 | imm, wr, &inv_wr); |
4693d6b7 | 1155 | if (ret) { |
2f37b017 GK |
1156 | rtrs_err_rl(s, |
1157 | "Write request failed: error=%d path=%s [%s:%u]\n", | |
caa84d95 VT |
1158 | ret, kobject_name(&clt_path->kobj), clt_path->hca_name, |
1159 | clt_path->hca_port); | |
0d8f2cfa | 1160 | if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) |
caa84d95 | 1161 | atomic_dec(&clt_path->stats->inflight); |
6a98d71d | 1162 | if (req->sg_cnt) |
caa84d95 | 1163 | ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, |
6a98d71d JW |
1164 | req->sg_cnt, req->dir); |
1165 | } | |
1166 | ||
1167 | return ret; | |
1168 | } | |
1169 | ||
6a98d71d JW |
1170 | static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) |
1171 | { | |
1172 | struct rtrs_clt_con *con = req->con; | |
d9372794 | 1173 | struct rtrs_path *s = con->c.path; |
caa84d95 | 1174 | struct rtrs_clt_path *clt_path = to_clt_path(s); |
6a98d71d | 1175 | struct rtrs_msg_rdma_read *msg; |
caa84d95 | 1176 | struct rtrs_ib_dev *dev = clt_path->s.dev; |
6a98d71d JW |
1177 | |
1178 | struct ib_reg_wr rwr; | |
1179 | struct ib_send_wr *wr = NULL; | |
1180 | ||
1181 | int ret, count = 0; | |
1182 | u32 imm, buf_id; | |
1183 | ||
1184 | const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; | |
1185 | ||
caa84d95 | 1186 | if (tsize > clt_path->chunk_size) { |
6a98d71d JW |
1187 | rtrs_wrn(s, |
1188 | "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n", | |
caa84d95 | 1189 | tsize, clt_path->chunk_size); |
6a98d71d JW |
1190 | return -EMSGSIZE; |
1191 | } | |
1192 | ||
1193 | if (req->sg_cnt) { | |
1194 | count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt, | |
1195 | req->dir); | |
4693d6b7 | 1196 | if (!count) { |
6a98d71d JW |
1197 | rtrs_wrn(s, |
1198 | "Read request failed, dma map failed\n"); | |
1199 | return -EINVAL; | |
1200 | } | |
1201 | } | |
1202 | /* put our message into req->buf after the user message */
1203 | msg = req->iu->buf + req->usr_len; | |
1204 | msg->type = cpu_to_le16(RTRS_MSG_READ); | |
1205 | msg->usr_len = cpu_to_le16(req->usr_len); | |
1206 | ||
1207 | if (count) { | |
1208 | ret = rtrs_map_sg_fr(req, count); | |
1209 | if (ret < 0) { | |
1210 | rtrs_err_rl(s, | |
1211 | "Read request failed, failed to map fast reg. data, err: %d\n", | |
1212 | ret); | |
1213 | ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt, | |
1214 | req->dir); | |
1215 | return ret; | |
1216 | } | |
1217 | rwr = (struct ib_reg_wr) { | |
1218 | .wr.opcode = IB_WR_REG_MR, | |
1219 | .wr.wr_cqe = &fast_reg_cqe, | |
1220 | .mr = req->mr, | |
1221 | .key = req->mr->rkey, | |
1222 | .access = (IB_ACCESS_LOCAL_WRITE | | |
1223 | IB_ACCESS_REMOTE_WRITE), | |
1224 | }; | |
1225 | wr = &rwr.wr; | |
1226 | ||
1227 | msg->sg_cnt = cpu_to_le16(1); | |
1228 | msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F); | |
1229 | ||
1230 | msg->desc[0].addr = cpu_to_le64(req->mr->iova); | |
1231 | msg->desc[0].key = cpu_to_le32(req->mr->rkey); | |
1232 | msg->desc[0].len = cpu_to_le32(req->mr->length); | |
1233 | ||
1234 | /* Further invalidation is required */ | |
1235 | req->need_inv = !!RTRS_MSG_NEED_INVAL_F; | |
1236 | ||
1237 | } else { | |
1238 | msg->sg_cnt = 0; | |
1239 | msg->flags = 0; | |
1240 | } | |
1241 | /* | |
1242 | * rtrs message will be after the space reserved for disk data and | |
1243 | * user message | |
1244 | */ | |
1245 | imm = req->permit->mem_off + req->data_len + req->usr_len; | |
1246 | imm = rtrs_to_io_req_imm(imm); | |
1247 | buf_id = req->permit->mem_id; | |
1248 | ||
1249 | req->sg_size = sizeof(*msg); | |
1250 | req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc); | |
1251 | req->sg_size += req->usr_len; | |
1252 | ||
1253 | /* | |
1254 | * Update stats now; after the request is successfully sent it is
1255 | * no longer safe to touch it.
1256 | */ | |
1257 | rtrs_clt_update_all_stats(req, READ); | |
1258 | ||
caa84d95 | 1259 | ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id], |
6a98d71d | 1260 | req->data_len, imm, wr); |
4693d6b7 | 1261 | if (ret) { |
2f37b017 GK |
1262 | rtrs_err_rl(s, |
1263 | "Read request failed: error=%d path=%s [%s:%u]\n", | |
caa84d95 VT |
1264 | ret, kobject_name(&clt_path->kobj), clt_path->hca_name, |
1265 | clt_path->hca_port); | |
0d8f2cfa | 1266 | if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) |
caa84d95 | 1267 | atomic_dec(&clt_path->stats->inflight); |
6a98d71d JW |
1268 | req->need_inv = false; |
1269 | if (req->sg_cnt) | |
1270 | ib_dma_unmap_sg(dev->ib_dev, req->sglist, | |
1271 | req->sg_cnt, req->dir); | |
1272 | } | |
1273 | ||
1274 | return ret; | |
1275 | } | |
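
The IU layout that both request builders above rely on (a sketch; field
widths not to scale):

	/*
	 *   req->iu->buf:
	 *   +---------------------+--------------------------------+
	 *   | user message        | struct rtrs_msg_rdma_read /    |
	 *   | (usr_len bytes)     | _write (+ sg descriptors)      |
	 *   +---------------------+--------------------------------+
	 *
	 * hence msg = req->iu->buf + req->usr_len, and the immediate
	 * value encodes mem_off + data_len + usr_len so the server can
	 * locate the rtrs header behind the payload.
	 */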
1276 | ||
1277 | /** | |
bf194997 | 1278 | * rtrs_clt_failover_req() - Try to find an active path for a failed request |
6a98d71d JW |
1279 | * @clt: clt context |
1280 | * @fail_req: a failed io request. | |
1281 | */ | |
f3433d79 | 1282 | static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt, |
6a98d71d JW |
1283 | struct rtrs_clt_io_req *fail_req) |
1284 | { | |
caa84d95 | 1285 | struct rtrs_clt_path *alive_path; |
6a98d71d JW |
1286 | struct rtrs_clt_io_req *req; |
1287 | int err = -ECONNABORTED; | |
1288 | struct path_it it; | |
1289 | ||
a94dae86 DK |
1290 | rcu_read_lock(); |
1291 | for (path_it_init(&it, clt); | |
caa84d95 | 1292 | (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num; |
a94dae86 | 1293 | it.i++) { |
caa84d95 | 1294 | if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED) |
6a98d71d | 1295 | continue; |
caa84d95 | 1296 | req = rtrs_clt_get_copy_req(alive_path, fail_req); |
6a98d71d JW |
1297 | if (req->dir == DMA_TO_DEVICE) |
1298 | err = rtrs_clt_write_req(req); | |
1299 | else | |
1300 | err = rtrs_clt_read_req(req); | |
4693d6b7 | 1301 | if (err) { |
6a98d71d JW |
1302 | req->in_use = false; |
1303 | continue; | |
1304 | } | |
1305 | /* Success path */ | |
caa84d95 | 1306 | rtrs_clt_inc_failover_cnt(alive_path->stats); |
6a98d71d | 1307 | break; |
a94dae86 DK |
1308 | } |
1309 | path_it_deinit(&it); | |
1310 | rcu_read_unlock(); | |
6a98d71d JW |
1311 | |
1312 | return err; | |
1313 | } | |
1314 | ||
caa84d95 | 1315 | static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path) |
6a98d71d | 1316 | { |
f3433d79 | 1317 | struct rtrs_clt_sess *clt = clt_path->clt; |
6a98d71d JW |
1318 | struct rtrs_clt_io_req *req; |
1319 | int i, err; | |
1320 | ||
caa84d95 | 1321 | if (!clt_path->reqs) |
6a98d71d | 1322 | return; |
caa84d95 VT |
1323 | for (i = 0; i < clt_path->queue_depth; ++i) { |
1324 | req = &clt_path->reqs[i]; | |
6a98d71d JW |
1325 | if (!req->in_use) |
1326 | continue; | |
1327 | ||
1328 | /* | |
1329 | * Safely (without notification) complete failed request. | |
1330 | * After completion this request is still useble and can | |
1331 | * be failovered to another path. | |
1332 | */ | |
1333 | complete_rdma_req(req, -ECONNABORTED, false, true); | |
1334 | ||
1335 | err = rtrs_clt_failover_req(clt, req); | |
4693d6b7 | 1336 | if (err) |
6a98d71d JW |
1337 | /* Failover failed, notify anyway */ |
1338 | req->conf(req->priv, err); | |
1339 | } | |
1340 | } | |
1341 | ||
caa84d95 | 1342 | static void free_path_reqs(struct rtrs_clt_path *clt_path) |
6a98d71d JW |
1343 | { |
1344 | struct rtrs_clt_io_req *req; | |
1345 | int i; | |
1346 | ||
caa84d95 | 1347 | if (!clt_path->reqs) |
6a98d71d | 1348 | return; |
caa84d95 VT |
1349 | for (i = 0; i < clt_path->queue_depth; ++i) { |
1350 | req = &clt_path->reqs[i]; | |
6a98d71d JW |
1351 | if (req->mr) |
1352 | ib_dereg_mr(req->mr); | |
1353 | kfree(req->sge); | |
caa84d95 | 1354 | rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1); |
6a98d71d | 1355 | } |
caa84d95 VT |
1356 | kfree(clt_path->reqs); |
1357 | clt_path->reqs = NULL; | |
6a98d71d JW |
1358 | } |
1359 | ||
caa84d95 | 1360 | static int alloc_path_reqs(struct rtrs_clt_path *clt_path) |
6a98d71d JW |
1361 | { |
1362 | struct rtrs_clt_io_req *req; | |
6a98d71d JW |
1363 | int i, err = -ENOMEM; |
1364 | ||
caa84d95 VT |
1365 | clt_path->reqs = kcalloc(clt_path->queue_depth, |
1366 | sizeof(*clt_path->reqs), | |
1367 | GFP_KERNEL); | |
1368 | if (!clt_path->reqs) | |
6a98d71d JW |
1369 | return -ENOMEM; |
1370 | ||
caa84d95 VT |
1371 | for (i = 0; i < clt_path->queue_depth; ++i) { |
1372 | req = &clt_path->reqs[i]; | |
1373 | req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL, | |
1374 | clt_path->s.dev->ib_dev, | |
6a98d71d JW |
1375 | DMA_TO_DEVICE, |
1376 | rtrs_clt_rdma_done); | |
1377 | if (!req->iu) | |
1378 | goto out; | |
1379 | ||
df1885a7 | 1380 | req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL); |
6a98d71d JW |
1381 | if (!req->sge) |
1382 | goto out; | |
1383 | ||
caa84d95 VT |
1384 | req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, |
1385 | IB_MR_TYPE_MEM_REG, | |
1386 | clt_path->max_pages_per_mr); | |
6a98d71d JW |
1387 | if (IS_ERR(req->mr)) { |
1388 | err = PTR_ERR(req->mr); | |
1389 | req->mr = NULL; | |
caa84d95 VT |
1390 | pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n", |
1391 | clt_path->max_pages_per_mr); | |
6a98d71d JW |
1392 | goto out; |
1393 | } | |
1394 | ||
1395 | init_completion(&req->inv_comp); | |
1396 | } | |
1397 | ||
1398 | return 0; | |
1399 | ||
1400 | out: | |
caa84d95 | 1401 | free_path_reqs(clt_path); |
6a98d71d JW |
1402 | |
1403 | return err; | |
1404 | } | |
1405 | ||
f3433d79 | 1406 | static int alloc_permits(struct rtrs_clt_sess *clt) |
6a98d71d JW |
1407 | { |
1408 | unsigned int chunk_bits; | |
1409 | int err, i; | |
1410 | ||
e471eedd | 1411 | clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL); |
6a98d71d JW |
1412 | if (!clt->permits_map) { |
1413 | err = -ENOMEM; | |
1414 | goto out_err; | |
1415 | } | |
1416 | clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL); | |
1417 | if (!clt->permits) { | |
1418 | err = -ENOMEM; | |
1419 | goto err_map; | |
1420 | } | |
1421 | chunk_bits = ilog2(clt->queue_depth - 1) + 1; | |
1422 | for (i = 0; i < clt->queue_depth; i++) { | |
1423 | struct rtrs_permit *permit; | |
1424 | ||
1425 | permit = get_permit(clt, i); | |
1426 | permit->mem_id = i; | |
1427 | permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits); | |
1428 | } | |
1429 | ||
1430 | return 0; | |
1431 | ||
1432 | err_map: | |
e471eedd | 1433 | bitmap_free(clt->permits_map); |
6a98d71d JW |
1434 | clt->permits_map = NULL; |
1435 | out_err: | |
1436 | return err; | |
1437 | } | |
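
A worked example of the chunk_bits/mem_off math above (hypothetical
queue_depth == 512, and assuming MAX_IMM_PAYL_BITS == 28 as defined in
rtrs-pri.h):

	/*
	 *   chunk_bits         = ilog2(512 - 1) + 1 = 9
	 *   permit->mem_off(i) = i << (28 - 9)      = i * SZ_512K
	 *
	 * i.e. the immediate payload is split into a 9-bit chunk id
	 * and a 19-bit offset within that chunk.
	 */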
1438 | ||
f3433d79 | 1439 | static void free_permits(struct rtrs_clt_sess *clt) |
6a98d71d | 1440 | { |
fc411473 | 1441 | if (clt->permits_map) |
25a033f5 | 1442 | wait_event(clt->permits_wait, |
fc411473 CJ |
1443 | bitmap_empty(clt->permits_map, clt->queue_depth)); |
1444 | ||
e471eedd | 1445 | bitmap_free(clt->permits_map); |
6a98d71d JW |
1446 | clt->permits_map = NULL; |
1447 | kfree(clt->permits); | |
1448 | clt->permits = NULL; | |
1449 | } | |
1450 | ||
caa84d95 | 1451 | static void query_fast_reg_mode(struct rtrs_clt_path *clt_path) |
6a98d71d JW |
1452 | { |
1453 | struct ib_device *ib_dev; | |
1454 | u64 max_pages_per_mr; | |
1455 | int mr_page_shift; | |
1456 | ||
caa84d95 | 1457 | ib_dev = clt_path->s.dev->ib_dev; |
6a98d71d JW |
1458 | |
1459 | /* | |
1460 | * Use the smallest page size supported by the HCA, down to a | |
1461 | * minimum of 4096 bytes. We're unlikely to build large sglists | |
1462 | * out of smaller entries. | |
1463 | */ | |
1464 | mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1); | |
1465 | max_pages_per_mr = ib_dev->attrs.max_mr_size; | |
1466 | do_div(max_pages_per_mr, (1ull << mr_page_shift)); | |
caa84d95 VT |
1467 | clt_path->max_pages_per_mr = |
1468 | min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr, | |
6a98d71d | 1469 | ib_dev->attrs.max_fast_reg_page_list_len); |
caa84d95 VT |
1470 | clt_path->clt->max_segments = |
1471 | min(clt_path->max_pages_per_mr, clt_path->clt->max_segments); | |
6a98d71d JW |
1472 | } |
1473 | ||
caa84d95 | 1474 | static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path, |
6a98d71d JW |
1475 | enum rtrs_clt_state new_state, |
1476 | enum rtrs_clt_state *old_state) | |
1477 | { | |
1478 | bool changed; | |
1479 | ||
caa84d95 | 1480 | spin_lock_irq(&clt_path->state_wq.lock); |
11f7b394 | 1481 | if (old_state) |
caa84d95 VT |
1482 | *old_state = clt_path->state; |
1483 | changed = rtrs_clt_change_state(clt_path, new_state); | |
1484 | spin_unlock_irq(&clt_path->state_wq.lock); | |
6a98d71d JW |
1485 | |
1486 | return changed; | |
1487 | } | |
1488 | ||
6a98d71d JW |
1489 | static void rtrs_clt_hb_err_handler(struct rtrs_con *c) |
1490 | { | |
1491 | struct rtrs_clt_con *con = container_of(c, typeof(*con), c); | |
1492 | ||
1493 | rtrs_rdma_error_recovery(con); | |
1494 | } | |
1495 | ||
caa84d95 | 1496 | static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path) |
6a98d71d | 1497 | { |
caa84d95 | 1498 | rtrs_init_hb(&clt_path->s, &io_comp_cqe, |
6a98d71d JW |
1499 | RTRS_HB_INTERVAL_MS, |
1500 | RTRS_HB_MISSED_MAX, | |
1501 | rtrs_clt_hb_err_handler, | |
1502 | rtrs_wq); | |
1503 | } | |
1504 | ||
6a98d71d JW |
1505 | static void rtrs_clt_reconnect_work(struct work_struct *work); |
1506 | static void rtrs_clt_close_work(struct work_struct *work); | |
1507 | ||
c1289d5d JW |
1508 | static void rtrs_clt_err_recovery_work(struct work_struct *work) |
1509 | { | |
1510 | struct rtrs_clt_path *clt_path; | |
1511 | struct rtrs_clt_sess *clt; | |
1512 | int delay_ms; | |
1513 | ||
1514 | clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work); | |
1515 | clt = clt_path->clt; | |
1516 | delay_ms = clt->reconnect_delay_sec * 1000; | |
1517 | rtrs_clt_stop_and_destroy_conns(clt_path); | |
1518 | queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, | |
1519 | msecs_to_jiffies(delay_ms + | |
81895a65 | 1520 | prandom_u32_max(RTRS_RECONNECT_SEED * 1000))); |
c1289d5d JW |
1521 | } |
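/*
 * Numeric sketch of the delay above (assuming reconnect_delay_sec = 30):
 * delay_ms = 30000 and the jitter term is uniform in [0, 8000) ms, so
 * the reconnect work fires 30 to 38 seconds from now. The jitter keeps
 * many clients from reconnecting in lockstep after a major outage.
 */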
1522 | ||
f3433d79 | 1523 | static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt, |
7404bdde JW |
1524 | const struct rtrs_addr *path, |
1525 | size_t con_num, u32 nr_poll_queues) | |
6a98d71d | 1526 | { |
caa84d95 | 1527 | struct rtrs_clt_path *clt_path; |
6a98d71d JW |
1528 | int err = -ENOMEM; |
1529 | int cpu; | |
2958a995 | 1530 | size_t total_con; |
6a98d71d | 1531 | |
caa84d95 VT |
1532 | clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL); |
1533 | if (!clt_path) | |
6a98d71d JW |
1534 | goto err; |
1535 | ||
2958a995 GK |
1536 | /* |
1537 | * con_num connections in IRQ mode plus nr_poll_queues in poll mode, |
1538 | * +1: extra connection for user messages |
1539 | */ | |
1540 | total_con = con_num + nr_poll_queues + 1; | |
caa84d95 VT |
1541 | clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con), |
1542 | GFP_KERNEL); | |
1543 | if (!clt_path->s.con) | |
1544 | goto err_free_path; | |
6a98d71d | 1545 | |
caa84d95 VT |
1546 | clt_path->s.con_num = total_con; |
1547 | clt_path->s.irq_con_num = con_num + 1; | |
2958a995 | 1548 | |
caa84d95 VT |
1549 | clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL); |
1550 | if (!clt_path->stats) | |
6a98d71d JW |
1551 | goto err_free_con; |
1552 | ||
caa84d95 VT |
1553 | mutex_init(&clt_path->init_mutex); |
1554 | uuid_gen(&clt_path->s.uuid); | |
1555 | memcpy(&clt_path->s.dst_addr, path->dst, | |
6a98d71d JW |
1556 | rdma_addr_size((struct sockaddr *)path->dst)); |
1557 | ||
1558 | /* | |
1559 | * rdma_resolve_addr() passes src_addr to cma_bind_addr, which |
1560 | * checks that the sa_family is non-zero. If the user passed src_addr=NULL, |
1561 | * the sess->src_addr will contain only zeros, which is fine. |
1562 | */ | |
1563 | if (path->src) | |
caa84d95 | 1564 | memcpy(&clt_path->s.src_addr, path->src, |
6a98d71d | 1565 | rdma_addr_size((struct sockaddr *)path->src)); |
caa84d95 VT |
1566 | strscpy(clt_path->s.sessname, clt->sessname, |
1567 | sizeof(clt_path->s.sessname)); | |
1568 | clt_path->clt = clt; | |
1569 | clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS; | |
1570 | init_waitqueue_head(&clt_path->state_wq); | |
1571 | clt_path->state = RTRS_CLT_CONNECTING; | |
1572 | atomic_set(&clt_path->connected_cnt, 0); | |
1573 | INIT_WORK(&clt_path->close_work, rtrs_clt_close_work); | |
c1289d5d | 1574 | INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work); |
caa84d95 VT |
1575 | INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work); |
1576 | rtrs_clt_init_hb(clt_path); | |
1577 | ||
1578 | clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry)); | |
1579 | if (!clt_path->mp_skip_entry) | |
6a98d71d JW |
1580 | goto err_free_stats; |
1581 | ||
1582 | for_each_possible_cpu(cpu) | |
caa84d95 | 1583 | INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu)); |
6a98d71d | 1584 | |
caa84d95 | 1585 | err = rtrs_clt_init_stats(clt_path->stats); |
6a98d71d JW |
1586 | if (err) |
1587 | goto err_free_percpu; | |
1588 | ||
caa84d95 | 1589 | return clt_path; |
6a98d71d JW |
1590 | |
1591 | err_free_percpu: | |
caa84d95 | 1592 | free_percpu(clt_path->mp_skip_entry); |
6a98d71d | 1593 | err_free_stats: |
caa84d95 | 1594 | kfree(clt_path->stats); |
6a98d71d | 1595 | err_free_con: |
caa84d95 VT |
1596 | kfree(clt_path->s.con); |
1597 | err_free_path: | |
1598 | kfree(clt_path); | |
6a98d71d JW |
1599 | err: |
1600 | return ERR_PTR(err); | |
1601 | } | |
1602 | ||
caa84d95 | 1603 | void free_path(struct rtrs_clt_path *clt_path) |
6a98d71d | 1604 | { |
caa84d95 VT |
1605 | free_percpu(clt_path->mp_skip_entry); |
1606 | mutex_destroy(&clt_path->init_mutex); | |
1607 | kfree(clt_path->s.con); | |
1608 | kfree(clt_path->rbufs); | |
1609 | kfree(clt_path); | |
6a98d71d JW |
1610 | } |
1611 | ||
caa84d95 | 1612 | static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid) |
6a98d71d JW |
1613 | { |
1614 | struct rtrs_clt_con *con; | |
1615 | ||
1616 | con = kzalloc(sizeof(*con), GFP_KERNEL); | |
1617 | if (!con) | |
1618 | return -ENOMEM; | |
1619 | ||
1620 | /* Map first two connections to the first CPU */ | |
1621 | con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids; | |
1622 | con->c.cid = cid; | |
caa84d95 | 1623 | con->c.path = &clt_path->s; |
a10431ef JW |
1624 | /* Align with srv, init as 1 */ |
1625 | atomic_set(&con->c.wr_cnt, 1); | |
fcf2959d | 1626 | mutex_init(&con->con_mutex); |
6a98d71d | 1627 | |
caa84d95 | 1628 | clt_path->s.con[cid] = &con->c; |
6a98d71d JW |
1629 | |
1630 | return 0; | |
1631 | } | |
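/*
 * The cid -> cpu mapping above, spelled out: cid 0 (the user
 * connection) and cid 1 both land on CPU 0, cid 2 on CPU 1, cid 3 on
 * CPU 2, and so on, wrapping modulo nr_cpu_ids on small machines.
 */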
1632 | ||
1633 | static void destroy_con(struct rtrs_clt_con *con) | |
1634 | { | |
caa84d95 | 1635 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d | 1636 | |
caa84d95 | 1637 | clt_path->s.con[con->c.cid] = NULL; |
fcf2959d | 1638 | mutex_destroy(&con->con_mutex); |
6a98d71d JW |
1639 | kfree(con); |
1640 | } | |
1641 | ||
1642 | static int create_con_cq_qp(struct rtrs_clt_con *con) | |
1643 | { | |
caa84d95 | 1644 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
a95fbe2a | 1645 | u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit; |
6a98d71d JW |
1646 | int err, cq_vector; |
1647 | struct rtrs_msg_rkey_rsp *rsp; | |
1648 | ||
fcf2959d | 1649 | lockdep_assert_held(&con->con_mutex); |
6a98d71d | 1650 | if (con->c.cid == 0) { |
0509ebfa | 1651 | max_send_sge = 1; |
6a98d71d | 1652 | /* We must be the first here */ |
caa84d95 | 1653 | if (WARN_ON(clt_path->s.dev)) |
6a98d71d JW |
1654 | return -EINVAL; |
1655 | ||
1656 | /* | |
1657 | * The whole session uses the device from the user connection. |
1658 | * Be careful not to close the user connection before the ib dev |
1659 | * is gracefully put. | |
1660 | */ | |
caa84d95 | 1661 | clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, |
6a98d71d | 1662 | &dev_pd); |
caa84d95 VT |
1663 | if (!clt_path->s.dev) { |
1664 | rtrs_wrn(clt_path->clt, | |
6a98d71d JW |
1665 | "rtrs_ib_dev_find_get_or_add(): no memory\n"); |
1666 | return -ENOMEM; | |
1667 | } | |
caa84d95 VT |
1668 | clt_path->s.dev_ref = 1; |
1669 | query_fast_reg_mode(clt_path); | |
1670 | wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; | |
a95fbe2a JW |
1671 | /* |
1672 | * Two completions (request + registration) per send. |
1673 | * Two per recv if always_invalidate is set on the server, |
1674 | * otherwise one per recv. |
1675 | * + 2 for drain and heartbeat, |
1676 | * in case the qp gets into an error state. |
1677 | */ | |
1678 | max_send_wr = | |
1679 | min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2); | |
1680 | max_recv_wr = max_send_wr; | |
6a98d71d JW |
1681 | } else { |
1682 | /* | |
1683 | * Here we assume that session members are correctly set. | |
1684 | * This is always true if user connection (cid == 0) is | |
1685 | * established first. | |
1686 | */ | |
caa84d95 | 1687 | if (WARN_ON(!clt_path->s.dev)) |
6a98d71d | 1688 | return -EINVAL; |
caa84d95 | 1689 | if (WARN_ON(!clt_path->queue_depth)) |
6a98d71d JW |
1690 | return -EINVAL; |
1691 | ||
caa84d95 | 1692 | wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; |
6a98d71d | 1693 | /* Shared between connections */ |
caa84d95 | 1694 | clt_path->s.dev_ref++; |
a95fbe2a | 1695 | max_send_wr = min_t(int, wr_limit, |
6a98d71d | 1696 | /* QD * (REQ + RSP + FR REGS or INVS) + drain */ |
caa84d95 | 1697 | clt_path->queue_depth * 3 + 1); |
a95fbe2a | 1698 | max_recv_wr = min_t(int, wr_limit, |
caa84d95 | 1699 | clt_path->queue_depth * 3 + 1); |
df1885a7 | 1700 | max_send_sge = 2; |
6a98d71d | 1701 | } |
cfcdbd9d | 1702 | atomic_set(&con->c.sq_wr_avail, max_send_wr); |
354462eb | 1703 | cq_num = max_send_wr + max_recv_wr; |
6a98d71d | 1704 | /* alloc iu to recv new rkey reply when server reports flags set */ |
caa84d95 | 1705 | if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { |
354462eb | 1706 | con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp), |
caa84d95 VT |
1707 | GFP_KERNEL, |
1708 | clt_path->s.dev->ib_dev, | |
6a98d71d JW |
1709 | DMA_FROM_DEVICE, |
1710 | rtrs_clt_rdma_done); | |
1711 | if (!con->rsp_ius) | |
1712 | return -ENOMEM; | |
354462eb | 1713 | con->queue_num = cq_num; |
6a98d71d | 1714 | } |
354462eb | 1715 | cq_num = max_send_wr + max_recv_wr; |
caa84d95 VT |
1716 | cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors; |
1717 | if (con->c.cid >= clt_path->s.irq_con_num) | |
1718 | err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, | |
354462eb | 1719 | cq_vector, cq_num, max_send_wr, |
2958a995 GK |
1720 | max_recv_wr, IB_POLL_DIRECT); |
1721 | else | |
caa84d95 | 1722 | err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, |
354462eb | 1723 | cq_vector, cq_num, max_send_wr, |
2958a995 | 1724 | max_recv_wr, IB_POLL_SOFTIRQ); |
6a98d71d JW |
1725 | /* |
1726 | * In case of error we do not bother to clean previous allocations, | |
1727 | * since destroy_con_cq_qp() must be called. | |
1728 | */ | |
6a98d71d JW |
1729 | return err; |
1730 | } | |
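/*
 * WR budget example for an IO connection (hypothetical numbers): with
 * queue_depth = 128 and wr_limit = 16384, max_send_wr = max_recv_wr =
 * min(16384, 128 * 3 + 1) = 385, i.e. one REQ, one RSP and one FR
 * REG/INV slot per in-flight IO plus a drain WR. The CQ is then sized
 * for their sum (cq_num = 770) so it can never overflow.
 */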
1731 | ||
1732 | static void destroy_con_cq_qp(struct rtrs_clt_con *con) | |
1733 | { | |
caa84d95 | 1734 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d JW |
1735 | |
1736 | /* | |
1737 | * Be careful here: destroy_con_cq_qp() can be called even if |
1738 | * create_con_cq_qp() failed, see comments there. |
1739 | */ | |
fcf2959d | 1740 | lockdep_assert_held(&con->con_mutex); |
6a98d71d JW |
1741 | rtrs_cq_qp_destroy(&con->c); |
1742 | if (con->rsp_ius) { | |
caa84d95 VT |
1743 | rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev, |
1744 | con->queue_num); | |
6a98d71d | 1745 | con->rsp_ius = NULL; |
354462eb | 1746 | con->queue_num = 0; |
6a98d71d | 1747 | } |
caa84d95 VT |
1748 | if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) { |
1749 | rtrs_ib_dev_put(clt_path->s.dev); | |
1750 | clt_path->s.dev = NULL; | |
6a98d71d JW |
1751 | } |
1752 | } | |
1753 | ||
1754 | static void stop_cm(struct rtrs_clt_con *con) | |
1755 | { | |
1756 | rdma_disconnect(con->c.cm_id); | |
1757 | if (con->c.qp) | |
1758 | ib_drain_qp(con->c.qp); | |
1759 | } | |
1760 | ||
1761 | static void destroy_cm(struct rtrs_clt_con *con) | |
1762 | { | |
1763 | rdma_destroy_id(con->c.cm_id); | |
1764 | con->c.cm_id = NULL; | |
1765 | } | |
1766 | ||
1767 | static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con) | |
1768 | { | |
d9372794 | 1769 | struct rtrs_path *s = con->c.path; |
6a98d71d JW |
1770 | int err; |
1771 | ||
fcf2959d | 1772 | mutex_lock(&con->con_mutex); |
6a98d71d | 1773 | err = create_con_cq_qp(con); |
fcf2959d | 1774 | mutex_unlock(&con->con_mutex); |
6a98d71d JW |
1775 | if (err) { |
1776 | rtrs_err(s, "create_con_cq_qp(), err: %d\n", err); | |
1777 | return err; | |
1778 | } | |
1779 | err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS); | |
2b3062e4 | 1780 | if (err) |
6a98d71d | 1781 | rtrs_err(s, "Resolving route failed, err: %d\n", err); |
6a98d71d JW |
1782 | |
1783 | return err; | |
1784 | } | |
1785 | ||
1786 | static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) | |
1787 | { | |
caa84d95 | 1788 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
f3433d79 | 1789 | struct rtrs_clt_sess *clt = clt_path->clt; |
6a98d71d JW |
1790 | struct rtrs_msg_conn_req msg; |
1791 | struct rdma_conn_param param; | |
1792 | ||
1793 | int err; | |
1794 | ||
1795 | param = (struct rdma_conn_param) { | |
1796 | .retry_count = 7, | |
1797 | .rnr_retry_count = 7, | |
1798 | .private_data = &msg, | |
1799 | .private_data_len = sizeof(msg), | |
1800 | }; | |
1801 | ||
1802 | msg = (struct rtrs_msg_conn_req) { | |
1803 | .magic = cpu_to_le16(RTRS_MAGIC), | |
1804 | .version = cpu_to_le16(RTRS_PROTO_VER), | |
1805 | .cid = cpu_to_le16(con->c.cid), | |
caa84d95 VT |
1806 | .cid_num = cpu_to_le16(clt_path->s.con_num), |
1807 | .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt), | |
6a98d71d | 1808 | }; |
caa84d95 VT |
1809 | msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0; |
1810 | uuid_copy(&msg.sess_uuid, &clt_path->s.uuid); | |
6a98d71d JW |
1811 | uuid_copy(&msg.paths_uuid, &clt->paths_uuid); |
1812 | ||
071ba4cc | 1813 | err = rdma_connect_locked(con->c.cm_id, ¶m); |
6a98d71d | 1814 | if (err) |
071ba4cc | 1815 | rtrs_err(clt, "rdma_connect_locked(): %d\n", err); |
6a98d71d JW |
1816 | |
1817 | return err; | |
1818 | } | |
1819 | ||
1820 | static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, | |
1821 | struct rdma_cm_event *ev) | |
1822 | { | |
caa84d95 | 1823 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
f3433d79 | 1824 | struct rtrs_clt_sess *clt = clt_path->clt; |
6a98d71d JW |
1825 | const struct rtrs_msg_conn_rsp *msg; |
1826 | u16 version, queue_depth; | |
1827 | int errno; | |
1828 | u8 len; | |
1829 | ||
1830 | msg = ev->param.conn.private_data; | |
1831 | len = ev->param.conn.private_data_len; | |
1832 | if (len < sizeof(*msg)) { | |
1833 | rtrs_err(clt, "Invalid RTRS connection response\n"); | |
1834 | return -ECONNRESET; | |
1835 | } | |
1836 | if (le16_to_cpu(msg->magic) != RTRS_MAGIC) { | |
1837 | rtrs_err(clt, "Invalid RTRS magic\n"); | |
1838 | return -ECONNRESET; | |
1839 | } | |
1840 | version = le16_to_cpu(msg->version); | |
1841 | if (version >> 8 != RTRS_PROTO_VER_MAJOR) { | |
1842 | rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n", | |
1843 | version >> 8, RTRS_PROTO_VER_MAJOR); | |
1844 | return -ECONNRESET; | |
1845 | } | |
1846 | errno = le16_to_cpu(msg->errno); | |
1847 | if (errno) { | |
1848 | rtrs_err(clt, "Invalid RTRS message: errno %d\n", | |
1849 | errno); | |
1850 | return -ECONNRESET; | |
1851 | } | |
1852 | if (con->c.cid == 0) { | |
1853 | queue_depth = le16_to_cpu(msg->queue_depth); | |
1854 | ||
caa84d95 | 1855 | if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) { |
5b73b799 MHI |
1856 | rtrs_err(clt, "Error: queue depth changed\n"); |
1857 | ||
1858 | /* | |
1859 | * Stop any more reconnection attempts | |
1860 | */ | |
caa84d95 | 1861 | clt_path->reconnect_attempts = -1; |
5b73b799 MHI |
1862 | rtrs_err(clt, |
1863 | "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n"); | |
1864 | return -ECONNRESET; | |
1865 | } | |
1866 | ||
caa84d95 VT |
1867 | if (!clt_path->rbufs) { |
1868 | clt_path->rbufs = kcalloc(queue_depth, | |
1869 | sizeof(*clt_path->rbufs), | |
1870 | GFP_KERNEL); | |
1871 | if (!clt_path->rbufs) | |
6a98d71d JW |
1872 | return -ENOMEM; |
1873 | } | |
caa84d95 VT |
1874 | clt_path->queue_depth = queue_depth; |
1875 | clt_path->s.signal_interval = min_not_zero(queue_depth, | |
e2d98504 | 1876 | (unsigned short) SERVICE_CON_QUEUE_DEPTH); |
caa84d95 VT |
1877 | clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size); |
1878 | clt_path->max_io_size = le32_to_cpu(msg->max_io_size); | |
1879 | clt_path->flags = le32_to_cpu(msg->flags); | |
1880 | clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size; | |
6a98d71d JW |
1881 | |
1882 | /* | |
5b73b799 | 1883 | * Global IO size is always a minimum. |
6a98d71d JW |
1884 | * If during a reconnection the server sends us a slightly |
1885 | * higher value, the client does not care and uses the cached minimum. |
1886 | * |
1887 | * Since we can have several sessions (paths) re-establishing |
1888 | * connections in parallel, use the lock. |
1889 | */ | |
1890 | mutex_lock(&clt->paths_mutex); | |
caa84d95 VT |
1891 | clt->queue_depth = clt_path->queue_depth; |
1892 | clt->max_io_size = min_not_zero(clt_path->max_io_size, | |
6a98d71d JW |
1893 | clt->max_io_size); |
1894 | mutex_unlock(&clt->paths_mutex); | |
1895 | ||
1896 | /* | |
1897 | * Cache the hca_port and hca_name for sysfs | |
1898 | */ | |
caa84d95 VT |
1899 | clt_path->hca_port = con->c.cm_id->port_num; |
1900 | scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name), | |
1901 | clt_path->s.dev->ib_dev->name); | |
1902 | clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr; | |
03e9b33a | 1903 | /* set for_new_clt, to allow future reconnect on any path */ |
caa84d95 | 1904 | clt_path->for_new_clt = 1; |
6a98d71d JW |
1905 | } |
1906 | ||
1907 | return 0; | |
1908 | } | |
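/*
 * Version-word example for the check above: the major version lives in
 * the high byte and the minor in the low byte, so (assuming
 * RTRS_PROTO_VER_MAJOR == 2 per the protocol header) a peer sending
 * 0x0201 passes (major 2, minor 1) while 0x0105 is rejected because
 * version >> 8 == 1.
 */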
1909 | ||
1910 | static inline void flag_success_on_conn(struct rtrs_clt_con *con) | |
1911 | { | |
caa84d95 | 1912 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d | 1913 | |
caa84d95 | 1914 | atomic_inc(&clt_path->connected_cnt); |
6a98d71d JW |
1915 | con->cm_err = 1; |
1916 | } | |
1917 | ||
1918 | static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con, | |
1919 | struct rdma_cm_event *ev) | |
1920 | { | |
d9372794 | 1921 | struct rtrs_path *s = con->c.path; |
6a98d71d JW |
1922 | const struct rtrs_msg_conn_rsp *msg; |
1923 | const char *rej_msg; | |
1924 | int status, errno; | |
1925 | u8 data_len; | |
1926 | ||
1927 | status = ev->status; | |
1928 | rej_msg = rdma_reject_msg(con->c.cm_id, status); | |
1929 | msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len); | |
1930 | ||
1931 | if (msg && data_len >= sizeof(*msg)) { | |
1932 | errno = (int16_t)le16_to_cpu(msg->errno); | |
1933 | if (errno == -EBUSY) | |
1934 | rtrs_err(s, | |
1935 | "Previous session is still exists on the server, please reconnect later\n"); | |
1936 | else | |
1937 | rtrs_err(s, | |
1938 | "Connect rejected: status %d (%s), rtrs errno %d\n", | |
1939 | status, rej_msg, errno); | |
1940 | } else { | |
1941 | rtrs_err(s, | |
1942 | "Connect rejected but with malformed message: status %d (%s)\n", | |
1943 | status, rej_msg); | |
1944 | } | |
1945 | ||
1946 | return -ECONNRESET; | |
1947 | } | |
1948 | ||
caa84d95 | 1949 | void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait) |
6a98d71d | 1950 | { |
5a93929d SP |
1951 | trace_rtrs_clt_close_conns(clt_path); |
1952 | ||
caa84d95 VT |
1953 | if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL)) |
1954 | queue_work(rtrs_wq, &clt_path->close_work); | |
6a98d71d | 1955 | if (wait) |
caa84d95 | 1956 | flush_work(&clt_path->close_work); |
6a98d71d JW |
1957 | } |
1958 | ||
1959 | static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err) | |
1960 | { | |
1961 | if (con->cm_err == 1) { | |
caa84d95 | 1962 | struct rtrs_clt_path *clt_path; |
6a98d71d | 1963 | |
caa84d95 VT |
1964 | clt_path = to_clt_path(con->c.path); |
1965 | if (atomic_dec_and_test(&clt_path->connected_cnt)) |
caa84d95 | 1967 | wake_up(&clt_path->state_wq); |
6a98d71d JW |
1968 | } |
1969 | con->cm_err = cm_err; | |
1970 | } | |
1971 | ||
1972 | static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, | |
1973 | struct rdma_cm_event *ev) | |
1974 | { | |
1975 | struct rtrs_clt_con *con = cm_id->context; | |
d9372794 | 1976 | struct rtrs_path *s = con->c.path; |
caa84d95 | 1977 | struct rtrs_clt_path *clt_path = to_clt_path(s); |
6a98d71d JW |
1978 | int cm_err = 0; |
1979 | ||
1980 | switch (ev->event) { | |
1981 | case RDMA_CM_EVENT_ADDR_RESOLVED: | |
1982 | cm_err = rtrs_rdma_addr_resolved(con); | |
1983 | break; | |
1984 | case RDMA_CM_EVENT_ROUTE_RESOLVED: | |
1985 | cm_err = rtrs_rdma_route_resolved(con); | |
1986 | break; | |
1987 | case RDMA_CM_EVENT_ESTABLISHED: | |
f553e760 | 1988 | cm_err = rtrs_rdma_conn_established(con, ev); |
4693d6b7 | 1989 | if (!cm_err) { |
6a98d71d JW |
1990 | /* |
1991 | * Report success and wake up. Here we abuse state_wq, | |
1992 | * i.e. wake up without state change, but we set cm_err. | |
1993 | */ | |
1994 | flag_success_on_conn(con); | |
caa84d95 | 1995 | wake_up(&clt_path->state_wq); |
6a98d71d JW |
1996 | return 0; |
1997 | } | |
1998 | break; | |
1999 | case RDMA_CM_EVENT_REJECTED: | |
2000 | cm_err = rtrs_rdma_conn_rejected(con, ev); | |
2001 | break; | |
16101b60 GK |
2002 | case RDMA_CM_EVENT_DISCONNECTED: |
2003 | /* No message for disconnecting */ | |
2004 | cm_err = -ECONNRESET; | |
2005 | break; | |
6a98d71d JW |
2006 | case RDMA_CM_EVENT_CONNECT_ERROR: |
2007 | case RDMA_CM_EVENT_UNREACHABLE: | |
16101b60 GK |
2008 | case RDMA_CM_EVENT_ADDR_CHANGE: |
2009 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: | |
c33d516a JW |
2010 | rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n", |
2011 | rdma_event_msg(ev->event), ev->status); | |
6a98d71d JW |
2012 | cm_err = -ECONNRESET; |
2013 | break; | |
2014 | case RDMA_CM_EVENT_ADDR_ERROR: | |
2015 | case RDMA_CM_EVENT_ROUTE_ERROR: | |
c33d516a JW |
2016 | rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n", |
2017 | rdma_event_msg(ev->event), ev->status); | |
6a98d71d JW |
2018 | cm_err = -EHOSTUNREACH; |
2019 | break; | |
6a98d71d JW |
2020 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
2021 | /* | |
2022 | * Device removal is a special case. Queue close and return 0. | |
2023 | */ | |
caa84d95 | 2024 | rtrs_clt_close_conns(clt_path, false); |
6a98d71d JW |
2025 | return 0; |
2026 | default: | |
c33d516a JW |
2027 | rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n", |
2028 | rdma_event_msg(ev->event), ev->status); | |
6a98d71d JW |
2029 | cm_err = -ECONNRESET; |
2030 | break; | |
2031 | } | |
2032 | ||
2033 | if (cm_err) { | |
2034 | /* | |
2035 | * A cm error makes sense only while the connection is being established; |
2036 | * in other cases we rely on the normal reconnect procedure. |
2037 | */ | |
2038 | flag_error_on_conn(con, cm_err); | |
2039 | rtrs_rdma_error_recovery(con); | |
2040 | } | |
2041 | ||
2042 | return 0; | |
2043 | } | |
2044 | ||
2045 | static int create_cm(struct rtrs_clt_con *con) | |
2046 | { | |
d9372794 | 2047 | struct rtrs_path *s = con->c.path; |
caa84d95 | 2048 | struct rtrs_clt_path *clt_path = to_clt_path(s); |
6a98d71d JW |
2049 | struct rdma_cm_id *cm_id; |
2050 | int err; | |
2051 | ||
2052 | cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con, | |
caa84d95 | 2053 | clt_path->s.dst_addr.ss_family == AF_IB ? |
6a98d71d JW |
2054 | RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC); |
2055 | if (IS_ERR(cm_id)) { | |
2056 | err = PTR_ERR(cm_id); | |
2057 | rtrs_err(s, "Failed to create CM ID, err: %d\n", err); | |
2058 | ||
2059 | return err; | |
2060 | } | |
2061 | con->c.cm_id = cm_id; | |
2062 | con->cm_err = 0; | |
2063 | /* allow the port to be reused */ | |
2064 | err = rdma_set_reuseaddr(cm_id, 1); | |
2065 | if (err != 0) { | |
2066 | rtrs_err(s, "Set address reuse failed, err: %d\n", err); | |
2067 | goto destroy_cm; | |
2068 | } | |
caa84d95 VT |
2069 | err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr, |
2070 | (struct sockaddr *)&clt_path->s.dst_addr, | |
6a98d71d JW |
2071 | RTRS_CONNECT_TIMEOUT_MS); |
2072 | if (err) { | |
2073 | rtrs_err(s, "Failed to resolve address, err: %d\n", err); | |
2074 | goto destroy_cm; | |
2075 | } | |
2076 | /* | |
2077 | * Combine connection status and session events. This is needed | |
2078 | * to wait for two possible cases: cm_err has something meaningful, |
2079 | * or the session state was really changed to error by device removal. |
2080 | */ | |
2081 | err = wait_event_interruptible_timeout( | |
caa84d95 VT |
2082 | clt_path->state_wq, |
2083 | con->cm_err || clt_path->state != RTRS_CLT_CONNECTING, | |
6a98d71d JW |
2084 | msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); |
2085 | if (err == 0 || err == -ERESTARTSYS) { | |
2086 | if (err == 0) | |
2087 | err = -ETIMEDOUT; | |
2088 | /* Timedout or interrupted */ | |
2089 | goto errr; | |
2090 | } | |
2091 | if (con->cm_err < 0) { | |
2092 | err = con->cm_err; | |
2093 | goto errr; | |
2094 | } | |
caa84d95 | 2095 | if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) { |
6a98d71d JW |
2096 | /* Device removal */ |
2097 | err = -ECONNABORTED; | |
2098 | goto errr; | |
2099 | } | |
2100 | ||
2101 | return 0; | |
2102 | ||
2103 | errr: | |
2104 | stop_cm(con); | |
fcf2959d | 2105 | mutex_lock(&con->con_mutex); |
6a98d71d | 2106 | destroy_con_cq_qp(con); |
fcf2959d | 2107 | mutex_unlock(&con->con_mutex); |
6a98d71d JW |
2108 | destroy_cm: |
2109 | destroy_cm(con); | |
2110 | ||
2111 | return err; | |
2112 | } | |
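/*
 * Return-value note for the wait above:
 * wait_event_interruptible_timeout() yields 0 on timeout, -ERESTARTSYS
 * when interrupted by a signal, and the remaining jiffies (>= 1) when
 * the condition became true, which is why a positive err simply falls
 * through to the cm_err and state checks.
 */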
2113 | ||
caa84d95 | 2114 | static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path) |
6a98d71d | 2115 | { |
f3433d79 | 2116 | struct rtrs_clt_sess *clt = clt_path->clt; |
6a98d71d JW |
2117 | int up; |
2118 | ||
2119 | /* | |
2120 | * We can fire RECONNECTED event only when all paths were | |
2121 | * connected on rtrs_clt_open(), then each was disconnected | |
2122 | * and the first one connected again. That's why we play this |
2123 | * nasty game with the counter value. |
2124 | */ | |
2125 | ||
2126 | mutex_lock(&clt->paths_ev_mutex); | |
2127 | up = ++clt->paths_up; | |
2128 | /* | |
2129 | * Here it is safe to access paths_num directly since the up counter |
2130 | * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is |
2131 | * in progress, thus path removals are impossible. |
2132 | */ | |
2133 | if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num) | |
2134 | clt->paths_up = clt->paths_num; | |
2135 | else if (up == 1) | |
2136 | clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED); | |
2137 | mutex_unlock(&clt->paths_ev_mutex); | |
2138 | ||
2139 | /* Mark session as established */ | |
caa84d95 VT |
2140 | clt_path->established = true; |
2141 | clt_path->reconnect_attempts = 0; | |
2142 | clt_path->stats->reconnects.successful_cnt++; | |
6a98d71d JW |
2143 | } |
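/*
 * Counter-game example: with paths_num = 2, paths_up starts at
 * MAX_PATHS_NUM. The two initial connects raise it to MAX_PATHS_NUM + 2,
 * which the check above folds back to 2, so no RECONNECTED event fires
 * during rtrs_clt_open(). Only after both paths drop to paths_up == 0
 * does the next successful connect hit up == 1 and deliver
 * RTRS_CLT_LINK_EV_RECONNECTED.
 */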
2144 | ||
caa84d95 | 2145 | static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path) |
6a98d71d | 2146 | { |
f3433d79 | 2147 | struct rtrs_clt_sess *clt = clt_path->clt; |
6a98d71d | 2148 | |
caa84d95 | 2149 | if (!clt_path->established) |
6a98d71d JW |
2150 | return; |
2151 | ||
caa84d95 | 2152 | clt_path->established = false; |
6a98d71d JW |
2153 | mutex_lock(&clt->paths_ev_mutex); |
2154 | WARN_ON(!clt->paths_up); | |
2155 | if (--clt->paths_up == 0) | |
2156 | clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED); | |
2157 | mutex_unlock(&clt->paths_ev_mutex); | |
2158 | } | |
2159 | ||
caa84d95 | 2160 | static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path) |
6a98d71d JW |
2161 | { |
2162 | struct rtrs_clt_con *con; | |
2163 | unsigned int cid; | |
2164 | ||
caa84d95 | 2165 | WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED); |
6a98d71d JW |
2166 | |
2167 | /* | |
2168 | * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes | |
2169 | * exactly in between. Start destroying after it finishes. | |
2170 | */ | |
caa84d95 VT |
2171 | mutex_lock(&clt_path->init_mutex); |
2172 | mutex_unlock(&clt_path->init_mutex); | |
6a98d71d JW |
2173 | |
2174 | /* | |
2175 | * All IO paths must observe !CONNECTED state before we | |
2176 | * free everything. | |
2177 | */ | |
2178 | synchronize_rcu(); | |
2179 | ||
caa84d95 | 2180 | rtrs_stop_hb(&clt_path->s); |
6a98d71d JW |
2181 | |
2182 | /* | |
2183 | * The order is utterly crucial: first disconnect and complete all |
2184 | * rdma requests with an error (thus setting in_use=false for requests), |
2185 | * then fail outstanding requests, checking in_use for each, and |
2186 | * finally notify the upper layer about the session disconnection. |
2187 | */ | |
2188 | ||
caa84d95 VT |
2189 | for (cid = 0; cid < clt_path->s.con_num; cid++) { |
2190 | if (!clt_path->s.con[cid]) | |
6a98d71d | 2191 | break; |
caa84d95 | 2192 | con = to_clt_con(clt_path->s.con[cid]); |
6a98d71d JW |
2193 | stop_cm(con); |
2194 | } | |
caa84d95 VT |
2195 | fail_all_outstanding_reqs(clt_path); |
2196 | free_path_reqs(clt_path); | |
2197 | rtrs_clt_path_down(clt_path); | |
6a98d71d JW |
2198 | |
2199 | /* | |
2200 | * Wait for graceful shutdown, namely when the peer side invokes |
2201 | * rdma_disconnect(). 'connected_cnt' is decremented only on |
2202 | * CM events, thus if the other side has crashed and hb has detected |
2203 | * that something is wrong, we will be stuck here for exactly the |
2204 | * timeout ms, since CM does not fire anything. That is fine, we |
2205 | * are not in a hurry. |
2206 | */ | |
caa84d95 VT |
2207 | wait_event_timeout(clt_path->state_wq, |
2208 | !atomic_read(&clt_path->connected_cnt), | |
6a98d71d JW |
2209 | msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); |
2210 | ||
caa84d95 VT |
2211 | for (cid = 0; cid < clt_path->s.con_num; cid++) { |
2212 | if (!clt_path->s.con[cid]) | |
6a98d71d | 2213 | break; |
caa84d95 | 2214 | con = to_clt_con(clt_path->s.con[cid]); |
fcf2959d | 2215 | mutex_lock(&con->con_mutex); |
6a98d71d | 2216 | destroy_con_cq_qp(con); |
fcf2959d | 2217 | mutex_unlock(&con->con_mutex); |
6a98d71d JW |
2218 | destroy_cm(con); |
2219 | destroy_con(con); | |
2220 | } | |
2221 | } | |
2222 | ||
caa84d95 | 2223 | static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path) |
6a98d71d | 2224 | { |
f3433d79 | 2225 | struct rtrs_clt_sess *clt = clt_path->clt; |
caa84d95 | 2226 | struct rtrs_clt_path *next; |
6a98d71d JW |
2227 | bool wait_for_grace = false; |
2228 | int cpu; | |
2229 | ||
2230 | mutex_lock(&clt->paths_mutex); | |
caa84d95 | 2231 | list_del_rcu(&clt_path->s.entry); |
6a98d71d JW |
2232 | |
2233 | /* Make sure everybody observes path removal. */ | |
2234 | synchronize_rcu(); | |
2235 | ||
2236 | /* | |
2237 | * At this point nobody sees @sess in the list, but still we have | |
2238 | * dangling pointer @pcpu_path which _can_ point to @sess. Since | |
2239 | * nobody can observe @sess in the list, we guarantee that IO path | |
2240 | * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal | |
2241 | * to @sess, but can never again become @sess. | |
2242 | */ | |
2243 | ||
2244 | /* | |
2245 | * Decrement the paths number only after the grace period, because |
2246 | * the caller of do_each_path() must first observe the list without |
2247 | * the path and only then the decremented paths number. |
2248 | * | |
2249 | * Otherwise there can be the following situation: | |
2250 | * o Two paths exist and IO is coming. | |
2251 | * o One path is removed: | |
2252 | * CPU#0 CPU#1 | |
2253 | * do_each_path(): rtrs_clt_remove_path_from_arr(): | |
2254 | * path = get_next_path() | |
2255 | * ^^^ list_del_rcu(path) | |
2256 | * [!CONNECTED path] clt->paths_num-- | |
2257 | * ^^^^^^^^^ | |
2258 | * load clt->paths_num from 2 to 1 | |
2259 | * ^^^^^^^^^ | |
2260 | * sees 1 | |
2261 | * | |
2262 | * path is observed as !CONNECTED, but do_each_path() loop | |
2263 | * ends, because expression i < clt->paths_num is false. | |
2264 | */ | |
2265 | clt->paths_num--; | |
2266 | ||
2267 | /* | |
2268 | * Get the @next path from the current @sess which is going to be |
2269 | * removed. If @sess is the last element, then @next is NULL. | |
2270 | */ | |
2271 | rcu_read_lock(); | |
c14adff2 | 2272 | next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path); |
6a98d71d JW |
2273 | rcu_read_unlock(); |
2274 | ||
2275 | /* | |
2276 | * @pcpu paths can still point to the path which is going to be | |
2277 | * removed, so change the pointer manually. | |
2278 | */ | |
2279 | for_each_possible_cpu(cpu) { | |
caa84d95 | 2280 | struct rtrs_clt_path __rcu **ppcpu_path; |
6a98d71d JW |
2281 | |
2282 | ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu); | |
2283 | if (rcu_dereference_protected(*ppcpu_path, | |
caa84d95 | 2284 | lockdep_is_held(&clt->paths_mutex)) != clt_path) |
6a98d71d JW |
2285 | /* |
2286 | * synchronize_rcu() was called just after deleting | |
2287 | * entry from the list, thus IO code path cannot | |
2288 | * change pointer back to the pointer which is going | |
2289 | * to be removed, we are safe here. | |
2290 | */ | |
2291 | continue; | |
2292 | ||
2293 | /* | |
2294 | * We race with IO code path, which also changes pointer, | |
2295 | * thus we have to be careful not to overwrite it. | |
2296 | */ | |
db77d84c GJ |
2297 | if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path, |
2298 | next)) | |
6a98d71d JW |
2299 | /* |
2300 | * @ppcpu_path was successfully replaced with @next, | |
2301 | * which means that someone could also have picked up |
2302 | * @sess and be dereferencing it right now, so waiting |
2303 | * for a grace period is required. |
2304 | */ | |
2305 | wait_for_grace = true; | |
2306 | } | |
2307 | if (wait_for_grace) | |
2308 | synchronize_rcu(); | |
2309 | ||
2310 | mutex_unlock(&clt->paths_mutex); | |
2311 | } | |
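/*
 * try_cmpxchg() note for the swap above: the per-cpu pointer is
 * replaced with @next only if it still equals the path being removed;
 * on failure the expected-value argument is updated with the value
 * observed. Success means a reader may already have fetched the old
 * pointer, hence the wait_for_grace handling.
 */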
2312 | ||
caa84d95 | 2313 | static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path) |
6a98d71d | 2314 | { |
f3433d79 | 2315 | struct rtrs_clt_sess *clt = clt_path->clt; |
6a98d71d JW |
2316 | |
2317 | mutex_lock(&clt->paths_mutex); | |
2318 | clt->paths_num++; | |
2319 | ||
caa84d95 | 2320 | list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); |
6a98d71d JW |
2321 | mutex_unlock(&clt->paths_mutex); |
2322 | } | |
2323 | ||
2324 | static void rtrs_clt_close_work(struct work_struct *work) | |
2325 | { | |
caa84d95 | 2326 | struct rtrs_clt_path *clt_path; |
6a98d71d | 2327 | |
caa84d95 | 2328 | clt_path = container_of(work, struct rtrs_clt_path, close_work); |
6a98d71d | 2329 | |
c1289d5d | 2330 | cancel_work_sync(&clt_path->err_recovery_work); |
caa84d95 VT |
2331 | cancel_delayed_work_sync(&clt_path->reconnect_dwork); |
2332 | rtrs_clt_stop_and_destroy_conns(clt_path); | |
2333 | rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL); | |
6a98d71d JW |
2334 | } |
2335 | ||
caa84d95 | 2336 | static int init_conns(struct rtrs_clt_path *clt_path) |
6a98d71d JW |
2337 | { |
2338 | unsigned int cid; | |
2339 | int err; | |
2340 | ||
2341 | /* | |
2342 | * On every new session connection, increase the reconnect counter |
2343 | * to avoid clashes with previous sessions that are not yet closed |
2344 | * on the server side. |
2345 | */ | |
caa84d95 | 2346 | clt_path->s.recon_cnt++; |
6a98d71d JW |
2347 | |
2348 | /* Establish all RDMA connections */ | |
caa84d95 VT |
2349 | for (cid = 0; cid < clt_path->s.con_num; cid++) { |
2350 | err = create_con(clt_path, cid); | |
6a98d71d JW |
2351 | if (err) |
2352 | goto destroy; | |
2353 | ||
caa84d95 | 2354 | err = create_cm(to_clt_con(clt_path->s.con[cid])); |
6a98d71d | 2355 | if (err) { |
caa84d95 | 2356 | destroy_con(to_clt_con(clt_path->s.con[cid])); |
6a98d71d JW |
2357 | goto destroy; |
2358 | } | |
2359 | } | |
caa84d95 | 2360 | err = alloc_path_reqs(clt_path); |
6a98d71d JW |
2361 | if (err) |
2362 | goto destroy; | |
2363 | ||
caa84d95 | 2364 | rtrs_start_hb(&clt_path->s); |
6a98d71d JW |
2365 | |
2366 | return 0; | |
2367 | ||
2368 | destroy: | |
2369 | while (cid--) { | |
caa84d95 | 2370 | struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]); |
6a98d71d JW |
2371 | |
2372 | stop_cm(con); | |
fcf2959d JW |
2373 | |
2374 | mutex_lock(&con->con_mutex); | |
6a98d71d | 2375 | destroy_con_cq_qp(con); |
fcf2959d | 2376 | mutex_unlock(&con->con_mutex); |
6a98d71d JW |
2377 | destroy_cm(con); |
2378 | destroy_con(con); | |
2379 | } | |
2380 | /* | |
2381 | * If we've never taken the async path and got an error, say, |
2382 | * doing rdma_resolve_addr(), switch to the CONNECTING_ERR state |
2383 | * manually to keep reconnecting. | |
2384 | */ | |
caa84d95 | 2385 | rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); |
6a98d71d JW |
2386 | |
2387 | return err; | |
2388 | } | |
2389 | ||
2390 | static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc) | |
2391 | { | |
3b89e92c | 2392 | struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); |
caa84d95 | 2393 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d JW |
2394 | struct rtrs_iu *iu; |
2395 | ||
2396 | iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); | |
caa84d95 | 2397 | rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); |
6a98d71d | 2398 | |
4693d6b7 | 2399 | if (wc->status != IB_WC_SUCCESS) { |
caa84d95 | 2400 | rtrs_err(clt_path->clt, "Path info request send failed: %s\n", |
6a98d71d | 2401 | ib_wc_status_msg(wc->status)); |
caa84d95 | 2402 | rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); |
6a98d71d JW |
2403 | return; |
2404 | } | |
2405 | ||
2406 | rtrs_clt_update_wc_stats(con); | |
2407 | } | |
2408 | ||
caa84d95 | 2409 | static int process_info_rsp(struct rtrs_clt_path *clt_path, |
6a98d71d JW |
2410 | const struct rtrs_msg_info_rsp *msg) |
2411 | { | |
2412 | unsigned int sg_cnt, total_len; | |
2413 | int i, sgi; | |
2414 | ||
2415 | sg_cnt = le16_to_cpu(msg->sg_cnt); | |
caa84d95 VT |
2416 | if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) { |
2417 | rtrs_err(clt_path->clt, | |
2418 | "Incorrect sg_cnt %d, is not multiple\n", | |
c3b16b67 | 2419 | sg_cnt); |
6a98d71d | 2420 | return -EINVAL; |
c3b16b67 GK |
2421 | } |
2422 | ||
6a98d71d JW |
2423 | /* |
2424 | * Check if IB immediate data size is enough to hold the mem_id and | |
2425 | * the offset inside the memory chunk. | |
2426 | */ | |
caa84d95 | 2427 | if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) > |
4693d6b7 | 2428 | MAX_IMM_PAYL_BITS) { |
caa84d95 | 2429 | rtrs_err(clt_path->clt, |
6a98d71d | 2430 | "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n", |
caa84d95 | 2431 | MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size); |
6a98d71d JW |
2432 | return -EINVAL; |
2433 | } | |
6a98d71d | 2434 | total_len = 0; |
caa84d95 | 2435 | for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) { |
6a98d71d JW |
2436 | const struct rtrs_sg_desc *desc = &msg->desc[sgi]; |
2437 | u32 len, rkey; | |
2438 | u64 addr; | |
2439 | ||
2440 | addr = le64_to_cpu(desc->addr); | |
2441 | rkey = le32_to_cpu(desc->key); | |
2442 | len = le32_to_cpu(desc->len); | |
2443 | ||
2444 | total_len += len; | |
2445 | ||
caa84d95 VT |
2446 | if (!len || (len % clt_path->chunk_size)) { |
2447 | rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n", | |
2448 | sgi, | |
6a98d71d JW |
2449 | len); |
2450 | return -EINVAL; | |
2451 | } | |
caa84d95 VT |
2452 | for ( ; len && i < clt_path->queue_depth; i++) { |
2453 | clt_path->rbufs[i].addr = addr; | |
2454 | clt_path->rbufs[i].rkey = rkey; | |
6a98d71d | 2455 | |
caa84d95 VT |
2456 | len -= clt_path->chunk_size; |
2457 | addr += clt_path->chunk_size; | |
6a98d71d JW |
2458 | } |
2459 | } | |
2460 | /* Sanity check */ | |
caa84d95 VT |
2461 | if (sgi != sg_cnt || i != clt_path->queue_depth) { |
2462 | rtrs_err(clt_path->clt, | |
2463 | "Incorrect sg vector, not fully mapped\n"); | |
6a98d71d JW |
2464 | return -EINVAL; |
2465 | } | |
caa84d95 VT |
2466 | if (total_len != clt_path->chunk_size * clt_path->queue_depth) { |
2467 | rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len); | |
6a98d71d JW |
2468 | return -EINVAL; |
2469 | } | |
2470 | ||
2471 | return 0; | |
2472 | } | |
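/*
 * Bit-budget example for the immediate-payload check above
 * (illustrative numbers): with sg_cnt = 128 and chunk_size = 64 KiB,
 * encoding mem_id plus offset needs (ilog2(127) + 1) +
 * (ilog2(65535) + 1) = 7 + 16 = 23 bits, which fits assuming
 * MAX_IMM_PAYL_BITS is 28 (a 32-bit immediate minus a 4-bit type
 * field, as defined in the rtrs protocol headers).
 */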
2473 | ||
2474 | static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) | |
2475 | { | |
3b89e92c | 2476 | struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); |
caa84d95 | 2477 | struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
6a98d71d JW |
2478 | struct rtrs_msg_info_rsp *msg; |
2479 | enum rtrs_clt_state state; | |
2480 | struct rtrs_iu *iu; | |
2481 | size_t rx_sz; | |
2482 | int err; | |
2483 | ||
2484 | state = RTRS_CLT_CONNECTING_ERR; | |
2485 | ||
2486 | WARN_ON(con->c.cid); | |
2487 | iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); | |
4693d6b7 | 2488 | if (wc->status != IB_WC_SUCCESS) { |
caa84d95 | 2489 | rtrs_err(clt_path->clt, "Path info response recv failed: %s\n", |
6a98d71d JW |
2490 | ib_wc_status_msg(wc->status)); |
2491 | goto out; | |
2492 | } | |
2493 | WARN_ON(wc->opcode != IB_WC_RECV); | |
2494 | ||
4693d6b7 | 2495 | if (wc->byte_len < sizeof(*msg)) { |
caa84d95 | 2496 | rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", |
6a98d71d JW |
2497 | wc->byte_len); |
2498 | goto out; | |
2499 | } | |
caa84d95 | 2500 | ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, |
6a98d71d JW |
2501 | iu->size, DMA_FROM_DEVICE); |
2502 | msg = iu->buf; | |
4693d6b7 | 2503 | if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) { |
caa84d95 | 2504 | rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n", |
6a98d71d JW |
2505 | le16_to_cpu(msg->type)); |
2506 | goto out; | |
2507 | } | |
2508 | rx_sz = sizeof(*msg); | |
2509 | rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt); | |
4693d6b7 | 2510 | if (wc->byte_len < rx_sz) { |
caa84d95 | 2511 | rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", |
6a98d71d JW |
2512 | wc->byte_len); |
2513 | goto out; | |
2514 | } | |
caa84d95 | 2515 | err = process_info_rsp(clt_path, msg); |
4693d6b7 | 2516 | if (err) |
6a98d71d JW |
2517 | goto out; |
2518 | ||
caa84d95 | 2519 | err = post_recv_path(clt_path); |
4693d6b7 | 2520 | if (err) |
6a98d71d JW |
2521 | goto out; |
2522 | ||
2523 | state = RTRS_CLT_CONNECTED; | |
2524 | ||
2525 | out: | |
2526 | rtrs_clt_update_wc_stats(con); | |
caa84d95 VT |
2527 | rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); |
2528 | rtrs_clt_change_state_get_old(clt_path, state, NULL); | |
6a98d71d JW |
2529 | } |
2530 | ||
caa84d95 | 2531 | static int rtrs_send_path_info(struct rtrs_clt_path *clt_path) |
6a98d71d | 2532 | { |
caa84d95 | 2533 | struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]); |
6a98d71d JW |
2534 | struct rtrs_msg_info_req *msg; |
2535 | struct rtrs_iu *tx_iu, *rx_iu; | |
2536 | size_t rx_sz; | |
2537 | int err; | |
2538 | ||
2539 | rx_sz = sizeof(struct rtrs_msg_info_rsp); | |
caa84d95 | 2540 | rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth; |
6a98d71d JW |
2541 | |
2542 | tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL, | |
caa84d95 | 2543 | clt_path->s.dev->ib_dev, DMA_TO_DEVICE, |
6a98d71d | 2544 | rtrs_clt_info_req_done); |
caa84d95 | 2545 | rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev, |
6a98d71d | 2546 | DMA_FROM_DEVICE, rtrs_clt_info_rsp_done); |
4693d6b7 | 2547 | if (!tx_iu || !rx_iu) { |
6a98d71d JW |
2548 | err = -ENOMEM; |
2549 | goto out; | |
2550 | } | |
2551 | /* Prepare for getting info response */ | |
2552 | err = rtrs_iu_post_recv(&usr_con->c, rx_iu); | |
4693d6b7 | 2553 | if (err) { |
caa84d95 | 2554 | rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err); |
6a98d71d JW |
2555 | goto out; |
2556 | } | |
2557 | rx_iu = NULL; | |
2558 | ||
2559 | msg = tx_iu->buf; | |
2560 | msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ); | |
caa84d95 | 2561 | memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname)); |
6a98d71d | 2562 | |
caa84d95 VT |
2563 | ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, |
2564 | tx_iu->dma_addr, | |
6a98d71d JW |
2565 | tx_iu->size, DMA_TO_DEVICE); |
2566 | ||
2567 | /* Send info request */ | |
2568 | err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL); | |
4693d6b7 | 2569 | if (err) { |
caa84d95 | 2570 | rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err); |
6a98d71d JW |
2571 | goto out; |
2572 | } | |
2573 | tx_iu = NULL; | |
2574 | ||
2575 | /* Wait for state change */ | |
caa84d95 VT |
2576 | wait_event_interruptible_timeout(clt_path->state_wq, |
2577 | clt_path->state != RTRS_CLT_CONNECTING, | |
6a98d71d JW |
2578 | msecs_to_jiffies( |
2579 | RTRS_CONNECT_TIMEOUT_MS)); | |
caa84d95 VT |
2580 | if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) { |
2581 | if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR) | |
6a98d71d JW |
2582 | err = -ECONNRESET; |
2583 | else | |
2584 | err = -ETIMEDOUT; | |
6a98d71d JW |
2585 | } |
2586 | ||
2587 | out: | |
2588 | if (tx_iu) | |
caa84d95 | 2589 | rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1); |
6a98d71d | 2590 | if (rx_iu) |
caa84d95 | 2591 | rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1); |
4693d6b7 | 2592 | if (err) |
6a98d71d | 2593 | /* If we've never taken the async path because of malloc problems */ |
caa84d95 VT |
2594 | rtrs_clt_change_state_get_old(clt_path, |
2595 | RTRS_CLT_CONNECTING_ERR, NULL); | |
6a98d71d JW |
2596 | |
2597 | return err; | |
2598 | } | |
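/*
 * Sizing example for rx_sz above (illustrative): with queue_depth =
 * 128 the response buffer must leave room for up to 128 struct
 * rtrs_sg_desc entries after the header, since the server may describe
 * every chunk with its own descriptor; process_info_rsp() then accepts
 * any sg_cnt that divides queue_depth.
 */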
2599 | ||
2600 | /** | |
caa84d95 VT |
2601 | * init_path() - establishes all path connections and does handshake |
2602 | * @clt_path: client path. | |
6a98d71d JW |
2603 | * In case of error, a full close or reconnect procedure should be taken, |
2604 | * because async reconnect or close works can already be started. |
2605 | */ | |
caa84d95 | 2606 | static int init_path(struct rtrs_clt_path *clt_path) |
6a98d71d JW |
2607 | { |
2608 | int err; | |
7c71f0d1 GK |
2609 | char str[NAME_MAX]; |
2610 | struct rtrs_addr path = { | |
caa84d95 VT |
2611 | .src = &clt_path->s.src_addr, |
2612 | .dst = &clt_path->s.dst_addr, | |
7c71f0d1 GK |
2613 | }; |
2614 | ||
2615 | rtrs_addr_to_str(&path, str, sizeof(str)); | |
6a98d71d | 2616 | |
caa84d95 VT |
2617 | mutex_lock(&clt_path->init_mutex); |
2618 | err = init_conns(clt_path); | |
6a98d71d | 2619 | if (err) { |
caa84d95 | 2620 | rtrs_err(clt_path->clt, |
2f37b017 | 2621 | "init_conns() failed: err=%d path=%s [%s:%u]\n", err, |
caa84d95 | 2622 | str, clt_path->hca_name, clt_path->hca_port); |
6a98d71d JW |
2623 | goto out; |
2624 | } | |
caa84d95 | 2625 | err = rtrs_send_path_info(clt_path); |
6a98d71d | 2626 | if (err) { |
caa84d95 VT |
2627 | rtrs_err(clt_path->clt, |
2628 | "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n", | |
2629 | err, str, clt_path->hca_name, clt_path->hca_port); | |
6a98d71d JW |
2630 | goto out; |
2631 | } | |
caa84d95 | 2632 | rtrs_clt_path_up(clt_path); |
6a98d71d | 2633 | out: |
caa84d95 | 2634 | mutex_unlock(&clt_path->init_mutex); |
6a98d71d JW |
2635 | |
2636 | return err; | |
2637 | } | |
2638 | ||
2639 | static void rtrs_clt_reconnect_work(struct work_struct *work) | |
2640 | { | |
caa84d95 | 2641 | struct rtrs_clt_path *clt_path; |
f3433d79 | 2642 | struct rtrs_clt_sess *clt; |
6a98d71d JW |
2643 | int err; |
2644 | ||
caa84d95 VT |
2645 | clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path, |
2646 | reconnect_dwork); | |
2647 | clt = clt_path->clt; | |
6a98d71d | 2648 | |
5a93929d SP |
2649 | trace_rtrs_clt_reconnect_work(clt_path); |
2650 | ||
caa84d95 | 2651 | if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING) |
6a98d71d JW |
2652 | return; |
2653 | ||
caa84d95 VT |
2654 | if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) { |
2655 | /* Close a path completely if max attempts is reached */ | |
2656 | rtrs_clt_close_conns(clt_path, false); | |
6a98d71d JW |
2657 | return; |
2658 | } | |
caa84d95 | 2659 | clt_path->reconnect_attempts++; |
6a98d71d | 2660 | |
6a98d71d | 2661 | msleep(RTRS_RECONNECT_BACKOFF); |
caa84d95 VT |
2662 | if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) { |
2663 | err = init_path(clt_path); | |
6a98d71d JW |
2664 | if (err) |
2665 | goto reconnect_again; | |
2666 | } | |
2667 | ||
2668 | return; | |
2669 | ||
2670 | reconnect_again: | |
caa84d95 VT |
2671 | if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) { |
2672 | clt_path->stats->reconnects.fail_cnt++; | |
c1289d5d | 2673 | queue_work(rtrs_wq, &clt_path->err_recovery_work); |
6a98d71d JW |
2674 | } |
2675 | } | |
2676 | ||
2677 | static void rtrs_clt_dev_release(struct device *dev) | |
2678 | { | |
f3433d79 VT |
2679 | struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, |
2680 | dev); | |
6a98d71d | 2681 | |
8700af2c MHI |
2682 | mutex_destroy(&clt->paths_ev_mutex); |
2683 | mutex_destroy(&clt->paths_mutex); | |
6a98d71d JW |
2684 | kfree(clt); |
2685 | } | |
2686 | ||
f3433d79 | 2687 | static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num, |
6a98d71d JW |
2688 | u16 port, size_t pdu_sz, void *priv, |
2689 | void (*link_ev)(void *priv, | |
2690 | enum rtrs_clt_link_ev ev), | |
6a98d71d JW |
2691 | unsigned int reconnect_delay_sec, |
2692 | unsigned int max_reconnect_attempts) | |
2693 | { | |
f3433d79 | 2694 | struct rtrs_clt_sess *clt; |
6a98d71d JW |
2695 | int err; |
2696 | ||
2697 | if (!paths_num || paths_num > MAX_PATHS_NUM) | |
2698 | return ERR_PTR(-EINVAL); | |
2699 | ||
2700 | if (strlen(sessname) >= sizeof(clt->sessname)) | |
2701 | return ERR_PTR(-EINVAL); | |
2702 | ||
2703 | clt = kzalloc(sizeof(*clt), GFP_KERNEL); | |
2704 | if (!clt) | |
2705 | return ERR_PTR(-ENOMEM); | |
2706 | ||
2707 | clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path)); | |
2708 | if (!clt->pcpu_path) { | |
2709 | kfree(clt); | |
2710 | return ERR_PTR(-ENOMEM); | |
2711 | } | |
2712 | ||
8700af2c MHI |
2713 | clt->dev.class = rtrs_clt_dev_class; |
2714 | clt->dev.release = rtrs_clt_dev_release; | |
6a98d71d JW |
2715 | uuid_gen(&clt->paths_uuid); |
2716 | INIT_LIST_HEAD_RCU(&clt->paths_list); | |
2717 | clt->paths_num = paths_num; | |
2718 | clt->paths_up = MAX_PATHS_NUM; | |
2719 | clt->port = port; | |
2720 | clt->pdu_sz = pdu_sz; | |
6fc45596 | 2721 | clt->max_segments = RTRS_MAX_SEGMENTS; |
6a98d71d JW |
2722 | clt->reconnect_delay_sec = reconnect_delay_sec; |
2723 | clt->max_reconnect_attempts = max_reconnect_attempts; | |
2724 | clt->priv = priv; | |
2725 | clt->link_ev = link_ev; | |
2726 | clt->mp_policy = MP_POLICY_MIN_INFLIGHT; | |
2d612f0d | 2727 | strscpy(clt->sessname, sessname, sizeof(clt->sessname)); |
6a98d71d JW |
2728 | init_waitqueue_head(&clt->permits_wait); |
2729 | mutex_init(&clt->paths_ev_mutex); | |
2730 | mutex_init(&clt->paths_mutex); | |
8700af2c | 2731 | device_initialize(&clt->dev); |
6a98d71d | 2732 | |
6a98d71d | 2733 | err = dev_set_name(&clt->dev, "%s", sessname); |
eab09824 | 2734 | if (err) |
8700af2c MHI |
2735 | goto err_put; |
2736 | ||
6a98d71d JW |
2737 | /* |
2738 | * Suppress user space notification until | |
2739 | * sysfs files are created | |
2740 | */ | |
2741 | dev_set_uevent_suppress(&clt->dev, true); | |
8700af2c MHI |
2742 | err = device_add(&clt->dev); |
2743 | if (err) | |
2744 | goto err_put; | |
6a98d71d JW |
2745 | |
2746 | clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); | |
2747 | if (!clt->kobj_paths) { | |
eab09824 | 2748 | err = -ENOMEM; |
8700af2c | 2749 | goto err_del; |
6a98d71d JW |
2750 | } |
2751 | err = rtrs_clt_create_sysfs_root_files(clt); | |
2752 | if (err) { | |
6a98d71d JW |
2753 | kobject_del(clt->kobj_paths); |
2754 | kobject_put(clt->kobj_paths); | |
8700af2c | 2755 | goto err_del; |
6a98d71d JW |
2756 | } |
2757 | dev_set_uevent_suppress(&clt->dev, false); | |
2758 | kobject_uevent(&clt->dev.kobj, KOBJ_ADD); | |
2759 | ||
2760 | return clt; | |
8700af2c MHI |
2761 | err_del: |
2762 | device_del(&clt->dev); | |
2763 | err_put: | |
eab09824 | 2764 | free_percpu(clt->pcpu_path); |
8700af2c | 2765 | put_device(&clt->dev); |
eab09824 | 2766 | return ERR_PTR(err); |
6a98d71d JW |
2767 | } |
2768 | ||
f3433d79 | 2769 | static void free_clt(struct rtrs_clt_sess *clt) |
6a98d71d | 2770 | { |
6a98d71d | 2771 | free_percpu(clt->pcpu_path); |
8700af2c MHI |
2772 | |
2773 | /* | |
2774 | * The release callback will free clt and destroy the mutexes on the last put |
2775 | */ | |
6a98d71d JW |
2776 | device_unregister(&clt->dev); |
2777 | } | |
2778 | ||
2779 | /** | |
caa84d95 | 2780 | * rtrs_clt_open() - Open a session to an RTRS server |
6a98d71d | 2781 | * @ops: holds the link event callback and the private pointer. |
9c477178 | 2782 | * @pathname: name of the path to an RTRS server |
6a98d71d JW |
2783 | * @paths: Paths to be established defined by their src and dst addresses |
2784 | * @paths_num: Number of elements in the @paths array | |
2785 | * @port: port to be used by the RTRS session | |
2786 | * @pdu_sz: Size of extra payload which can be accessed after permit allocation. | |
2787 | * @reconnect_delay_sec: time between reconnect tries | |
6a98d71d JW |
2788 | * @max_reconnect_attempts: Number of times to reconnect on error before giving |
2789 | * up, 0 for disabled, -1 for forever |
2958a995 | 2790 | * @nr_poll_queues: number of polling-mode connections using the IB_POLL_DIRECT flag |
6a98d71d JW |
2791 | * |
2792 | * Starts session establishment with the RTRS server. The function can block |
2793 | * up to ~2000ms before it returns. | |
2794 | * | |
2795 | * Return a valid pointer on success otherwise PTR_ERR. | |
2796 | */ | |
f3433d79 | 2797 | struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops, |
caa84d95 | 2798 | const char *pathname, |
6a98d71d JW |
2799 | const struct rtrs_addr *paths, |
2800 | size_t paths_num, u16 port, | |
2801 | size_t pdu_sz, u8 reconnect_delay_sec, | |
2958a995 | 2802 | s16 max_reconnect_attempts, u32 nr_poll_queues) |
6a98d71d | 2803 | { |
caa84d95 | 2804 | struct rtrs_clt_path *clt_path, *tmp; |
f3433d79 | 2805 | struct rtrs_clt_sess *clt; |
6a98d71d JW |
2806 | int err, i; |
2807 | ||
caa84d95 VT |
2808 | if (strchr(pathname, '/') || strchr(pathname, '.')) { |
2809 | pr_err("pathname cannot contain / and .\n"); | |
dea7bb3a MHI |
2810 | err = -EINVAL; |
2811 | goto out; | |
2812 | } | |
2813 | ||
caa84d95 | 2814 | clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv, |
6a98d71d | 2815 | ops->link_ev, |
7404bdde | 2816 | reconnect_delay_sec, |
6a98d71d JW |
2817 | max_reconnect_attempts); |
2818 | if (IS_ERR(clt)) { | |
2819 | err = PTR_ERR(clt); | |
2820 | goto out; | |
2821 | } | |
2822 | for (i = 0; i < paths_num; i++) { | |
caa84d95 | 2823 | struct rtrs_clt_path *clt_path; |
6a98d71d | 2824 | |
caa84d95 | 2825 | clt_path = alloc_path(clt, &paths[i], nr_cpu_ids, |
7404bdde | 2826 | nr_poll_queues); |
caa84d95 VT |
2827 | if (IS_ERR(clt_path)) { |
2828 | err = PTR_ERR(clt_path); | |
2829 | goto close_all_path; | |
6a98d71d | 2830 | } |
03e9b33a | 2831 | if (!i) |
caa84d95 VT |
2832 | clt_path->for_new_clt = 1; |
2833 | list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); | |
6a98d71d | 2834 | |
caa84d95 | 2835 | err = init_path(clt_path); |
6a98d71d | 2836 | if (err) { |
caa84d95 VT |
2837 | list_del_rcu(&clt_path->s.entry); |
2838 | rtrs_clt_close_conns(clt_path, true); | |
2839 | free_percpu(clt_path->stats->pcpu_stats); | |
2840 | kfree(clt_path->stats); | |
2841 | free_path(clt_path); | |
2842 | goto close_all_path; | |
6a98d71d JW |
2843 | } |
2844 | ||
caa84d95 | 2845 | err = rtrs_clt_create_path_files(clt_path); |
6a98d71d | 2846 | if (err) { |
caa84d95 VT |
2847 | list_del_rcu(&clt_path->s.entry); |
2848 | rtrs_clt_close_conns(clt_path, true); | |
2849 | free_percpu(clt_path->stats->pcpu_stats); | |
2850 | kfree(clt_path->stats); | |
2851 | free_path(clt_path); | |
2852 | goto close_all_path; | |
6a98d71d JW |
2853 | } |
2854 | } | |
2855 | err = alloc_permits(clt); | |
2856 | if (err) | |
caa84d95 | 2857 | goto close_all_path; |
6a98d71d JW |
2858 | |
2859 | return clt; | |
2860 | ||
caa84d95 VT |
2861 | close_all_path: |
2862 | list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { | |
2863 | rtrs_clt_destroy_path_files(clt_path, NULL); | |
2864 | rtrs_clt_close_conns(clt_path, true); | |
2865 | kobject_put(&clt_path->kobj); | |
6a98d71d | 2866 | } |
7b47b27f | 2867 | rtrs_clt_destroy_sysfs_root(clt); |
6a98d71d JW |
2868 | free_clt(clt); |
2869 | ||
2870 | out: | |
2871 | return ERR_PTR(err); | |
2872 | } | |
2873 | EXPORT_SYMBOL(rtrs_clt_open); | |
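/*
 * Minimal caller sketch (hypothetical ULP; my_link_ev, my_pdu and the
 * numeric knobs are illustrative, only the signature above is real):
 *
 *	static void my_link_ev(void *priv, enum rtrs_clt_link_ev ev)
 *	{
 *		... react to RTRS_CLT_LINK_EV_{RECONNECTED,DISCONNECTED} ...
 *	}
 *
 *	struct rtrs_clt_ops ops = { .priv = me, .link_ev = my_link_ev };
 *	struct rtrs_clt_sess *sess;
 *
 *	sess = rtrs_clt_open(&ops, "blk-sess-0", paths, 1, port,
 *			     sizeof(struct my_pdu), 30, 15, 0);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */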
2874 | ||
2875 | /** | |
caa84d95 | 2876 | * rtrs_clt_close() - Close a session |
6a98d71d JW |
2877 | * @clt: Session handle. Session is freed upon return. |
2878 | */ | |
f3433d79 | 2879 | void rtrs_clt_close(struct rtrs_clt_sess *clt) |
6a98d71d | 2880 | { |
caa84d95 | 2881 | struct rtrs_clt_path *clt_path, *tmp; |
6a98d71d JW |
2882 | |
2883 | /* Firstly forbid sysfs access */ | |
7b47b27f | 2884 | rtrs_clt_destroy_sysfs_root(clt); |
6a98d71d JW |
2885 | |
2886 | /* Now it is safe to iterate over all paths without locks */ | |
caa84d95 VT |
2887 | list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { |
2888 | rtrs_clt_close_conns(clt_path, true); | |
2889 | rtrs_clt_destroy_path_files(clt_path, NULL); | |
2890 | kobject_put(&clt_path->kobj); | |
6a98d71d | 2891 | } |
c46fa891 | 2892 | free_permits(clt); |
6a98d71d JW |
2893 | free_clt(clt); |
2894 | } | |
2895 | EXPORT_SYMBOL(rtrs_clt_close); | |
2896 | ||
caa84d95 | 2897 | int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path) |
6a98d71d JW |
2898 | { |
2899 | enum rtrs_clt_state old_state; | |
2900 | int err = -EBUSY; | |
2901 | bool changed; | |
2902 | ||
caa84d95 VT |
2903 | changed = rtrs_clt_change_state_get_old(clt_path, |
2904 | RTRS_CLT_RECONNECTING, | |
6a98d71d JW |
2905 | &old_state); |
2906 | if (changed) { | |
caa84d95 | 2907 | clt_path->reconnect_attempts = 0; |
c1289d5d | 2908 | rtrs_clt_stop_and_destroy_conns(clt_path); |
caa84d95 | 2909 | queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0); |
6a98d71d JW |
2910 | } |
2911 | if (changed || old_state == RTRS_CLT_RECONNECTING) { | |
2912 | /* | |
2913 | * flush_delayed_work() queues pending work for immediate | |
2914 | * execution, so do the flush if we have queued something | |
2915 | * right now or work is pending. | |
2916 | */ | |
caa84d95 VT |
2917 | flush_delayed_work(&clt_path->reconnect_dwork); |
2918 | err = (READ_ONCE(clt_path->state) == | |
6a98d71d JW |
2919 | RTRS_CLT_CONNECTED ? 0 : -ENOTCONN); |
2920 | } | |
2921 | ||
2922 | return err; | |
2923 | } | |
2924 | ||
caa84d95 | 2925 | int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path, |
6a98d71d JW |
2926 | const struct attribute *sysfs_self) |
2927 | { | |
2928 | enum rtrs_clt_state old_state; | |
2929 | bool changed; | |
2930 | ||
2931 | /* | |
2932 | * Keep stopping the path until its state changes to DEAD |
2933 | * or is observed as DEAD: |
2934 | * 1. State was changed to DEAD - we were fast and nobody |
2935 | *    invoked rtrs_clt_reconnect(), which could have started |
2936 | *    reconnecting again. |
2937 | * 2. State was observed as DEAD - someone else is removing |
2938 | *    the path in parallel. |
2939 | */ | |
2940 | do { | |
caa84d95 VT |
2941 | rtrs_clt_close_conns(clt_path, true); |
2942 | changed = rtrs_clt_change_state_get_old(clt_path, | |
6a98d71d JW |
2943 | RTRS_CLT_DEAD, |
2944 | &old_state); | |
2945 | } while (!changed && old_state != RTRS_CLT_DEAD); | |
2946 | ||
4693d6b7 | 2947 | if (changed) { |
caa84d95 VT |
2948 | rtrs_clt_remove_path_from_arr(clt_path); |
2949 | rtrs_clt_destroy_path_files(clt_path, sysfs_self); | |
2950 | kobject_put(&clt_path->kobj); | |
6a98d71d JW |
2951 | } |
2952 | ||
2953 | return 0; | |
2954 | } | |
2955 | ||
f3433d79 | 2956 | void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value) |
6a98d71d JW |
2957 | { |
2958 | clt->max_reconnect_attempts = (unsigned int)value; | |
2959 | } | |
2960 | ||
f3433d79 | 2961 | int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt) |
6a98d71d JW |
2962 | { |
2963 | return (int)clt->max_reconnect_attempts; | |
2964 | } | |
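Design note: the round-trip through unsigned int is what lets a negative
value (e.g. -1) written via sysfs act as "practically unlimited": stored as
UINT_MAX, it is never reached by the reconnect-attempt counter in the
reconnect worker earlier in this file, yet it still reads back as -1 through
the getter.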
2965 | ||
2966 | /** | |
2967 | * rtrs_clt_request() - Request data transfer to/from server via RDMA. | |
2968 | * | |
2969 | * @dir: READ/WRITE | |
2970 | * @ops: Confirmation callback and the private pointer passed to it. |
2971 | * @clt: Session | |
2972 | * @permit: Preallocated permit | |
2973 | * @vec: Message that is sent to server together with the request. | |
2974 | * The total length of all @vec elements must not exceed IO_MSG_SIZE. |
2975 | * Since the msg is copied internally it can be allocated on stack. | |
2976 | * @nr: Number of elements in @vec. | |
2977 | * @data_len: length of data sent to/from server | |
2978 | * @sg: Pages to be sent/received to/from server. | |
2979 | * @sg_cnt: Number of elements in @sg. |
2980 | * | |
2981 | * Return: | |
2982 | * 0: Success | |
2983 | * <0: Error | |
2984 | * | |
2985 | * On dir=READ the rtrs client requests a data transfer from the server |
2986 | * to the client. The data the server responds with is stored in @sg by |
2987 | * the time the @ops confirmation callback is invoked. |
2988 | * On dir=WRITE the rtrs client RDMA-writes the data in @sg to the server. |
2989 | */ | |
2990 | int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, | |
f3433d79 VT |
2991 | struct rtrs_clt_sess *clt, struct rtrs_permit *permit, |
2992 | const struct kvec *vec, size_t nr, size_t data_len, | |
2993 | struct scatterlist *sg, unsigned int sg_cnt) | |
6a98d71d JW |
2994 | { |
2995 | struct rtrs_clt_io_req *req; | |
caa84d95 | 2996 | struct rtrs_clt_path *clt_path; |
6a98d71d JW |
2997 | |
2998 | enum dma_data_direction dma_dir; | |
2999 | int err = -ECONNABORTED, i; | |
3000 | size_t usr_len, hdr_len; | |
3001 | struct path_it it; | |
3002 | ||
3003 | /* Get kvec length */ | |
3004 | for (i = 0, usr_len = 0; i < nr; i++) | |
3005 | usr_len += vec[i].iov_len; | |
3006 | ||
3007 | if (dir == READ) { | |
3008 | hdr_len = sizeof(struct rtrs_msg_rdma_read) + | |
3009 | sg_cnt * sizeof(struct rtrs_sg_desc); | |
3010 | dma_dir = DMA_FROM_DEVICE; | |
3011 | } else { | |
3012 | hdr_len = sizeof(struct rtrs_msg_rdma_write); | |
3013 | dma_dir = DMA_TO_DEVICE; | |
3014 | } | |
3015 | ||
a94dae86 DK |
3016 | rcu_read_lock(); |
3017 | for (path_it_init(&it, clt); | |
caa84d95 VT |
3018 | (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { |
3019 | if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) | |
6a98d71d JW |
3020 | continue; |
3021 | ||
caa84d95 VT |
3022 | if (usr_len + hdr_len > clt_path->max_hdr_size) { |
3023 | rtrs_wrn_rl(clt_path->clt, | |
6a98d71d JW |
3024 | "%s request failed, user message size is %zu and header length %zu, but max size is %u\n", |
3025 | dir == READ ? "Read" : "Write", | |
caa84d95 | 3026 | usr_len, hdr_len, clt_path->max_hdr_size); |
6a98d71d JW |
3027 | err = -EMSGSIZE; |
3028 | break; | |
3029 | } | |
caa84d95 | 3030 | req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv, |
6a98d71d JW |
3031 | vec, usr_len, sg, sg_cnt, data_len, |
3032 | dma_dir); | |
3033 | if (dir == READ) | |
3034 | err = rtrs_clt_read_req(req); | |
3035 | else | |
3036 | err = rtrs_clt_write_req(req); | |
4693d6b7 | 3037 | if (err) { |
6a98d71d JW |
3038 | req->in_use = false; |
3039 | continue; | |
3040 | } | |
3041 | /* Success path */ | |
3042 | break; | |
a94dae86 DK |
3043 | } |
3044 | path_it_deinit(&it); | |
3045 | rcu_read_unlock(); | |
6a98d71d JW |
3046 | |
3047 | return err; | |
3048 | } | |
3049 | EXPORT_SYMBOL(rtrs_clt_request); | |
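A hedged caller-side sketch of the sequence the kernel-doc above implies:
grab a permit, submit, and release the permit on immediate failure (on
completion, the caller's conf_fn path puts it back). The
rtrs_clt_get_permit()/rtrs_clt_put_permit() helpers and RTRS_PERMIT_NOWAIT
come from rtrs.h of this era; example_io_conf() and the WRITE-only shape are
illustrative:

	/* Hypothetical completion callback, called with the I/O result. */
	static void example_io_conf(void *priv, int errno)
	{
		/* Complete the upper-layer request tracked by @priv. */
	}

	static int example_write(struct rtrs_clt_sess *clt, void *msg,
				 size_t msg_len, struct scatterlist *sg,
				 unsigned int sg_cnt, size_t data_len)
	{
		struct rtrs_clt_req_ops ops = {
			.priv	 = NULL,
			.conf_fn = example_io_conf,
		};
		struct kvec vec = {
			.iov_base = msg,	/* copied internally, may live on the stack */
			.iov_len  = msg_len,
		};
		struct rtrs_permit *permit;
		int err;

		permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_NOWAIT);
		if (!permit)
			return -EBUSY;

		err = rtrs_clt_request(WRITE, &ops, clt, permit, &vec, 1,
				       data_len, sg, sg_cnt);
		if (err)
			rtrs_clt_put_permit(clt, permit);
		return err;
	}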
3050 | ||
f3433d79 | 3051 | int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index) |
2958a995 | 3052 | { |
c646790a GK |
3053 | /* If there is no path, return -1 so the block layer does not retry */ |
3054 | int cnt = -1; | |
2958a995 | 3055 | struct rtrs_con *con; |
caa84d95 | 3056 | struct rtrs_clt_path *clt_path; |
2958a995 GK |
3057 | struct path_it it; |
3058 | ||
3059 | rcu_read_lock(); | |
3060 | for (path_it_init(&it, clt); | |
caa84d95 VT |
3061 | (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { |
3062 | if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) | |
2958a995 GK |
3063 | continue; |
3064 | ||
caa84d95 | 3065 | con = clt_path->s.con[index + 1]; |
2958a995 GK |
3066 | cnt = ib_process_cq_direct(con->cq, -1); |
3067 | if (cnt) | |
3068 | break; | |
3069 | } | |
3070 | path_it_deinit(&it); | |
3071 | rcu_read_unlock(); | |
3072 | ||
3073 | return cnt; | |
3074 | } | |
3075 | EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct); | |
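This is the hook a polling block driver sits on. A sketch of a blk-mq
->poll() callback under the assumption that the driver stored the session
pointer in hctx->driver_data at init time; the callback name and wiring are
hypothetical, and the two-argument poll signature matches the blk-mq API of
this kernel generation:

	static int example_poll(struct blk_mq_hw_ctx *hctx,
				struct io_comp_batch *iob)
	{
		struct rtrs_clt_sess *clt = hctx->driver_data;

		/* hctx->queue_num is the 0-based poll-queue index; the +1
		 * shift past con[0] (the user connection) happens inside
		 * rtrs_clt_rdma_cq_direct() itself.
		 */
		return rtrs_clt_rdma_cq_direct(clt, hctx->queue_num);
	}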
3076 | ||
6a98d71d JW |
3077 | /** |
3078 | * rtrs_clt_query() - queries RTRS session attributes | |
3079 | * @clt: session pointer |
3080 | * @attr: query results for session attributes. |
3081 | * Returns: | |
3082 | * 0 on success | |
3083 | * -ECOMM no connection to the server | |
3084 | */ | |
f3433d79 | 3085 | int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr) |
6a98d71d JW |
3086 | { |
3087 | if (!rtrs_clt_is_connected(clt)) | |
3088 | return -ECOMM; | |
3089 | ||
3090 | attr->queue_depth = clt->queue_depth; | |
7404bdde | 3091 | attr->max_segments = clt->max_segments; |
0633e237 JW |
3092 | /* Cap max_io_size to the min of remote buffer size and the FR page-list size */ |
3093 | attr->max_io_size = min_t(int, clt->max_io_size, | |
3094 | clt->max_segments * SZ_4K); | |
6a98d71d JW |
3095 | |
3096 | return 0; | |
3097 | } | |
3098 | EXPORT_SYMBOL(rtrs_clt_query); | |
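A small sketch of the intended use: read the negotiated limits once a path
is connected, e.g. to size an upper-layer block device. Field names follow
struct rtrs_attrs from rtrs.h of this era; example_setup_limits() is a
placeholder:

	static int example_setup_limits(struct rtrs_clt_sess *clt)
	{
		struct rtrs_attrs attr;
		int err;

		/* Fails with -ECOMM while no path is CONNECTED */
		err = rtrs_clt_query(clt, &attr);
		if (err)
			return err;

		pr_info("depth %u, max_io %u, max_segs %u\n",
			attr.queue_depth, attr.max_io_size, attr.max_segments);
		return 0;
	}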
3099 | ||
f3433d79 | 3100 | int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, |
6a98d71d JW |
3101 | struct rtrs_addr *addr) |
3102 | { | |
caa84d95 | 3103 | struct rtrs_clt_path *clt_path; |
6a98d71d JW |
3104 | int err; |
3105 | ||
caa84d95 VT |
3106 | clt_path = alloc_path(clt, addr, nr_cpu_ids, 0); |
3107 | if (IS_ERR(clt_path)) | |
3108 | return PTR_ERR(clt_path); | |
6a98d71d | 3109 | |
ac5e8814 MHI |
3110 | mutex_lock(&clt->paths_mutex); |
3111 | if (clt->paths_num == 0) { | |
3112 | /* | |
3113 | * When all paths of a session have been removed, |
3114 | * adding the first path back is treated as a new session |
3115 | * by the storage server. |
3116 | */ | |
caa84d95 | 3117 | clt_path->for_new_clt = 1; |
ac5e8814 MHI |
3118 | } |
3119 | ||
3120 | mutex_unlock(&clt->paths_mutex); | |
3121 | ||
6a98d71d JW |
3122 | /* |
3123 | * It is totally safe to add a path in CONNECTING state: incoming |
3124 | * IO will never grab it. It is also very important to add the |
3125 | * path before init, since init fires the LINK_CONNECTED event. |
3126 | */ | |
caa84d95 | 3127 | rtrs_clt_add_path_to_arr(clt_path); |
6a98d71d | 3128 | |
caa84d95 | 3129 | err = init_path(clt_path); |
6a98d71d | 3130 | if (err) |
caa84d95 | 3131 | goto close_path; |
6a98d71d | 3132 | |
caa84d95 | 3133 | err = rtrs_clt_create_path_files(clt_path); |
6a98d71d | 3134 | if (err) |
caa84d95 | 3135 | goto close_path; |
6a98d71d JW |
3136 | |
3137 | return 0; | |
3138 | ||
caa84d95 VT |
3139 | close_path: |
3140 | rtrs_clt_remove_path_from_arr(clt_path); | |
3141 | rtrs_clt_close_conns(clt_path, true); | |
3142 | free_percpu(clt_path->stats->pcpu_stats); | |
3143 | kfree(clt_path->stats); | |
3144 | free_path(clt_path); | |
6a98d71d JW |
3145 | |
3146 | return err; | |
3147 | } | |
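Usage note: this is the backend of the per-session "add_path" attribute
(/sys/class/rtrs-client/<session-name>/add_path per the
sysfs-class-rtrs-client ABI document); writing a source/destination address
pair (ip:... or gid:...) there adds a path to a live session at runtime.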
3148 | ||
3149 | static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev) | |
3150 | { | |
3151 | if (!(dev->ib_dev->attrs.device_cap_flags & | |
3152 | IB_DEVICE_MEM_MGT_EXTENSIONS)) { | |
3153 | pr_err("Memory registrations not supported.\n"); | |
3154 | return -EOPNOTSUPP; |
3155 | } | |
3156 | ||
3157 | return 0; | |
3158 | } | |
3159 | ||
3160 | static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = { | |
3161 | .init = rtrs_clt_ib_dev_init | |
3162 | }; | |
3163 | ||
3164 | static int __init rtrs_client_init(void) | |
3165 | { | |
3166 | rtrs_rdma_dev_pd_init(0, &dev_pd); | |
3167 | ||
3168 | rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client"); | |
3169 | if (IS_ERR(rtrs_clt_dev_class)) { | |
3170 | pr_err("Failed to create rtrs-client dev class\n"); | |
3171 | return PTR_ERR(rtrs_clt_dev_class); | |
3172 | } | |
03ed5a8c | 3173 | rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0); |
6a98d71d JW |
3174 | if (!rtrs_wq) { |
3175 | class_destroy(rtrs_clt_dev_class); | |
3176 | return -ENOMEM; | |
3177 | } | |
3178 | ||
3179 | return 0; | |
3180 | } | |
3181 | ||
3182 | static void __exit rtrs_client_exit(void) | |
3183 | { | |
3184 | destroy_workqueue(rtrs_wq); | |
3185 | class_destroy(rtrs_clt_dev_class); | |
3186 | rtrs_rdma_dev_pd_deinit(&dev_pd); | |
3187 | } | |
3188 | ||
3189 | module_init(rtrs_client_init); | |
3190 | module_exit(rtrs_client_exit); |