/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = NULL;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_qp_attr qp_attr;
	int err;

	if (event->param.conn.private_data_len >= sizeof(*dp)) {
		dp = event->param.conn.private_data;

		/* make sure it isn't empty data */
		if (dp->dp_protocol_major) {
			rds_ib_set_protocol(conn,
				RDS_PROTOCOL(dp->dp_protocol_major,
					     dp->dp_protocol_minor));
			rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
		}
	}

	if (conn->c_version < RDS_PROTOCOL(3, 1)) {
		pr_notice("RDS/IB: Connection <%pI4,%pI4> version %u.%u no longer supported\n",
			  &conn->c_laddr, &conn->c_faddr,
			  RDS_PROTOCOL_MAJOR(conn->c_version),
			  RDS_PROTOCOL_MINOR(conn->c_version));
		set_bit(RDS_DESTROY_PENDING, &conn->c_path[0].cp_flags);
		rds_conn_destroy(conn);
		return;
	} else {
		pr_notice("RDS/IB: %s conn connected <%pI4,%pI4> version %u.%u%s\n",
			  ic->i_active_side ? "Active" : "Passive",
			  &conn->c_laddr, &conn->c_faddr,
			  RDS_PROTOCOL_MAJOR(conn->c_version),
			  RDS_PROTOCOL_MINOR(conn->c_version),
			  ic->i_flowctl ? ", flow control" : "");
	}

	atomic_set(&ic->i_cq_quiesce, 0);

	/* Init rings and fill recv. this needs to wait until protocol
	 * negotiation is complete, since ring layout is different
	 * from 3.1 to 4.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);

	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
		       err);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		/* dp structure start is not guaranteed to be 8 bytes aligned.
		 * Since dp_ack_seq is 64-bit extended load operations can be
		 * used so go through get_unaligned to avoid unaligned errors.
		 */
		__be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);

		if (dp_ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
					    NULL);
	}

	rds_connect_complete(conn);
}

static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
				      struct rdma_conn_param *conn_param,
				      struct rds_ib_connect_private *dp,
				      u32 protocol_version,
				      u32 max_responder_resources,
				      u32 max_initiator_depth)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));

	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
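	/* An RNR retry count of 7 tells the HCA to retry indefinitely */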
	conn_param->rnr_retry_count = 7;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));

		/* Advertise flow control.  IB_GET_POST_CREDITS() extracts the
		 * receive buffers posted since the last advertisement, and the
		 * atomic_sub() clears that count so the same credits are never
		 * advertised twice.
		 */
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			dp->dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
		}

		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof(*dp);
	}
}

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}

/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

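/* Drain completions from a send CQ and dispatch them by wr_id: ids
 * within the send ring (or the dedicated ACK wr_id) belong to data
 * sends, anything else to fastreg MR work requests.
 */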
static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			if (wc->wr_id <= ic->i_send_ring.w_nr ||
			    wc->wr_id == RDS_IB_ACK_WR_ID)
				rds_ib_send_cqe_handler(ic, wc);
			else
				rds_ib_mr_cqe_handler(ic, wc);
		}
	}
}

static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if cq has been already reaped, ignore incoming cq event */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

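	/* Drain the CQ, re-arm it, then drain again so completions that
	 * arrived between the first poll and ib_req_notify_cq() are not
	 * missed.
	 */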
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(&ic->conn->c_path[0]);
}

static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs,
		     struct rds_ib_ack_state *ack_state)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_recv_cqe_handler(ic, wc, ack_state);
		}
	}
}

static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if cq has been already reaped, ignore incoming cq event */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

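	/* Same drain/re-arm/drain dance as the send side, except the
	 * receive CQ is re-armed for solicited events only.
	 */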
	memset(&state, 0, sizeof(state));
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}

static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rdsdebug("Fatal QP Event %u (%s) - connection %pI4->%pI4, reconnecting\n",
			 event->event, ib_event_msg(event->event),
			 &conn->c_laddr, &conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}

static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_send_tasklet);
}

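/* Spread completion queues across the device's completion vectors by
 * always handing out the least-loaded vector; ibdev_put_vector()
 * drops the load count again when a CQ is torn down.
 */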
static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
{
	int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
	int index = rds_ibdev->dev->num_comp_vectors - 1;
	int i;

	for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
		if (rds_ibdev->vector_load[i] < min) {
			index = i;
			min = rds_ibdev->vector_load[i];
		}
	}

	rds_ibdev->vector_load[index]++;
	return index;
}

static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
{
	rds_ibdev->vector_load[index]--;
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	int ret, fr_queue_space;

	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* The fr_queue_space is currently set to 512, to add extra space on
	 * completion queue and send queue. This extra space is used for FRMR
	 * registration and invalidation work requests
	 */
	fr_queue_space = rds_ibdev->use_fastreg ?
			 (RDS_IB_DEFAULT_FR_WR + 1) +
			 (RDS_IB_DEFAULT_FR_INV_WR + 1)
			 : 0;

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);

	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;

	ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
	cq_attr.comp_vector = ic->i_scq_vector;
	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto rds_ibdev_out;
	}

	ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_recv_ring.w_nr;
	cq_attr.comp_vector = ic->i_rcq_vector;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto send_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto recv_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto recv_cq_out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;
	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
	atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);

	/*
	 * XXX this can fail if max_*_wr is too large? Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto recv_cq_out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_send_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto qp_out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_recv_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto send_hdrs_dma_out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
					  &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto recv_hdrs_dma_out;
	}

	ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto ack_dma_out;
	}

	ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto sends_out;
	}

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
		 ic->i_send_cq, ic->i_recv_cq);

	return ret;

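/* Error paths unwind in reverse order of setup: each label frees only
 * what was successfully allocated before the failure.
 */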
sends_out:
	vfree(ic->i_sends);
ack_dma_out:
	ib_dma_free_coherent(dev, sizeof(struct rds_header),
			     ic->i_ack, ic->i_ack_dma);
recv_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
			     sizeof(struct rds_header),
			     ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
send_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
			     sizeof(struct rds_header),
			     ic->i_send_hdrs, ic->i_send_hdrs_dma);
qp_out:
	rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
	if (!ib_destroy_cq(ic->i_recv_cq))
		ic->i_recv_cq = NULL;
send_cq_out:
	if (!ib_destroy_cq(ic->i_send_cq))
		ic->i_send_cq = NULL;
rds_ibdev_out:
	rds_ib_remove_conn(rds_ibdev, conn);
	rds_ib_dev_put(rds_ibdev);

	return ret;
}

static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	u16 common;
	u32 version = 0;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-(
	 */

	/* Be paranoid. RDS always has privdata */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, "
			"rejecting\n");
		return 0;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < sizeof(*dp) ||
	    dp->dp_protocol_major == 0)
		return RDS_PROTOCOL_3_0;

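	/* The minor-version mask is a bitmap of minor versions both ends
	 * support; walking to the highest common bit picks the newest
	 * mutually supported 3.x protocol.
	 */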
	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
	if (dp->dp_protocol_major == 3 && common) {
		version = RDS_PROTOCOL_3_0;
		while ((common >>= 1) != 0)
			version++;
	} else
		printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
				   &dp->dp_saddr,
				   dp->dp_protocol_major,
				   dp->dp_protocol_minor);
	return version;
}

int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	struct rds_ib_connect_private dp_rep;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	u32 version;
	int err = 1, destroy = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event);
	if (!version)
		goto out;

	rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid 0x%llx\n",
		 &dp->dp_saddr, &dp->dp_daddr,
		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
		 (unsigned long long)be64_to_cpu(lguid),
		 (unsigned long long)be64_to_cpu(fguid));

	/* RDS/IB is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
			       &rds_ib_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while the
	 * previous connection exists, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else
		if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}

	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	/* We got halfway through setting up the ib_connection, if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
				  event->param.conn.responder_resources,
				  event->param.conn.initiator_depth);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	if (rdma_accept(cm_id, &conn_param))
		rds_ib_conn_error(conn, "rdma_accept failed\n");

out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, NULL, 0);
	return destroy;
}

int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	struct rds_ib_connect_private dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */

	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
				  UINT_MAX, UINT_MAX);
	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	ic->i_active_side = true;
	return ret;
}

int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct sockaddr_in src, dest;
	int ret;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

776 | src.sin_addr.s_addr = (__force u32)conn->c_laddr; | |
777 | src.sin_port = (__force u16)htons(0); | |
778 | ||
779 | dest.sin_family = AF_INET; | |
780 | dest.sin_addr.s_addr = (__force u32)conn->c_faddr; | |
781 | dest.sin_port = (__force u16)htons(RDS_PORT); | |
782 | ||
783 | ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src, | |
784 | (struct sockaddr *)&dest, | |
785 | RDS_RDMA_RESOLVE_TIMEOUT_MS); | |
786 | if (ret) { | |
787 | rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id, | |
788 | ret); | |
789 | rdma_destroy_id(ic->i_cm_id); | |
790 | ic->i_cm_id = NULL; | |
791 | } | |
792 | ||
793 | out: | |
794 | return ret; | |
795 | } | |
796 | ||
/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup.  In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	int err = 0;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				 ic->i_cm_id, err);
		}

		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it.  We've shut down new
		 * sends before getting here so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0) &&
			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
			   (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
		tasklet_kill(&ic->i_send_tasklet);
		tasklet_kill(&ic->i_recv_tasklet);

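		/* With both tasklets dead, mark the CQs quiesced so any
		 * late completion event that still fires is ignored by
		 * the tasklet handlers.
		 */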
		atomic_set(&ic->i_cq_quiesce, 1);

		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
			ib_destroy_cq(ic->i_send_cq);
		}

		if (ic->i_recv_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
			ib_destroy_cq(ic->i_recv_cq);
		}

		/* then free the resources that ib callbacks use */
		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_send_hdrs,
					     ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_recv_hdrs,
					     ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);

		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_ibdev);

	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
	ic->i_active_side = false;
}

int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic);
	if (ret) {
		kfree(ic);
		return ret;
	}

	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
		     (unsigned long)ic);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
		     (unsigned long)ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);

	/*
	 * rds_ib_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t *lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}