/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
                 "Max number of connected-mode QPs per interface "
                 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
                 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

#define IPOIB_CM_RX_RESERVE     (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)

static struct ib_qp_attr ipoib_cm_err_attr = {
        .qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
        .opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
                                  u64 mapping[IPOIB_CM_RX_SG])
{
        int i;

        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (i = 0; i < frags; ++i)
                ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

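/*
 * Repost receive buffer @id to the shared receive queue.  The wr_id
 * encodes the ring index together with the IPOIB_OP_CM and
 * IPOIB_OP_RECV flags so the completion handler can tell CM receives
 * apart; on a post failure the buffer is unmapped and freed rather
 * than leaked.
 */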
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;

        priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        for (i = 0; i < priv->cm.num_frags; ++i)
                priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

        ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
                                      priv->cm.srq_ring[id].mapping);
                dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
                priv->cm.srq_ring[id].skb = NULL;
        }

        return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
                                        struct ipoib_cm_rx *rx,
                                        struct ib_recv_wr *wr,
                                        struct ib_sge *sge, int id)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;

        wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
                sge[i].addr = rx->rx_ring[id].mapping[i];

        ret = ib_post_recv(rx->qp, wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
                                      rx->rx_ring[id].mapping);
                dev_kfree_skb_any(rx->rx_ring[id].skb);
                rx->rx_ring[id].skb = NULL;
        }

        return ret;
}

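/*
 * Allocate one CM receive buffer: a linear head of IPOIB_CM_HEAD_SIZE
 * bytes plus @frags full pages, with each piece DMA-mapped into
 * @mapping[].  On any allocation or mapping failure, everything mapped
 * so far is unwound and NULL is returned.
 */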
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
                                             struct ipoib_cm_rx_buf *rx_ring,
                                             int id, int frags,
                                             u64 mapping[IPOIB_CM_RX_SG],
                                             gfp_t gfp)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct sk_buff *skb;
        int i;

        skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
        if (unlikely(!skb))
                return NULL;

        /*
         * IPoIB adds an IPOIB_ENCAP_LEN byte header; this reserve
         * aligns the IP header to a multiple of 16.
         */
        skb_reserve(skb, IPOIB_CM_RX_RESERVE);

        mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        for (i = 0; i < frags; i++) {
                struct page *page = alloc_page(gfp);

                if (!page)
                        goto partial_error;
                skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

                mapping[i + 1] = ib_dma_map_page(priv->ca, page,
                                                 0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
                        goto partial_error;
        }

        rx_ring[id].skb = skb;
        return skb;

partial_error:

        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (; i > 0; --i)
                ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);
        return NULL;
}

static void ipoib_cm_free_rx_ring(struct net_device *dev,
                                  struct ipoib_cm_rx_buf *rx_ring)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (rx_ring[i].skb) {
                        ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
                                              rx_ring[i].mapping);
                        dev_kfree_skb_any(rx_ring[i].skb);
                }

        vfree(rx_ring);
}

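/*
 * RX drain scheme: QPs whose last WQE has been reached are moved to
 * the flush list, and a single drain send WR (IPOIB_CM_RX_DRAIN_WRID)
 * is posted on one of them.  Since the QP is in the error state, the
 * WR completes immediately as a flush error; seeing that completion
 * in the recv CQ shows that all earlier completions for these QPs
 * have been reaped, so the drain list can be reclaimed safely.
 */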
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
        struct ib_send_wr *bad_wr;
        struct ipoib_cm_rx *p;

        /* We only reserved 1 extra slot in CQ for drain WRs, so
         * make sure we have at most 1 outstanding WR. */
        if (list_empty(&priv->cm.rx_flush_list) ||
            !list_empty(&priv->cm.rx_drain_list))
                return;

        /*
         * QPs on the flush list are in the error state.  This way, a
         * "flush error" WC will be immediately generated for each WR
         * we post.
         */
        p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
        ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
        if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
                ipoib_warn(priv, "failed to post drain wr\n");

        list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
        struct ipoib_cm_rx *p = ctx;
        struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
        unsigned long flags;

        if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
                return;

        spin_lock_irqsave(&priv->lock, flags);
        list_move(&p->list, &priv->cm.rx_flush_list);
        p->state = IPOIB_CM_RX_FLUSH;
        ipoib_cm_start_rx_drain(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
                                           struct ipoib_cm_rx *p)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_init_attr attr = {
                .event_handler = ipoib_cm_rx_event_handler,
                .send_cq = priv->recv_cq, /* For drain WR */
                .recv_cq = priv->recv_cq,
                .srq = priv->cm.srq,
                .cap.max_send_wr = 1, /* For drain WR */
                .cap.max_send_sge = 1, /* FIXME: 0 seems not to work */
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
                .qp_context = p,
        };

        if (!ipoib_cm_has_srq(dev)) {
                attr.cap.max_recv_wr = ipoib_recvq_size;
                attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
        }

        return ib_create_qp(priv->pd, &attr);
}

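/*
 * Walk the passive-side QP through INIT -> RTR -> RTS using the
 * attributes supplied by the CM.  The final move to RTS exists only
 * to make drain WRs generate flush completions (see the firmware note
 * in the body), which is why its failures are logged but not fatal.
 */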
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
                                 struct ib_cm_id *cm_id, struct ib_qp *qp,
                                 unsigned psn)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
                return ret;
        }
        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }
        qp_attr.rq_psn = psn;
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        /*
         * Current Mellanox HCA firmware won't generate completions
         * with error for drain WRs unless the QP has been moved to
         * RTS first.  This work-around leaves a window where a QP has
         * moved to error asynchronously, but this will eventually get
         * fixed in firmware, so let's not error out if modify QP
         * fails.
         */
        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return 0;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return 0;
        }

        return 0;
}

static void ipoib_cm_init_rx_wr(struct net_device *dev,
                                struct ib_recv_wr *wr,
                                struct ib_sge *sge)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int i;

        for (i = 0; i < priv->cm.num_frags; ++i)
                sge[i].lkey = priv->pd->local_dma_lkey;

        sge[0].length = IPOIB_CM_HEAD_SIZE;
        for (i = 1; i < priv->cm.num_frags; ++i)
                sge[i].length = PAGE_SIZE;

        wr->next = NULL;
        wr->sg_list = sge;
        wr->num_sge = priv->cm.num_frags;
}

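/*
 * Set up the private receive ring used when the HCA has no SRQ: each
 * connection gets its own ipoib_recvq_size buffers.  The number of
 * such QPs is capped by the max_nonsrq_conn_qp module parameter, and
 * a REQ beyond the cap is rejected with IB_CM_REJ_NO_QP.
 */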
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
                                   struct ipoib_cm_rx *rx)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct {
                struct ib_recv_wr wr;
                struct ib_sge sge[IPOIB_CM_RX_SG];
        } *t;
        int ret;
        int i;

        rx->rx_ring = vzalloc(array_size(ipoib_recvq_size,
                                         sizeof(*rx->rx_ring)));
        if (!rx->rx_ring)
                return -ENOMEM;

        t = kmalloc(sizeof *t, GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_free_1;
        }

        ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

        spin_lock_irq(&priv->lock);

        if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
                spin_unlock_irq(&priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
                ret = -EINVAL;
                goto err_free;
        } else
                ++priv->cm.nonsrq_conn_qp;

        spin_unlock_irq(&priv->lock);

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
                                           rx->rx_ring[i].mapping,
                                           GFP_KERNEL)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        ret = -ENOMEM;
                        goto err_count;
                }
                ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
                if (ret) {
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed for buf %d\n", i);
                        ret = -EIO;
                        goto err_count;
                }
        }

        rx->recv_count = ipoib_recvq_size;

        kfree(t);

        return 0;

err_count:
        spin_lock_irq(&priv->lock);
        --priv->cm.nonsrq_conn_qp;
        spin_unlock_irq(&priv->lock);

err_free:
        kfree(t);

err_free_1:
        ipoib_cm_free_rx_ring(dev, rx->rx_ring);

        return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
                             struct ib_qp *qp, struct ib_cm_req_event_param *req,
                             unsigned psn)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_rep_param rep = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        rep.private_data = &data;
        rep.private_data_len = sizeof data;
        rep.flow_control = 0;
        rep.rnr_retry_count = req->rnr_retry_count;
        rep.srq = ipoib_cm_has_srq(dev);
        rep.qp_num = qp->qp_num;
        rep.starting_psn = psn;
        return ib_send_cm_rep(cm_id, &rep);
}

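/*
 * Passive side of connection setup: allocate an ipoib_cm_rx for the
 * incoming REQ, create and bring up its QP, hang it on the
 * passive_ids list (watched by the stale-connection reaper), and
 * answer with a REP advertising our datagram QPN and CM buffer size.
 */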
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct net_device *dev = cm_id->context;
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned psn;
        int ret;

        ipoib_dbg(priv, "REQ arrived\n");
        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        p->dev = dev;
        p->id = cm_id;
        cm_id->context = p;
        p->state = IPOIB_CM_RX_LIVE;
        p->jiffies = jiffies;
        INIT_LIST_HEAD(&p->list);

        p->qp = ipoib_cm_create_rx_qp(dev, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                goto err_qp;
        }

        psn = prandom_u32() & 0xffffff;
        ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
        if (ret)
                goto err_modify;

        if (!ipoib_cm_has_srq(dev)) {
                ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
                if (ret)
                        goto err_modify;
        }

        spin_lock_irq(&priv->lock);
        queue_delayed_work(priv->wq,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        /* Add this entry to passive ids list head, but do not re-add it
         * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
        p->jiffies = jiffies;
        if (p->state == IPOIB_CM_RX_LIVE)
                list_move(&p->list, &priv->cm.passive_ids);
        spin_unlock_irq(&priv->lock);

        ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
        if (ret) {
                ipoib_warn(priv, "failed to send REP: %d\n", ret);
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
        }
        return 0;

err_modify:
        ib_destroy_qp(p->qp);
err_qp:
        kfree(p);
        return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_rx *p;
        struct ipoib_dev_priv *priv;

        switch (event->event) {
        case IB_CM_REQ_RECEIVED:
                return ipoib_cm_req_handler(cm_id, event);
        case IB_CM_DREQ_RECEIVED:
                ib_send_cm_drep(cm_id, NULL, 0);
                /* Fall through */
        case IB_CM_REJ_RECEIVED:
                p = cm_id->context;
                priv = ipoib_priv(p->dev);
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
                /* Fall through */
        default:
                return 0;
        }
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
                          unsigned int length, struct sk_buff *toskb)
{
        int i, num_frags;
        unsigned int size;

        /* put header into skb */
        size = min(length, hdr_space);
        skb->tail += size;
        skb->len += size;
        length -= size;

        num_frags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < num_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                if (length == 0) {
                        /* don't need this page */
                        skb_fill_page_desc(toskb, i, skb_frag_page(frag),
                                           0, PAGE_SIZE);
                        --skb_shinfo(skb)->nr_frags;
                } else {
                        size = min(length, (unsigned) PAGE_SIZE);

                        skb_frag_size_set(frag, size);
                        skb->data_len += size;
                        skb->truesize += size;
                        skb->len += size;
                        length -= size;
                }
        }
}

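/*
 * RX completion handler for connected mode.  Drain-WRID completions
 * feed the QP reaping machinery; packets shorter than
 * IPOIB_CM_COPYBREAK are copied into a fresh skb so the large ring
 * buffer can be reposted untouched; everything else is passed up the
 * stack and its ring slot refilled with a new allocation.
 */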
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_rx_buf *rx_ring;
        unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
        struct sk_buff *skb, *newskb;
        struct ipoib_cm_rx *p;
        unsigned long flags;
        u64 mapping[IPOIB_CM_RX_SG];
        int frags;
        int has_srq;
        struct sk_buff *small_skb;

        ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
                        spin_lock_irqsave(&priv->lock, flags);
                        list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                        ipoib_cm_start_rx_drain(priv);
                        queue_work(priv->wq, &priv->cm.rx_reap_task);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else
                        ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
                                   wr_id, ipoib_recvq_size);
                return;
        }

        p = wc->qp->qp_context;

        has_srq = ipoib_cm_has_srq(dev);
        rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

        skb = rx_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                ipoib_dbg(priv,
                          "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
                          wc->status, wr_id, wc->vendor_err);
                ++dev->stats.rx_dropped;
                if (has_srq)
                        goto repost;
                else {
                        if (!--p->recv_count) {
                                spin_lock_irqsave(&priv->lock, flags);
                                list_move(&p->list, &priv->cm.rx_reap_list);
                                spin_unlock_irqrestore(&priv->lock, flags);
                                queue_work(priv->wq, &priv->cm.rx_reap_task);
                        }
                        return;
                }
        }

        if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
                if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
                        spin_lock_irqsave(&priv->lock, flags);
                        p->jiffies = jiffies;
                        /* Move this entry to list head, but do not re-add it
                         * if it has been moved out of list. */
                        if (p->state == IPOIB_CM_RX_LIVE)
                                list_move(&p->list, &priv->cm.passive_ids);
                        spin_unlock_irqrestore(&priv->lock, flags);
                }
        }

        if (wc->byte_len < IPOIB_CM_COPYBREAK) {
                int dlen = wc->byte_len;

                small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
                if (small_skb) {
                        skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
                        ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
                                                   dlen, DMA_FROM_DEVICE);
                        skb_copy_from_linear_data(skb, small_skb->data, dlen);
                        ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
                                                      dlen, DMA_FROM_DEVICE);
                        skb_put(small_skb, dlen);
                        skb = small_skb;
                        goto copied;
                }
        }

        frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
                                              (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

        newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
                                       mapping, GFP_ATOMIC);
        if (unlikely(!newskb)) {
                /*
                 * If we can't allocate a new RX buffer, dump
                 * this packet and reuse the old buffer.
                 */
                ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
        memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_add_pseudo_hdr(skb);

        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;

        skb->dev = dev;
        /* XXX get correct PACKET_ type here */
        skb->pkt_type = PACKET_HOST;
        netif_receive_skb(skb);

repost:
        if (has_srq) {
                if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
                        ipoib_warn(priv, "ipoib_cm_post_receive_srq failed for buf %d\n",
                                   wr_id);
        } else {
                if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
                                                          &priv->cm.rx_wr,
                                                          priv->cm.rx_sge,
                                                          wr_id))) {
                        --p->recv_count;
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed for buf %d\n",
                                   wr_id);
                }
        }
}

static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            unsigned int wr_id,
                            struct ipoib_tx_buf *tx_req)
{
        struct ib_send_wr *bad_wr;

        ipoib_build_sge(priv, tx_req);

        priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;

        return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
}

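/*
 * Queue one skb on a connected-mode TX QP.  The skb is recorded in
 * tx_ring _before_ ib_post_send() so a racing completion always sees
 * consistent state; the net queue is stopped one slot before the ring
 * fills, and the send CQ is re-armed so send_napi can reap
 * completions and restart it.
 */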
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int rc;
        unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);

        if (unlikely(skb->len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, tx->mtu);
                ++dev->stats.tx_dropped;
                ++dev->stats.tx_errors;
                ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
                return;
        }
        if (skb_shinfo(skb)->nr_frags > usable_sge) {
                if (skb_linearize(skb) < 0) {
                        ipoib_warn(priv, "skb could not be linearized\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
                /* skb_linearize() can succeed without bringing nr_frags
                 * below the SGE limit, so re-check before posting. */
                if (skb_shinfo(skb)->nr_frags > usable_sge) {
                        ipoib_warn(priv, "too many frags after skb linearize\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
        }
        ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
                       tx->tx_head, skb->len, tx->qp->qp_num);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;

        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
                ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                          tx->qp->qp_num);
                netif_stop_queue(dev);
        }

        skb_orphan(skb);
        skb_dst_drop(skb);

        if (netif_queue_stopped(dev)) {
                rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS);
                if (unlikely(rc < 0))
                        ipoib_warn(priv, "IPoIB/CM: request notify on send CQ failed\n");
                else if (rc)
                        napi_schedule(&priv->send_napi);
        }

        rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
        if (unlikely(rc)) {
                ipoib_warn(priv, "IPoIB/CM: post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);

                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else {
                netif_trans_update(dev);
                ++tx->tx_head;
                ++priv->tx_head;
        }
}

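/*
 * TX completion handler for connected mode: reclaim the ring slot and
 * wake the queue once it drains to half full.  A completion error
 * other than a flush tears the connection down by unlinking the neigh
 * and queueing the ipoib_cm_tx onto the reap list.
 */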
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_tx *tx = wc->qp->qp_context;
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
        struct ipoib_tx_buf *tx_req;
        unsigned long flags;

        ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &tx->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv, tx_req);

        /* FIXME: is this right? Shouldn't we only increment on success? */
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        netif_tx_lock(dev);

        ++tx->tx_tail;
        ++priv->tx_tail;

        if (unlikely(netif_queue_stopped(dev) &&
                     (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
                     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
                netif_wake_queue(dev);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_neigh *neigh;

                /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
                 * so don't make waves.
                 */
                if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
                    wc->status == IB_WC_RETRY_EXC_ERR)
                        ipoib_dbg(priv,
                                  "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
                                  __func__, wc->status, wr_id, wc->vendor_err);
                else
                        ipoib_warn(priv,
                                   "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
                                   __func__, wc->status, wr_id, wc->vendor_err);

                spin_lock_irqsave(&priv->lock, flags);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        ipoib_neigh_free(neigh);

                        tx->neigh = NULL;
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(priv->wq, &priv->cm.reap_task);
                }

                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

                spin_unlock_irqrestore(&priv->lock, flags);
        }

        netif_tx_unlock(dev);
}

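/*
 * Start listening for connected-mode REQs.  The service ID combines
 * the IETF IPoIB CM prefix with our own datagram QPN, matching what
 * peers derive from this interface's hardware address.
 */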
int ipoib_cm_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                return 0;

        priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
        if (IS_ERR(priv->cm.id)) {
                pr_warn("%s: failed to create CM ID\n", priv->ca->name);
                ret = PTR_ERR(priv->cm.id);
                goto err_cm;
        }

        ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
                           0);
        if (ret) {
                pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
                        IPOIB_CM_IETF_ID | priv->qp->qp_num);
                goto err_listen;
        }

        return 0;

err_listen:
        ib_destroy_cm_id(priv->cm.id);
err_cm:
        priv->cm.id = NULL;
        return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_rx *rx, *n;
        LIST_HEAD(list);

        spin_lock_irq(&priv->lock);
        list_splice_init(&priv->cm.rx_reap_list, &list);
        spin_unlock_irq(&priv->lock);

        list_for_each_entry_safe(rx, n, &list, list) {
                ib_destroy_cm_id(rx->id);
                ib_destroy_qp(rx->qp);
                if (!ipoib_cm_has_srq(dev)) {
                        ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
                        spin_lock_irq(&priv->lock);
                        --priv->cm.nonsrq_conn_qp;
                        spin_unlock_irq(&priv->lock);
                }
                kfree(rx);
        }
}

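/*
 * Tear down all passive-side state: move every live connection to the
 * error state, then wait (bounded to 5 seconds) for the drain
 * machinery to confirm the CQ is empty before freeing the QPs.  If
 * the drain never completes, the hardware is assumed wedged and
 * everything is reclaimed anyway.
 */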
void ipoib_cm_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned long begin;
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
                return;

        ib_destroy_cm_id(priv->cm.id);
        priv->cm.id = NULL;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        /* Wait for all RX to be drained */
        begin = jiffies;

        while (!list_empty(&priv->cm.rx_error_list) ||
               !list_empty(&priv->cm.rx_flush_list) ||
               !list_empty(&priv->cm.rx_drain_list)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "RX drain timing out\n");

                        /*
                         * assume the HW is wedged and just free up everything.
                         */
                        list_splice_init(&priv->cm.rx_flush_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_error_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_drain_list,
                                         &priv->cm.rx_reap_list);
                        break;
                }
                spin_unlock_irq(&priv->lock);
                usleep_range(1000, 2000);
                ipoib_drain_cq(dev);
                spin_lock_irq(&priv->lock);
        }

        spin_unlock_irq(&priv->lock);

        ipoib_cm_free_rx_reap_list(dev);

        cancel_delayed_work(&priv->cm.stale_task);
}

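/*
 * Active side: the peer's REP carries its CM MTU in the private data.
 * Bring our TX QP up to RTS, re-queue any packets that accumulated on
 * the neigh while the connection was forming, and confirm with an RTU.
 */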
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct ipoib_cm_tx *p = cm_id->context;
        struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
        struct ipoib_cm_data *data = event->private_data;
        struct sk_buff_head skqueue;
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        struct sk_buff *skb;

        p->mtu = be32_to_cpu(data->mtu);

        if (p->mtu <= IPOIB_ENCAP_LEN) {
                ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
                           p->mtu, IPOIB_ENCAP_LEN);
                return -EINVAL;
        }

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }

        qp_attr.rq_psn = 0 /* FIXME */;
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return ret;
        }

        skb_queue_head_init(&skqueue);

        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
        spin_unlock_irq(&priv->lock);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
                ret = dev_queue_xmit(skb);
                if (ret)
                        ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
                                   __func__, ret);
        }

        ret = ib_send_cm_rtu(cm_id, NULL, 0);
        if (ret) {
                ipoib_warn(priv, "failed to send RTU: %d\n", ret);
                return ret;
        }
        return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_init_attr attr = {
                .send_cq                = priv->send_cq,
                .recv_cq                = priv->recv_cq,
                .srq                    = priv->cm.srq,
                .cap.max_send_wr        = ipoib_sendq_size,
                .cap.max_send_sge       = 1,
                .sq_sig_type            = IB_SIGNAL_ALL_WR,
                .qp_type                = IB_QPT_RC,
                .qp_context             = tx,
                .create_flags           = 0
        };
        struct ib_qp *tx_qp;

        if (dev->features & NETIF_F_SG)
                attr.cap.max_send_sge =
                        min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);

        tx_qp = ib_create_qp(priv->pd, &attr);
        tx->max_send_sge = attr.cap.max_send_sge;
        return tx_qp;
}

static int ipoib_cm_send_req(struct net_device *dev,
                             struct ib_cm_id *id, struct ib_qp *qp,
                             u32 qpn,
                             struct sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_req_param req = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        req.primary_path               = pathrec;
        req.alternate_path             = NULL;
        req.service_id                 = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
        req.qp_num                     = qp->qp_num;
        req.qp_type                    = qp->qp_type;
        req.private_data               = &data;
        req.private_data_len           = sizeof data;
        req.flow_control               = 0;

        req.starting_psn               = 0; /* FIXME */

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req.responder_resources        = 4;
        req.remote_cm_response_timeout = 20;
        req.local_cm_response_timeout  = 20;
        req.retry_count                = 0; /* RFC draft warns against retries */
        req.rnr_retry_count            = 0; /* RFC draft warns against retries */
        req.max_cm_retries             = 15;
        req.srq                        = ipoib_cm_has_srq(dev);
        return ib_send_cm_req(id, &req);
}

1117 | struct ib_cm_id *cm_id, struct ib_qp *qp) | |
1118 | { | |
c1048aff | 1119 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
839fcaba MT |
1120 | struct ib_qp_attr qp_attr; |
1121 | int qp_attr_mask, ret; | |
9fdd5e5b | 1122 | ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index); |
839fcaba | 1123 | if (ret) { |
9fdd5e5b | 1124 | ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret); |
839fcaba MT |
1125 | return ret; |
1126 | } | |
1127 | ||
1128 | qp_attr.qp_state = IB_QPS_INIT; | |
1129 | qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE; | |
1130 | qp_attr.port_num = priv->port; | |
1131 | qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; | |
1132 | ||
1133 | ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); | |
1134 | if (ret) { | |
1135 | ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret); | |
1136 | return ret; | |
1137 | } | |
1138 | return 0; | |
1139 | } | |
1140 | ||
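/*
 * Bring up the active (TX) side of a connection: allocate the ring,
 * create the QP, move it to INIT and fire off the REQ; the handshake
 * completes in ipoib_cm_rep_handler().  The allocations run under
 * memalloc_noio_save(), presumably so memory reclaim cannot recurse
 * into network I/O while this path is being set up.
 */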
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
                            struct sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
        unsigned int noio_flag;
        int ret;

        noio_flag = memalloc_noio_save();
        p->tx_ring = vzalloc(array_size(ipoib_sendq_size, sizeof(*p->tx_ring)));
        if (!p->tx_ring) {
                memalloc_noio_restore(noio_flag);
                ret = -ENOMEM;
                goto err_tx;
        }

        p->qp = ipoib_cm_create_tx_qp(p->dev, p);
        memalloc_noio_restore(noio_flag);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
                goto err_qp;
        }

        p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
        if (IS_ERR(p->id)) {
                ret = PTR_ERR(p->id);
                ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
                goto err_id;
        }

        ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx qp to init: %d\n", ret);
                goto err_modify_send;
        }

        ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
        if (ret) {
                ipoib_warn(priv, "failed to send cm req: %d\n", ret);
                goto err_modify_send;
        }

        ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
                  p->qp->qp_num, pathrec->dgid.raw, qpn);

        return 0;

err_modify_send:
        ib_destroy_cm_id(p->id);
err_id:
        p->id = NULL;
        ib_destroy_qp(p->qp);
err_qp:
        p->qp = NULL;
        vfree(p->tx_ring);
err_tx:
        return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
        struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
        struct ipoib_tx_buf *tx_req;
        unsigned long begin;

        ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
                  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

        if (p->id)
                ib_destroy_cm_id(p->id);

        if (p->tx_ring) {
                /* Wait for all sends to complete */
                begin = jiffies;
                while ((int) p->tx_tail - (int) p->tx_head < 0) {
                        if (time_after(jiffies, begin + 5 * HZ)) {
                                ipoib_warn(priv, "timing out; %d sends not completed\n",
                                           p->tx_head - p->tx_tail);
                                goto timeout;
                        }

                        usleep_range(1000, 2000);
                }
        }

timeout:

        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(tx_req->skb);
                netif_tx_lock_bh(p->dev);
                ++p->tx_tail;
                ++priv->tx_tail;
                if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
                    netif_queue_stopped(p->dev) &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        netif_wake_queue(p->dev);
                netif_tx_unlock_bh(p->dev);
        }

        if (p->qp)
                ib_destroy_qp(p->qp);

        vfree(p->tx_ring);
        kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_tx *tx = cm_id->context;
        struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        unsigned long flags;
        int ret;

        switch (event->event) {
        case IB_CM_DREQ_RECEIVED:
                ipoib_dbg(priv, "DREQ received.\n");
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        case IB_CM_REP_RECEIVED:
                ipoib_dbg(priv, "REP received.\n");
                ret = ipoib_cm_rep_handler(cm_id, event);
                if (ret)
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                break;
        case IB_CM_REQ_ERROR:
        case IB_CM_REJ_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                ipoib_dbg(priv, "CM error %d.\n", event->event);
                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        ipoib_neigh_free(neigh);

                        tx->neigh = NULL;
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(priv->wq, &priv->cm.reap_task);
                }

                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
                break;
        default:
                break;
        }

        return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
                                       struct ipoib_neigh *neigh)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_cm_tx *tx;

        tx = kzalloc(sizeof *tx, GFP_ATOMIC);
        if (!tx)
                return NULL;

        neigh->cm = tx;
        tx->neigh = neigh;
        tx->path = path;
        tx->dev = dev;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
        queue_work(priv->wq, &priv->cm.start_task);
        return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
        unsigned long flags;

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                spin_lock_irqsave(&priv->lock, flags);
                list_move(&tx->list, &priv->cm.reap_list);
                queue_work(priv->wq, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid %pI6\n",
                          tx->neigh->daddr + 4);
                tx->neigh = NULL;
                spin_unlock_irqrestore(&priv->lock, flags);
        }
}

#define QPN_AND_OPTIONS_OFFSET 4

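/*
 * Work item that connects peers queued by ipoib_cm_create_tx().  The
 * path is re-looked-up under netif_tx_lock + priv->lock, since only
 * under both locks does its existence imply validity; the pathrec is
 * copied out before the locks are dropped for the sleeping
 * ipoib_cm_tx_init().
 */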
static void ipoib_cm_tx_start(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.start_task);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        struct ipoib_cm_tx *p;
        unsigned long flags;
        struct ipoib_path *path;
        int ret;

        struct sa_path_rec pathrec;
        u32 qpn;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.start_list)) {
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);
                neigh = p->neigh;

                qpn = IPOIB_QPN(neigh->daddr);
                /*
                 * As long as the search is done under these two locks,
                 * path existence indicates its validity.
                 */
                path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
                if (!path) {
                        pr_info("%s: ignoring invalid path %pI6\n",
                                __func__,
                                neigh->daddr + QPN_AND_OPTIONS_OFFSET);
                        goto free_neigh;
                }
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);

                ret = ipoib_cm_tx_init(p, qpn, &pathrec);

                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);

                if (ret) {
free_neigh:
                        neigh = p->neigh;
                        if (neigh) {
                                neigh->cm = NULL;
                                ipoib_neigh_free(neigh);
                        }
                        list_del(&p->list);
                        kfree(p);
                }
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.reap_task);
        struct net_device *dev = priv->dev;
        struct ipoib_cm_tx *p;
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.reap_list)) {
                p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                list_del_init(&p->list);
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
                ipoib_cm_tx_destroy(p);
                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

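/*
 * Deferred path-MTU signalling: packets too large for a connection
 * are parked on cm.skb_queue, and this work item sends the matching
 * ICMP "fragmentation needed" / ICMPv6 "packet too big" errors from
 * outside the hot TX path.
 */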
static void ipoib_cm_skb_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.skb_task);
        struct net_device *dev = priv->dev;
        struct sk_buff *skb;
        unsigned long flags;
        unsigned mtu = priv->mcast_mtu;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);

                if (skb->protocol == htons(ETH_P_IP))
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
                dev_kfree_skb_any(skb);

                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

1455 | ||
2337f809 | 1456 | void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, |
839fcaba MT |
1457 | unsigned int mtu) |
1458 | { | |
c1048aff | 1459 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
839fcaba MT |
1460 | int e = skb_queue_empty(&priv->cm.skb_queue); |
1461 | ||
f15ca723 | 1462 | skb_dst_update_pmtu(skb, mtu); |
839fcaba MT |
1463 | |
1464 | skb_queue_tail(&priv->cm.skb_queue, skb); | |
1465 | if (e) | |
0b39578b | 1466 | queue_work(priv->wq, &priv->cm.skb_task); |
839fcaba MT |
1467 | } |
1468 | ||
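/* Work handler for cm.rx_reap_task: free RX connections queued on rx_reap_list. */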
518b1646 MT |
1469 | static void ipoib_cm_rx_reap(struct work_struct *work) |
1470 | { | |
efcd9971 RD |
1471 | ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv, |
1472 | cm.rx_reap_task)->dev); | |
518b1646 MT |
1473 | } |
1474 | ||
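/*
 * Delayed work that ages out passive (RX) connections. passive_ids is
 * kept in LRU order, so the scan starts at the tail and stops at the
 * first entry used within IPOIB_CM_RX_TIMEOUT. Stale connections move
 * to rx_error_list and their QPs to the error state; the flushed
 * completions then drive the remaining teardown in the RX path. The
 * work re-arms itself while any passive connections remain.
 */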
839fcaba MT |
1475 | static void ipoib_cm_stale_task(struct work_struct *work) |
1476 | { | |
1477 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | |
1478 | cm.stale_task.work); | |
1479 | struct ipoib_cm_rx *p; | |
518b1646 | 1480 | int ret; |
839fcaba | 1481 | |
37aebbde | 1482 | spin_lock_irq(&priv->lock); |
839fcaba | 1483 | while (!list_empty(&priv->cm.passive_ids)) { |
518b1646 | 1484 | /* The list is LRU-sorted: scan from the tail and |
839fcaba MT |
1485 | * stop at the first recently used entry. */ |
1486 | p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list); | |
60a596da | 1487 | if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT)) |
839fcaba | 1488 | break; |
518b1646 MT |
1489 | list_move(&p->list, &priv->cm.rx_error_list); |
1490 | p->state = IPOIB_CM_RX_ERROR; | |
37aebbde | 1491 | spin_unlock_irq(&priv->lock); |
518b1646 MT |
1492 | ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); |
1493 | if (ret) | |
1494 | ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); | |
37aebbde | 1495 | spin_lock_irq(&priv->lock); |
839fcaba | 1496 | } |
7c5b9ef8 MT |
1497 | |
1498 | if (!list_empty(&priv->cm.passive_ids)) | |
0b39578b | 1499 | queue_delayed_work(priv->wq, |
7c5b9ef8 | 1500 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); |
37aebbde | 1501 | spin_unlock_irq(&priv->lock); |
839fcaba MT |
1502 | } |
1503 | ||
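/*
 * "mode" sysfs attribute: reads report "connected" or "datagram";
 * writes switch between the two, e.g. (interface name may differ):
 *
 *   echo connected > /sys/class/net/ib0/mode
 */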
2337f809 | 1504 | static ssize_t show_mode(struct device *d, struct device_attribute *attr, |
839fcaba MT |
1505 | char *buf) |
1506 | { | |
c1048aff ES |
1507 | struct net_device *dev = to_net_dev(d); |
1508 | struct ipoib_dev_priv *priv = ipoib_priv(dev); | |
839fcaba MT |
1509 | |
1510 | if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) | |
1511 | return sprintf(buf, "connected\n"); | |
1512 | else | |
1513 | return sprintf(buf, "datagram\n"); | |
1514 | } | |
1515 | ||
862096a8 OG |
1516 | static ssize_t set_mode(struct device *d, struct device_attribute *attr, |
1517 | const char *buf, size_t count) | |
1518 | { | |
1519 | struct net_device *dev = to_net_dev(d); | |
1520 | int ret; | |
c1048aff | 1521 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
198b12f7 ES |
1522 | |
1523 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags)) | |
1524 | return -EPERM; | |
862096a8 | 1525 | |
69956d83 | 1526 | if (!mutex_trylock(&priv->sysfs_mutex)) |
862096a8 OG |
1527 | return restart_syscall(); |
1528 | ||
69956d83 ES |
1529 | if (!rtnl_trylock()) { |
1530 | mutex_unlock(&priv->sysfs_mutex); | |
1531 | return restart_syscall(); | |
1532 | } | |
1533 | ||
862096a8 OG |
1534 | ret = ipoib_set_mode(dev, buf); |
1535 | ||
0a0007f2 FD |
1536 | /* ipoib_set_mode() returns with the rtnl lock still held unless |
1537 | * it failed with -EBUSY, in which case it has already dropped | |
1538 | * the lock itself and rtnl_unlock() must be skipped. | |
1539 | */ | |
1540 | if (ret != -EBUSY) | |
1541 | rtnl_unlock(); | |
69956d83 | 1542 | mutex_unlock(&priv->sysfs_mutex); |
862096a8 | 1543 | |
0a0007f2 | 1544 | return (!ret || ret == -EBUSY) ? count : ret; |
862096a8 OG |
1545 | } |
1546 | ||
551fd612 | 1547 | static DEVICE_ATTR(mode, 0644, show_mode, set_mode); |
839fcaba MT |
1548 | |
1549 | int ipoib_cm_add_mode_attr(struct net_device *dev) | |
1550 | { | |
1551 | return device_create_file(&dev->dev, &dev_attr_mode); | |
1552 | } | |
1553 | ||
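/*
 * Try to create the shared receive queue. Failure is non-fatal: an
 * -ENOSYS from the provider just means SRQs are unsupported, and a
 * NULL priv->cm.srq makes the driver fall back to non-SRQ mode with
 * per-connection receive rings.
 */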
586a6934 | 1554 | static void ipoib_cm_create_srq(struct net_device *dev, int max_sge) |
839fcaba | 1555 | { |
c1048aff | 1556 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
839fcaba | 1557 | struct ib_srq_init_attr srq_init_attr = { |
96104eda | 1558 | .srq_type = IB_SRQT_BASIC, |
839fcaba MT |
1559 | .attr = { |
1560 | .max_wr = ipoib_recvq_size, | |
586a6934 | 1561 | .max_sge = max_sge |
839fcaba MT |
1562 | } |
1563 | }; | |
7b3687df RD |
1564 | |
1565 | priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr); | |
1566 | if (IS_ERR(priv->cm.srq)) { | |
68e995a2 | 1567 | if (PTR_ERR(priv->cm.srq) != -ENOSYS) |
c55359a2 | 1568 | pr_warn("%s: failed to allocate SRQ, error %ld\n", |
68e995a2 | 1569 | priv->ca->name, PTR_ERR(priv->cm.srq)); |
7b3687df | 1570 | priv->cm.srq = NULL; |
68e995a2 | 1571 | return; |
7b3687df RD |
1572 | } |
1573 | ||
fad953ce KC |
1574 | priv->cm.srq_ring = vzalloc(array_size(ipoib_recvq_size, |
1575 | sizeof(*priv->cm.srq_ring))); | |
7b3687df | 1576 | if (!priv->cm.srq_ring) { |
7b3687df RD |
1577 | ib_destroy_srq(priv->cm.srq); |
1578 | priv->cm.srq = NULL; | |
b1404069 | 1579 | return; |
7b3687df | 1580 | } |
b1404069 | 1581 | |
7b3687df RD |
1582 | } |
1583 | ||
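/*
 * Per-device connected-mode setup: initialize the CM lists and work
 * items, create the SRQ sized to the device's max_srq_sge (capped at
 * IPOIB_CM_RX_SG) and, when an SRQ exists, pre-post ipoib_recvq_size
 * receive buffers. Setting IPOIB_FLAGS_RC in the first hardware
 * address byte advertises connected-mode support to peers.
 */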
1584 | int ipoib_cm_dev_init(struct net_device *dev) | |
1585 | { | |
c1048aff | 1586 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
4a061b28 | 1587 | int max_srq_sge, i; |
839fcaba MT |
1588 | |
1589 | INIT_LIST_HEAD(&priv->cm.passive_ids); | |
1590 | INIT_LIST_HEAD(&priv->cm.reap_list); | |
1591 | INIT_LIST_HEAD(&priv->cm.start_list); | |
518b1646 MT |
1592 | INIT_LIST_HEAD(&priv->cm.rx_error_list); |
1593 | INIT_LIST_HEAD(&priv->cm.rx_flush_list); | |
1594 | INIT_LIST_HEAD(&priv->cm.rx_drain_list); | |
1595 | INIT_LIST_HEAD(&priv->cm.rx_reap_list); | |
839fcaba MT |
1596 | INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start); |
1597 | INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap); | |
1598 | INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap); | |
518b1646 | 1599 | INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap); |
839fcaba MT |
1600 | INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task); |
1601 | ||
1602 | skb_queue_head_init(&priv->cm.skb_queue); | |
1603 | ||
4a061b28 | 1604 | ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge); |
586a6934 | 1605 | |
4a061b28 OG |
1606 | max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge); |
1607 | ipoib_cm_create_srq(dev, max_srq_sge); | |
586a6934 | 1608 | if (ipoib_cm_has_srq(dev)) { |
4a061b28 OG |
1609 | priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10; |
1610 | priv->cm.num_frags = max_srq_sge; | |
586a6934 PS |
1611 | ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n", |
1612 | priv->cm.max_cm_mtu, priv->cm.num_frags); | |
1613 | } else { | |
1614 | priv->cm.max_cm_mtu = IPOIB_CM_MTU; | |
1615 | priv->cm.num_frags = IPOIB_CM_RX_SG; | |
1616 | } | |
1617 | ||
a7d834c4 | 1618 | ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge); |
68e995a2 PS |
1619 | |
1620 | if (ipoib_cm_has_srq(dev)) { | |
1621 | for (i = 0; i < ipoib_recvq_size; ++i) { | |
1622 | if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i, | |
586a6934 | 1623 | priv->cm.num_frags - 1, |
22252b4e TA |
1624 | priv->cm.srq_ring[i].mapping, |
1625 | GFP_KERNEL)) { | |
68e995a2 PS |
1626 | ipoib_warn(priv, |
1627 | "failed to allocate receive buffer %d\n", i); |
1628 | ipoib_cm_dev_cleanup(dev); | |
1629 | return -ENOMEM; | |
1630 | } | |
7b3687df | 1631 | |
68e995a2 PS |
1632 | if (ipoib_cm_post_receive_srq(dev, i)) { |
1633 | ipoib_warn(priv, | |
1634 | "ipoib_cm_post_receive_srq failed for buf %d\n", i); | |
1635 | ipoib_cm_dev_cleanup(dev); | |
1636 | return -EIO; | |
1637 | } | |
839fcaba MT |
1638 | } |
1639 | } | |
1640 | ||
1641 | priv->dev->dev_addr[0] = IPOIB_FLAGS_RC; | |
1642 | return 0; | |
1643 | } | |
1644 | ||
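/*
 * Undo ipoib_cm_dev_init(): destroy the SRQ and free its receive
 * ring. A no-op when no SRQ was ever created.
 */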
1645 | void ipoib_cm_dev_cleanup(struct net_device *dev) | |
1646 | { | |
c1048aff | 1647 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
1efb6144 | 1648 | int ret; |
839fcaba MT |
1649 | |
1650 | if (!priv->cm.srq) | |
1651 | return; | |
1652 | ||
1653 | ipoib_dbg(priv, "Cleanup ipoib connected mode.\n"); | |
1654 | ||
1655 | ret = ib_destroy_srq(priv->cm.srq); | |
1656 | if (ret) | |
1657 | ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret); | |
1658 | ||
1659 | priv->cm.srq = NULL; | |
1660 | if (!priv->cm.srq_ring) | |
1661 | return; | |
1efb6144 RD |
1662 | |
1663 | ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring); | |
839fcaba MT |
1664 | priv->cm.srq_ring = NULL; |
1665 | } |