/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
                 "Enable data path debug tracing for connected mode if > 0");
#endif

#include "ipoib.h"

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

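/*
 * Tearing down an RX QP: the QP is first moved to the error state with
 * ipoib_cm_err_attr, and then a single marked send WR
 * (IPOIB_CM_RX_DRAIN_WRID) is posted on one flushed QP.  All RX QPs
 * share one CQ, so by the time the flush-error completion for the drain
 * WR is polled, every WR posted earlier on the flushed QPs has already
 * generated its completion.
 */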
static struct ib_qp_attr ipoib_cm_err_attr = {
        .qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0x7fffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
        .wr_id  = IPOIB_CM_RX_DRAIN_WRID,
        .opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
                                  u64 mapping[IPOIB_CM_RX_SG])
{
        int i;

        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (i = 0; i < frags; ++i)
                ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

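/*
 * Repost receive buffer 'id' on the shared receive queue.  The WR id is
 * tagged with IPOIB_CM_OP_SRQ so the completion handler can tell
 * connected-mode receives apart from other work requests.
 */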
static int ipoib_cm_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;

        priv->cm.rx_wr.wr_id = id | IPOIB_CM_OP_SRQ;

        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
                priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

        ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
                                      priv->cm.srq_ring[id].mapping);
                dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
                priv->cm.srq_ring[id].skb = NULL;
        }

        return ret;
}

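/*
 * Allocate an RX skb: IPOIB_CM_HEAD_SIZE bytes of linear headroom plus
 * 'frags' full pages as fragments, with each piece DMA-mapped into
 * mapping[].  On failure, everything allocated so far is unwound.
 */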
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
                                             u64 mapping[IPOIB_CM_RX_SG])
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int i;

        skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
        if (unlikely(!skb))
                return NULL;

        /*
         * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 12);

        mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        for (i = 0; i < frags; i++) {
                struct page *page = alloc_page(GFP_ATOMIC);

                if (!page)
                        goto partial_error;
                skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

                mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
                                                 0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
                        goto partial_error;
        }

        priv->cm.srq_ring[id].skb = skb;
        return skb;

partial_error:

        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (; i > 0; --i)
                ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);
        return NULL;
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
        struct ib_send_wr *bad_wr;
        struct ipoib_cm_rx *p;

        /* We only reserved 1 extra slot in CQ for drain WRs, so
         * make sure we have at most 1 outstanding WR. */
        if (list_empty(&priv->cm.rx_flush_list) ||
            !list_empty(&priv->cm.rx_drain_list))
                return;

        /*
         * QPs on the flush list are in the error state.  This way, a
         * "flush error" WC will be immediately generated for each WR
         * we post.
         */
        p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
        if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
                ipoib_warn(priv, "failed to post drain wr\n");

        list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

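/*
 * QP async event handler.  IB_EVENT_QP_LAST_WQE_REACHED is raised for an
 * SRQ-attached QP in the error state once it will consume no further
 * WQEs, so at that point the connection can be queued for flushing and
 * a drain cycle kicked off.
 */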
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
        struct ipoib_cm_rx *p = ctx;
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        unsigned long flags;

        if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
                return;

        spin_lock_irqsave(&priv->lock, flags);
        list_move(&p->list, &priv->cm.rx_flush_list);
        p->state = IPOIB_CM_RX_FLUSH;
        ipoib_cm_start_rx_drain(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
                                           struct ipoib_cm_rx *p)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {
                .event_handler = ipoib_cm_rx_event_handler,
                .send_cq = priv->cq, /* For drain WR */
                .recv_cq = priv->cq,
                .srq = priv->cm.srq,
                .cap.max_send_wr = 1, /* For drain WR */
                .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
                .qp_context = p,
        };
        return ib_create_qp(priv->pd, &attr);
}

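/*
 * Walk the passive-side QP through INIT -> RTR -> RTS.  The attribute
 * mask for each transition comes from ib_cm_init_qp_attr(), so the CM
 * fills in the path and remote parameters negotiated in the REQ.
 */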
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
                                 struct ib_cm_id *cm_id, struct ib_qp *qp,
                                 unsigned psn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
                return ret;
        }
        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }
        qp_attr.rq_psn = psn;
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        /*
         * Current Mellanox HCA firmware won't generate completions
         * with error for drain WRs unless the QP has been moved to
         * RTS first. This work-around leaves a window where a QP has
         * moved to error asynchronously, but this will eventually get
         * fixed in firmware, so let's not error out if modify QP
         * fails.
         */
        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return 0;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return 0;
        }

        return 0;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
                             struct ib_qp *qp, struct ib_cm_req_event_param *req,
                             unsigned psn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_rep_param rep = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        rep.private_data = &data;
        rep.private_data_len = sizeof data;
        rep.flow_control = 0;
        rep.rnr_retry_count = req->rnr_retry_count;
        rep.target_ack_delay = 20; /* FIXME */
        rep.srq = 1;
        rep.qp_num = qp->qp_num;
        rep.starting_psn = psn;
        return ib_send_cm_rep(cm_id, &rep);
}

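/*
 * Passive-side connection setup: on a REQ we create an RX QP, bring it
 * up with a random starting PSN, schedule the stale-connection scan,
 * and reply with a REP advertising our UD QPN and buffer size.
 */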
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct net_device *dev = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned psn;
        int ret;

        ipoib_dbg(priv, "REQ arrived\n");
        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        p->dev = dev;
        p->id = cm_id;
        cm_id->context = p;
        p->state = IPOIB_CM_RX_LIVE;
        p->jiffies = jiffies;
        INIT_LIST_HEAD(&p->list);

        p->qp = ipoib_cm_create_rx_qp(dev, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                goto err_qp;
        }

        psn = random32() & 0xffffff;
        ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
        if (ret)
                goto err_modify;

        spin_lock_irq(&priv->lock);
        queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        /* Add this entry to passive ids list head, but do not re-add it
         * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
        p->jiffies = jiffies;
        if (p->state == IPOIB_CM_RX_LIVE)
                list_move(&p->list, &priv->cm.passive_ids);
        spin_unlock_irq(&priv->lock);

        ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
        if (ret) {
                ipoib_warn(priv, "failed to send REP: %d\n", ret);
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
        }
        return 0;

err_modify:
        ib_destroy_qp(p->qp);
err_qp:
        kfree(p);
        return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_rx *p;
        struct ipoib_dev_priv *priv;

        switch (event->event) {
        case IB_CM_REQ_RECEIVED:
                return ipoib_cm_req_handler(cm_id, event);
        case IB_CM_DREQ_RECEIVED:
                p = cm_id->context;
                ib_send_cm_drep(cm_id, NULL, 0);
                /* Fall through */
        case IB_CM_REJ_RECEIVED:
                p = cm_id->context;
                priv = netdev_priv(p->dev);
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
                /* Fall through */
        default:
                return 0;
        }
}
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
                          unsigned int length, struct sk_buff *toskb)
{
        int i, num_frags;
        unsigned int size;

        /* put header into skb */
        size = min(length, hdr_space);
        skb->tail += size;
        skb->len += size;
        length -= size;

        num_frags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < num_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                if (length == 0) {
                        /* don't need this page */
                        skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
                        --skb_shinfo(skb)->nr_frags;
                } else {
                        size = min(length, (unsigned) PAGE_SIZE);

                        frag->size = size;
                        skb->data_len += size;
                        skb->truesize += size;
                        skb->len += size;
                        length -= size;
                }
        }
}

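/*
 * RX completion handler.  Drain-WR completions (wr_id beyond the ring)
 * advance the QP teardown state machine; for data completions we
 * periodically refresh the connection's LRU stamp, swap in a freshly
 * allocated buffer, trim the old skb to the received length and hand it
 * to the stack, then repost the ring slot to the SRQ.
 */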
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
        struct sk_buff *skb, *newskb;
        struct ipoib_cm_rx *p;
        unsigned long flags;
        u64 mapping[IPOIB_CM_RX_SG];
        int frags;

        ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~IPOIB_CM_OP_SRQ)) {
                        spin_lock_irqsave(&priv->lock, flags);
                        list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                        ipoib_cm_start_rx_drain(priv);
                        queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else
                        ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
                                   wr_id, ipoib_recvq_size);
                return;
        }

        skb = priv->cm.srq_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                ipoib_dbg(priv, "cm recv error "
                          "(status=%d, wrid=%d vend_err %x)\n",
                          wc->status, wr_id, wc->vendor_err);
                ++priv->stats.rx_dropped;
                goto repost;
        }

        if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) {
                p = wc->qp->qp_context;
                if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
                        spin_lock_irqsave(&priv->lock, flags);
                        p->jiffies = jiffies;
                        /* Move this entry to list head, but do not re-add it
                         * if it has been moved out of list. */
                        if (p->state == IPOIB_CM_RX_LIVE)
                                list_move(&p->list, &priv->cm.passive_ids);
                        spin_unlock_irqrestore(&priv->lock, flags);
                }
        }

        frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
                                              (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

        newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
        if (unlikely(!newskb)) {
                /*
                 * If we can't allocate a new RX buffer, dump
                 * this packet and reuse the old buffer.
                 */
                ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
                ++priv->stats.rx_dropped;
                goto repost;
        }

        ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
        memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_reset_mac_header(skb);
        skb_pull(skb, IPOIB_ENCAP_LEN);

        dev->last_rx = jiffies;
        ++priv->stats.rx_packets;
        priv->stats.rx_bytes += skb->len;

        skb->dev = dev;
        /* XXX get correct PACKET_ type here */
        skb->pkt_type = PACKET_HOST;
        netif_receive_skb(skb);

repost:
        if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
                ipoib_warn(priv, "ipoib_cm_post_receive failed "
                           "for buf %d\n", wr_id);
}

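/*
 * TX path: each connection has its own send queue and CQ.  post_send()
 * maps a single SGE per packet; ipoib_cm_send() records the skb in the
 * ring before posting so the completion handler always sees consistent
 * state, and stops the net queue when the ring fills up.
 */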
static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            unsigned int wr_id,
                            u64 addr, int len)
{
        struct ib_send_wr *bad_wr;

        priv->tx_sge.addr = addr;
        priv->tx_sge.length = len;

        priv->tx_wr.wr_id = wr_id;

        return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        u64 addr;

        if (unlikely(skb->len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, tx->mtu);
                ++priv->stats.tx_dropped;
                ++priv->stats.tx_errors;
                ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
                return;
        }

        ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
                       tx->tx_head, skb->len, tx->qp->qp_num);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
                ++priv->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        tx_req->mapping = addr;

        if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                               addr, skb->len))) {
                ipoib_warn(priv, "post_send failed\n");
                ++priv->stats.tx_errors;
                ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;
                ++tx->tx_head;

                if (tx->tx_head - tx->tx_tail == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
                        netif_stop_queue(dev);
                        set_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
                }
        }
}

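/*
 * TX completion handler: unmap and free the sent skb, advance the tail,
 * and wake the net queue once the ring is half empty.  A completion
 * error other than a flush tears the connection down and queues it for
 * reaping.
 */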
static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx,
                                  struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;
        unsigned long flags;

        ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &tx->tx_ring[wr_id];

        ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

        /* FIXME: is this right? Shouldn't we only increment on success? */
        ++priv->stats.tx_packets;
        priv->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        spin_lock_irqsave(&priv->tx_lock, flags);
        ++tx->tx_tail;
        if (unlikely(test_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags)) &&
            tx->tx_head - tx->tx_tail <= ipoib_sendq_size >> 1) {
                clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
                netif_wake_queue(dev);
        }

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_neigh *neigh;

                ipoib_dbg(priv, "failed cm send event "
                          "(status=%d, wrid=%d vend_err %x)\n",
                          wc->status, wr_id, wc->vendor_err);

                spin_lock(&priv->lock);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        list_del(&neigh->list);
                        if (neigh->ah)
                                ipoib_put_ah(neigh->ah);
                        ipoib_neigh_free(dev, neigh);

                        tx->neigh = NULL;
                }

                /* queue would be re-started anyway when TX is destroyed,
                 * but it makes sense to do it ASAP here. */
                if (test_and_clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags))
                        netif_wake_queue(dev);

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }

                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

                spin_unlock(&priv->lock);
        }

        spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_completion(struct ib_cq *cq, void *tx_ptr)
{
        struct ipoib_cm_tx *tx = tx_ptr;
        int n, i;

        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        do {
                n = ib_poll_cq(cq, IPOIB_NUM_WC, tx->ibwc);
                for (i = 0; i < n; ++i)
                        ipoib_cm_handle_tx_wc(tx->dev, tx, tx->ibwc + i);
        } while (n == IPOIB_NUM_WC);
}

int ipoib_cm_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                return 0;

        priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
        if (IS_ERR(priv->cm.id)) {
                printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
                ret = PTR_ERR(priv->cm.id);
                goto err_cm;
        }

        ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
                           0, NULL);
        if (ret) {
                printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
                       IPOIB_CM_IETF_ID | priv->qp->qp_num);
                goto err_listen;
        }

        return 0;

err_listen:
        ib_destroy_cm_id(priv->cm.id);
err_cm:
        priv->cm.id = NULL;
        return ret;
}

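/*
 * Tear down all passive connections: move every live RX QP to the error
 * state, then poll the CQ until the error, flush and drain lists have
 * emptied (or a 5 second timeout expires, in which case the hardware is
 * assumed wedged and everything is freed anyway).
 */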
void ipoib_cm_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *p, *n;
        unsigned long begin;
        LIST_HEAD(list);
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
                return;

        ib_destroy_cm_id(priv->cm.id);
        priv->cm.id = NULL;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        /* Wait for all RX to be drained */
        begin = jiffies;

        while (!list_empty(&priv->cm.rx_error_list) ||
               !list_empty(&priv->cm.rx_flush_list) ||
               !list_empty(&priv->cm.rx_drain_list)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "RX drain timing out\n");

                        /*
                         * assume the HW is wedged and just free up everything.
                         */
                        list_splice_init(&priv->cm.rx_flush_list, &list);
                        list_splice_init(&priv->cm.rx_error_list, &list);
                        list_splice_init(&priv->cm.rx_drain_list, &list);
                        break;
                }
                spin_unlock_irq(&priv->lock);
                msleep(1);
                ipoib_drain_cq(dev);
                spin_lock_irq(&priv->lock);
        }

        list_splice_init(&priv->cm.rx_reap_list, &list);

        spin_unlock_irq(&priv->lock);

        list_for_each_entry_safe(p, n, &list, list) {
                ib_destroy_cm_id(p->id);
                ib_destroy_qp(p->qp);
                kfree(p);
        }

        cancel_delayed_work(&priv->cm.stale_task);
}

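/*
 * Active-side REP handling: validate the MTU advertised by the peer,
 * bring our TX QP to RTS, flush any packets that queued on the neigh
 * while the connection was forming, and confirm with an RTU.
 */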
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct ipoib_cm_tx *p = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        struct ipoib_cm_data *data = event->private_data;
        struct sk_buff_head skqueue;
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        struct sk_buff *skb;

        p->mtu = be32_to_cpu(data->mtu);

        if (p->mtu <= IPOIB_ENCAP_LEN) {
                ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
                           p->mtu, IPOIB_ENCAP_LEN);
                return -EINVAL;
        }

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }

        qp_attr.rq_psn = 0 /* FIXME */;
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return ret;
        }

        skb_queue_head_init(&skqueue);

        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
        spin_unlock_irq(&priv->lock);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }

        ret = ib_send_cm_rtu(cm_id, NULL, 0);
        if (ret) {
                ipoib_warn(priv, "failed to send RTU: %d\n", ret);
                return ret;
        }
        return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ib_cq *cq)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {};

        attr.recv_cq = priv->cq;
        attr.srq = priv->cm.srq;
        attr.cap.max_send_wr = ipoib_sendq_size;
        attr.cap.max_send_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        attr.qp_type = IB_QPT_RC;
        attr.send_cq = cq;
        return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
                             struct ib_cm_id *id, struct ib_qp *qp,
                             u32 qpn,
                             struct ib_sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_req_param req = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        req.primary_path = pathrec;
        req.alternate_path = NULL;
        req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
        req.qp_num = qp->qp_num;
        req.qp_type = qp->qp_type;
        req.private_data = &data;
        req.private_data_len = sizeof data;
        req.flow_control = 0;

        req.starting_psn = 0; /* FIXME */

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req.responder_resources = 4;
        req.remote_cm_response_timeout = 20;
        req.local_cm_response_timeout = 20;
        req.retry_count = 0; /* RFC draft warns against retries */
        req.rnr_retry_count = 0; /* RFC draft warns against retries */
        req.max_cm_retries = 15;
        req.srq = 1;
        return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
                                   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
        if (ret) {
                ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        qp_attr.port_num = priv->port;
        qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
                return ret;
        }
        return 0;
}

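/*
 * Set up everything an active connection needs: the TX ring, a
 * dedicated send CQ, the RC QP and a CM ID, then fire off the REQ.  On
 * error, resources are released in reverse order of creation.
 */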
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
                            struct ib_sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        int ret;

        p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring,
                             GFP_KERNEL);
        if (!p->tx_ring) {
                ipoib_warn(priv, "failed to allocate tx ring\n");
                ret = -ENOMEM;
                goto err_tx;
        }

        p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
                             ipoib_sendq_size + 1, 0);
        if (IS_ERR(p->cq)) {
                ret = PTR_ERR(p->cq);
                ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);
                goto err_cq;
        }

        ret = ib_req_notify_cq(p->cq, IB_CQ_NEXT_COMP);
        if (ret) {
                ipoib_warn(priv, "failed to request completion notification: %d\n", ret);
                goto err_req_notify;
        }

        p->qp = ipoib_cm_create_tx_qp(p->dev, p->cq);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
                goto err_qp;
        }

        p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
        if (IS_ERR(p->id)) {
                ret = PTR_ERR(p->id);
                ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
                goto err_id;
        }

        ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
                goto err_modify;
        }

        ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
        if (ret) {
                ipoib_warn(priv, "failed to send cm req: %d\n", ret);
                goto err_send_cm;
        }

        ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
                  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

        return 0;

err_send_cm:
err_modify:
        ib_destroy_cm_id(p->id);
err_id:
        p->id = NULL;
        ib_destroy_qp(p->qp);
err_req_notify:
err_qp:
        p->qp = NULL;
        ib_destroy_cq(p->cq);
err_cq:
        p->cq = NULL;
err_tx:
        return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        struct ipoib_tx_buf *tx_req;

        ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
                  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

        if (p->id)
                ib_destroy_cm_id(p->id);

        if (p->qp)
                ib_destroy_qp(p->qp);

        if (p->cq)
                ib_destroy_cq(p->cq);

        if (test_bit(IPOIB_FLAG_NETIF_STOPPED, &p->flags))
                netif_wake_queue(p->dev);

        if (p->tx_ring) {
                while ((int) p->tx_tail - (int) p->tx_head < 0) {
                        tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
                        ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
                                            DMA_TO_DEVICE);
                        dev_kfree_skb_any(tx_req->skb);
                        ++p->tx_tail;
                }

                kfree(p->tx_ring);
        }

        kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_tx *tx = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        int ret;

        switch (event->event) {
        case IB_CM_DREQ_RECEIVED:
                ipoib_dbg(priv, "DREQ received.\n");
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        case IB_CM_REP_RECEIVED:
                ipoib_dbg(priv, "REP received.\n");
                ret = ipoib_cm_rep_handler(cm_id, event);
                if (ret)
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                break;
        case IB_CM_REQ_ERROR:
        case IB_CM_REJ_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                ipoib_dbg(priv, "CM error %d.\n", event->event);
                spin_lock_irq(&priv->tx_lock);
                spin_lock(&priv->lock);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        list_del(&neigh->list);
                        if (neigh->ah)
                                ipoib_put_ah(neigh->ah);
                        ipoib_neigh_free(dev, neigh);

                        tx->neigh = NULL;
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }

                spin_unlock(&priv->lock);
                spin_unlock_irq(&priv->tx_lock);
                break;
        default:
                break;
        }

        return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
                                       struct ipoib_neigh *neigh)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_tx *tx;

        tx = kzalloc(sizeof *tx, GFP_ATOMIC);
        if (!tx)
                return NULL;

        neigh->cm = tx;
        tx->neigh = neigh;
        tx->path = path;
        tx->dev = dev;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
        queue_work(ipoib_workqueue, &priv->cm.start_task);
        return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = netdev_priv(tx->dev);

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                list_move(&tx->list, &priv->cm.reap_list);
                queue_work(ipoib_workqueue, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
                          IPOIB_GID_ARG(tx->neigh->dgid));
                tx->neigh = NULL;
        }
}

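/*
 * Connection setup sleeps (QP/CQ/CM ID creation), so ipoib_cm_create_tx()
 * only queues the work; this task drops the locks around the actual
 * ipoib_cm_tx_init() call and cleans up the neigh if setup fails.
 */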
static void ipoib_cm_tx_start(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.start_task);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        struct ipoib_cm_tx *p;
        unsigned long flags;
        int ret;

        struct ib_sa_path_rec pathrec;
        u32 qpn;

        spin_lock_irqsave(&priv->tx_lock, flags);
        spin_lock(&priv->lock);
        while (!list_empty(&priv->cm.start_list)) {
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);
                neigh = p->neigh;
                qpn = IPOIB_QPN(neigh->neighbour->ha);
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
                spin_unlock(&priv->lock);
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                ret = ipoib_cm_tx_init(p, qpn, &pathrec);
                spin_lock_irqsave(&priv->tx_lock, flags);
                spin_lock(&priv->lock);
                if (ret) {
                        neigh = p->neigh;
                        if (neigh) {
                                neigh->cm = NULL;
                                list_del(&neigh->list);
                                if (neigh->ah)
                                        ipoib_put_ah(neigh->ah);
                                ipoib_neigh_free(dev, neigh);
                        }
                        list_del(&p->list);
                        kfree(p);
                }
        }
        spin_unlock(&priv->lock);
        spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.reap_task);
        struct ipoib_cm_tx *p;

        spin_lock_irq(&priv->tx_lock);
        spin_lock(&priv->lock);
        while (!list_empty(&priv->cm.reap_list)) {
                p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                list_del(&p->list);
                spin_unlock(&priv->lock);
                spin_unlock_irq(&priv->tx_lock);
                ipoib_cm_tx_destroy(p);
                spin_lock_irq(&priv->tx_lock);
                spin_lock(&priv->lock);
        }
        spin_unlock(&priv->lock);
        spin_unlock_irq(&priv->tx_lock);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.skb_task);
        struct sk_buff *skb;

        unsigned mtu = priv->mcast_mtu;

        spin_lock_irq(&priv->tx_lock);
        spin_lock(&priv->lock);
        while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
                spin_unlock(&priv->lock);
                spin_unlock_irq(&priv->tx_lock);
                if (skb->protocol == htons(ETH_P_IP))
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
                dev_kfree_skb_any(skb);
                spin_lock_irq(&priv->tx_lock);
                spin_lock(&priv->lock);
        }
        spin_unlock(&priv->lock);
        spin_unlock_irq(&priv->tx_lock);
}

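/*
 * Called for packets that exceed the connection MTU: record the smaller
 * MTU in the route and queue the skb so ipoib_cm_skb_reap() can send the
 * appropriate ICMP "fragmentation needed" / "packet too big" error from
 * process context.
 */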
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
                           unsigned int mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int e = skb_queue_empty(&priv->cm.skb_queue);

        if (skb->dst)
                skb->dst->ops->update_pmtu(skb->dst, mtu);

        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
                queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.rx_reap_task);
        struct ipoib_cm_rx *p, *n;
        LIST_HEAD(list);

        spin_lock_irq(&priv->lock);
        list_splice_init(&priv->cm.rx_reap_list, &list);
        spin_unlock_irq(&priv->lock);

        list_for_each_entry_safe(p, n, &list, list) {
                ib_destroy_cm_id(p->id);
                ib_destroy_qp(p->qp);
                kfree(p);
        }
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.stale_task.work);
        struct ipoib_cm_rx *p;
        int ret;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                /* List is sorted by LRU, start from tail,
                 * stop when we see a recently used entry */
                p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
                if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                        break;
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        if (!list_empty(&priv->cm.passive_ids))
                queue_delayed_work(ipoib_workqueue,
                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
                         char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

        if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
                return sprintf(buf, "connected\n");
        else
                return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
                        const char *buf, size_t count)
{
        struct net_device *dev = to_net_dev(d);
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* flush paths if we switch modes so that connections are restarted */
        if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
                set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
                ipoib_warn(priv, "enabling connected mode "
                           "will cause multicast packet drops\n");
                ipoib_flush_paths(dev);
                return count;
        }

        if (!strcmp(buf, "datagram\n")) {
                clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
                dev->mtu = min(priv->mcast_mtu, dev->mtu);
                ipoib_flush_paths(dev);
                return count;
        }

        return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_mode);
}

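/*
 * One-time device init: create the SRQ shared by all passive RX QPs,
 * allocate the receive ring, fill in the receive SGE/WR templates, and
 * prime the SRQ with fully mapped buffers.  Setting IPOIB_FLAGS_RC in
 * the hardware address advertises connected-mode support to peers.
 */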
int ipoib_cm_dev_init(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_srq_init_attr srq_init_attr = {
                .attr = {
                        .max_wr  = ipoib_recvq_size,
                        .max_sge = IPOIB_CM_RX_SG
                }
        };
        int ret, i;

        INIT_LIST_HEAD(&priv->cm.passive_ids);
        INIT_LIST_HEAD(&priv->cm.reap_list);
        INIT_LIST_HEAD(&priv->cm.start_list);
        INIT_LIST_HEAD(&priv->cm.rx_error_list);
        INIT_LIST_HEAD(&priv->cm.rx_flush_list);
        INIT_LIST_HEAD(&priv->cm.rx_drain_list);
        INIT_LIST_HEAD(&priv->cm.rx_reap_list);
        INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
        INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
        INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
        INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
        INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

        skb_queue_head_init(&priv->cm.skb_queue);

        priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
        if (IS_ERR(priv->cm.srq)) {
                ret = PTR_ERR(priv->cm.srq);
                priv->cm.srq = NULL;
                return ret;
        }

        priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
                                    GFP_KERNEL);
        if (!priv->cm.srq_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                ipoib_cm_dev_cleanup(dev);
                return -ENOMEM;
        }

        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
                priv->cm.rx_sge[i].lkey = priv->mr->lkey;

        priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
        for (i = 1; i < IPOIB_CM_RX_SG; ++i)
                priv->cm.rx_sge[i].length = PAGE_SIZE;
        priv->cm.rx_wr.next = NULL;
        priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
        priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
                                           priv->cm.srq_ring[i].mapping)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        ipoib_cm_dev_cleanup(dev);
                        return -ENOMEM;
                }
                if (ipoib_cm_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_cm_post_receive failed for buf %d\n", i);
                        ipoib_cm_dev_cleanup(dev);
                        return -EIO;
                }
        }

        priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
        return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, ret;

        if (!priv->cm.srq)
                return;

        ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

        ret = ib_destroy_srq(priv->cm.srq);
        if (ret)
                ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

        priv->cm.srq = NULL;
        if (!priv->cm.srq_ring)
                return;
        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->cm.srq_ring[i].skb) {
                        ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
                                              priv->cm.srq_ring[i].mapping);
                        dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
                        priv->cm.srq_ring[i].skb = NULL;
                }
        kfree(priv->cm.srq_ring);
        priv->cm.srq_ring = NULL;
}