RDMA/iwcm: Reject connect requests if cmid is not in LISTEN state
[linux-2.6-block.git] / drivers/infiniband/ulp/ipoib/ipoib_ib.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

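/*
 * Allocate an ipoib_ah along with the underlying IB address handle.
 * If ib_create_ah() fails, the ERR_PTR it returned is handed back to
 * the caller cast to struct ipoib_ah *, so callers must check the
 * result with IS_ERR() rather than against NULL.
 */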
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = ib_create_ah(pd, attr);
	if (IS_ERR(vah)) {
		kfree(ah);
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

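/*
 * "Freeing" an AH only queues it on the dead_ahs list; the AH reaper
 * work (__ipoib_reap_ah) destroys it once the send counters show the
 * hardware has finished sending with it.
 */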
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
				    DMA_FROM_DEVICE);
		ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
				  DMA_FROM_DEVICE);
	} else
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
				    DMA_FROM_DEVICE);
}

static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
				   struct sk_buff *skb,
				   unsigned int length)
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
		unsigned int size;
		/*
		 * Only two buffers are needed for max_payload = 4K;
		 * the first buffer is of size IPOIB_UD_HEAD_SIZE.
		 */
		skb->tail += IPOIB_UD_HEAD_SIZE;
		skb->len  += length;

		size = length - IPOIB_UD_HEAD_SIZE;

		skb_frag_size_set(frag, size);
		skb->data_len += size;
		skb->truesize += size;
	} else
		skb_put(skb, length);

}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	if (ipoib_ud_need_sg(priv->max_ib_mtu))
		buf_size = IPOIB_UD_HEAD_SIZE;
	else
		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + 4);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		struct page *page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
		mapping[1] =
			ib_dma_map_page(priv->ca, page,
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
			goto partial_error;
	}

	priv->rx_ring[id].skb = skb;
	return skb;

partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);
	ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) && likely(wc->csum_ok))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

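/*
 * DMA-map an skb for transmit: mapping[0] holds the linear part when
 * present, and the remaining slots hold the page fragments.  'off'
 * records whether slot 0 is occupied so that fragment indices agree
 * between the map, unmap and error-unwind paths.
 */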
static int ipoib_dma_map_tx(struct ib_device *ca,
			    struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						   skb_frag_page(frag),
						   frag->page_offset, skb_frag_size(frag),
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

static void ipoib_dma_unmap_tx(struct ib_device *ca,
			       struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
				  DMA_TO_DEVICE);
	}
}

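/*
 * Send-completion handling: release the DMA mappings and the skb, and
 * wake the net queue once the number of outstanding sends falls back
 * to half of the send ring.
 */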
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

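/*
 * Reap up to MAX_SEND_CQE send completions; a nonzero return means a
 * full batch was reaped, so more completions may still be pending.
 */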
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

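	/*
	 * Re-arm the recv CQ before completing NAPI.  If
	 * ib_req_notify_cq() reports missed events, completions slipped
	 * in between the last poll and the re-arm, so reschedule NAPI
	 * and poll again rather than losing them.
	 */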
	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	napi_schedule(&priv->napi);
}

static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

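/*
 * Send completions are not reaped in NAPI context: the CQ event
 * handler below just arms poll_timer, and drain_tx_cq() then polls
 * the send CQ from the timer under netif_tx_lock.
 */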
void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}

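/*
 * Build and post the send WR: the gather list covers the mapped
 * linear part (if any) plus the page fragments.  For GSO skbs the
 * pulled TCP/IP header is passed separately and an IB_WR_LSO work
 * request is used instead of a plain IB_WR_SEND.
 */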
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	int i, off;
	struct sk_buff *skb = tx_req->skb;
	skb_frag_t *frags = skb_shinfo(skb)->frags;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	u64 *mapping = tx_req->mapping;

	if (skb_headlen(skb)) {
		priv->tx_sge[0].addr   = mapping[0];
		priv->tx_sge[0].length = skb_headlen(skb);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + off].addr   = mapping[i + off];
		priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
	}
	priv->tx_wr.num_sge	     = nr_frags + off;
	priv->tx_wr.wr_id	     = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah	     = address;

	if (head) {
		priv->tx_wr.wr.ud.mss	 = skb_shinfo(skb)->gso_size;
		priv->tx_wr.wr.ud.header = head;
		priv->tx_wr.wr.ud.hlen	 = hlen;
		priv->tx_wr.opcode	 = IB_WR_LSO;
	} else
		priv->tx_wr.opcode	 = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen  = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address->ah, qpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;
		skb_orphan(skb);

	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */
}

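/*
 * Destroy dead AHs whose last_send has been passed by tx_tail; the
 * signed subtraction keeps the comparison correct when the counters
 * wrap around.
 */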
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
}

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shut down the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

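/*
 * Teardown sequence: move the QP to ERR, wait up to five seconds for
 * all posted work requests to flush back through the CQs, reclaim
 * anything a presumed-wedged HCA never completed, then reset the QP
 * and reap the dead AHs.
 */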
int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) dev);

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

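/*
 * Flush levels: LIGHT invalidates paths and flushes multicast state,
 * NORMAL additionally downs the IB device, and HEAVY restarts the QP
 * (stop + open), which is used when the P_Key index changes.
 */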
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
			ipoib_ib_dev_stop(dev, 0);
			if (ipoib_pkey_dev_delay_open(dev))
				return;
		}

		/* restart QP only if P_Key index is changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(dev);
		ipoib_mcast_dev_flush(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev, 0);

	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Look for the interface pkey value in the IB Port P_Key table and */
	/* set the interface pkey assignment flag                           */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}