drivers/infiniband/ulp/ipoib/ipoib_ib.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
        struct ipoib_ah *ah;
        struct ib_ah *vah;

        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);

        vah = ib_create_ah(pd, attr);
        if (IS_ERR(vah)) {
                kfree(ah);
                ah = (struct ipoib_ah *)vah;
        } else {
                ah->ah = vah;
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
        }

        return ah;
}

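/*
 * Release an address handle.  The underlying ib_ah is not destroyed
 * here: the ipoib_ah is queued on priv->dead_ahs and only reaped (see
 * __ipoib_reap_ah() below) once the send queue has advanced past the
 * last send that referenced it.
 */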
void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        list_add_tail(&ah->list, &priv->dead_ahs);
        spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
                                  u64 mapping[IPOIB_UD_RX_SG])
{
        ib_dma_unmap_single(priv->ca, mapping[0],
                            IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
                            DMA_FROM_DEVICE);
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int ret;

        priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
        priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
        priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

        ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}

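/*
 * Allocate an skb for receive ring slot 'id' and DMA-map it for the
 * device.  On success the buffer is recorded in priv->rx_ring[id].
 */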
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int buf_size;
        u64 *mapping;

        buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

        skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
        if (unlikely(!skb))
                return NULL;

        /*
         * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
         * header.  So we need 4 more bytes to get to 48 and align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 4);

        mapping = priv->rx_ring[id].mapping;
        mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
                goto error;

        priv->rx_ring[id].skb = skb;
        return skb;
error:
        dev_kfree_skb_any(skb);
        return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}

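/*
 * Handle a single receive completion: on success, pass the skb up via
 * napi_gro_receive() and repost a freshly allocated buffer in the same
 * ring slot (reusing the old buffer if the allocation fails); on error,
 * unmap and free the slot's buffer.
 */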
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct sk_buff *skb;
        u64 mapping[IPOIB_UD_RX_SG];
        union ib_gid *dgid;

        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_recvq_size);
                return;
        }

        skb = priv->rx_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed recv event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
                dev_kfree_skb_any(skb);
                priv->rx_ring[wr_id].skb = NULL;
                return;
        }

        /*
         * Drop packets that this interface sent, i.e. multicast packets
         * that the HCA has replicated.
         */
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
                goto repost;

        memcpy(mapping, priv->rx_ring[wr_id].mapping,
               IPOIB_UD_RX_SG * sizeof *mapping);

        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
        if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        ipoib_ud_dma_unmap_rx(priv, mapping);

        skb_put(skb, wc->byte_len);

        /* First byte of dgid signals multicast when 0xff */
        dgid = &((struct ib_grh *)skb->data)->dgid;

        if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
                skb->pkt_type = PACKET_HOST;
        else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;

        skb_pull(skb, IB_GRH_BYTES);

        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_reset_mac_header(skb);
        skb_pull(skb, IPOIB_ENCAP_LEN);

        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;

        skb->dev = dev;
        if ((dev->features & NETIF_F_RXCSUM) &&
            likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_receive(&priv->napi, skb);

repost:
        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                           "for buf %d\n", wr_id);
}

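/*
 * DMA-map an skb for transmission: mapping[0] covers the linear head
 * (when present) and the remaining slots cover the page fragments.
 * On a partial failure, everything mapped so far is unwound.
 */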
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
                                               DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
                        return -EIO;

                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                mapping[i + off] = ib_dma_map_page(ca,
                                                   skb_frag_page(frag),
                                                   frag->page_offset, skb_frag_size(frag),
                                                   DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
                        goto partial_error;
        }
        return 0;

partial_error:
        for (; i > 0; --i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
        }

        if (off)
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

        return -EIO;
}

void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
                        struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
                                    DMA_TO_DEVICE);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                ib_dma_unmap_page(priv->ca, mapping[i + off],
                                  skb_frag_size(frag), DMA_TO_DEVICE);
        }
}

/*
 * As a result of a completion error the QP can be transitioned to the
 * SQE state.  This function checks whether the (send) QP is in the SQE
 * state and, if so, moves it back to RTS so that it is functional again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
        struct ipoib_qp_state_validate *qp_work =
                container_of(work, struct ipoib_qp_state_validate, work);

        struct ipoib_dev_priv *priv = qp_work->priv;
        struct ib_qp_attr qp_attr;
        struct ib_qp_init_attr query_init_attr;
        int ret;

        ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
        if (ret) {
                ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
                           __func__, ret);
                goto free_res;
        }
        pr_info("%s: QP: 0x%x is in state: %d\n",
                __func__, priv->qp->qp_num, qp_attr.qp_state);

        /* currently we only support the SQE->RTS transition */
        if (qp_attr.qp_state == IB_QPS_SQE) {
                qp_attr.qp_state = IB_QPS_RTS;

                ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
                if (ret) {
                        pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
                                ret, priv->qp->qp_num);
                        goto free_res;
                }
                pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
                        __func__, priv->qp->qp_num);
        } else {
                pr_warn("QP (%d) will stay in state: %d\n",
                        priv->qp->qp_num, qp_attr.qp_state);
        }

free_res:
        kfree(qp_work);
}

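/*
 * Handle a single send completion: unmap and free the skb, advance
 * tx_tail, and wake the netdev queue once the ring drains back to half
 * full.  A completion error (other than a flush) schedules the QP
 * state check above, since the QP may have dropped into SQE.
 */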
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;

        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &priv->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv, tx_req);

        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        ++priv->tx_tail;
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_qp_state_validate *qp_work;
                ipoib_warn(priv, "failed send event "
                           "(status=%d, wrid=%d vend_err %x)\n",
                           wc->status, wr_id, wc->vendor_err);
                qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
                if (!qp_work) {
                        ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
                                   __func__, priv->qp->qp_num);
                        return;
                }

                INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
                qp_work->priv = priv;
                queue_work(priv->wq, &qp_work->work);
        }
}

static int poll_tx(struct ipoib_dev_priv *priv)
{
        int n, i;

        n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
        for (i = 0; i < n; ++i)
                ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

        return n == MAX_SEND_CQE;
}

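/*
 * NAPI poll handler.  Only receive completions count against the NAPI
 * budget; connected-mode TX completions found on the recv CQ are
 * dispatched without being counted.  When the budget is not exhausted,
 * the CQ is re-armed, and polling resumes if ib_req_notify_cq()
 * reports missed events.
 */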
int ipoib_poll(struct napi_struct *napi, int budget)
{
        struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
        struct net_device *dev = priv->dev;
        int done;
        int t;
        int n, i;

        done  = 0;

poll_more:
        while (done < budget) {
                int max = (budget - done);

                t = min(IPOIB_NUM_WC, max);
                n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = priv->ibwc + i;

                        if (wc->wr_id & IPOIB_OP_RECV) {
                                ++done;
                                if (wc->wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, wc);
                                else
                                        ipoib_ib_handle_rx_wc(dev, wc);
                        } else
                                ipoib_cm_handle_tx_wc(priv->dev, wc);
                }

                if (n != t)
                        break;
        }

        if (done < budget) {
                napi_complete(napi);
                if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                              IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
                    napi_reschedule(napi))
                        goto poll_more;
        }

        return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        napi_schedule(&priv->napi);
}

static void drain_tx_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        netif_tx_lock(dev);
        while (poll_tx(priv))
                ; /* nothing */

        if (netif_queue_stopped(dev))
                mod_timer(&priv->poll_timer, jiffies + 1);

        netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

        mod_timer(&priv->poll_timer, jiffies);
}

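/*
 * Post one send work request.  For GSO skbs the TCP/IP headers are
 * passed separately and the WR is posted as IB_WR_LSO so the HCA can
 * do the segmentation; everything else goes out as a plain IB_WR_SEND.
 */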
static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            struct ipoib_tx_buf *tx_req,
                            void *head, int hlen)
{
        struct ib_send_wr *bad_wr;
        struct sk_buff *skb = tx_req->skb;

        ipoib_build_sge(priv, tx_req);

        priv->tx_wr.wr.wr_id   = wr_id;
        priv->tx_wr.remote_qpn = qpn;
        priv->tx_wr.ah         = address;

        if (head) {
                priv->tx_wr.mss       = skb_shinfo(skb)->gso_size;
                priv->tx_wr.header    = head;
                priv->tx_wr.hlen      = hlen;
                priv->tx_wr.wr.opcode = IB_WR_LSO;
        } else
                priv->tx_wr.wr.opcode = IB_WR_SEND;

        return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int hlen, rc;
        void *phead;

        if (skb_is_gso(skb)) {
                hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
                phead = skb->data;
                if (unlikely(!skb_pull(skb, hlen))) {
                        ipoib_warn(priv, "linear data too small\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
        } else {
                if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                        ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                                   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
                        return;
                }
                phead = NULL;
                hlen  = 0;
        }

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

        if (++priv->tx_outstanding == ipoib_sendq_size) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                        ipoib_warn(priv, "request notify on send CQ failed\n");
                netif_stop_queue(dev);
        }

        skb_orphan(skb);
        skb_dst_drop(skb);

        rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                       address->ah, qpn, tx_req, phead, hlen);
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                --priv->tx_outstanding;
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else {
                dev->trans_start = jiffies;

                address->last_send = priv->tx_head;
                ++priv->tx_head;
        }

        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
                while (poll_tx(priv))
                        ; /* nothing */
}

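/*
 * Destroy any dead address handles whose last send has completed,
 * i.e. those whose last_send counter is at or behind tx_tail.
 */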
static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                        list_del(&ah->list);
                        ib_destroy_ah(ah->ah);
                        kfree(ah);
                }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
        struct net_device *dev = priv->dev;

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(priv->wq, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
}

static void ipoib_flush_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        cancel_delayed_work(&priv->ah_reap_task);
        flush_workqueue(priv->wq);
        ipoib_reap_ah(&priv->ah_reap_task.work);
}

static void ipoib_stop_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        ipoib_flush_ah(dev);
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
        drain_tx_cq((struct net_device *)ctx);
}

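/*
 * Bring the IB side of the device up: verify the P_Key, initialize the
 * QP, post receive buffers, open the connected-mode side, and start
 * the AH reaper and NAPI.  On failure everything is torn back down via
 * ipoib_ib_dev_stop().
 */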
int ipoib_ib_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
                           (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
                return -1;
        }

        ret = ipoib_init_qp(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                goto dev_stop;
        }

        ret = ipoib_cm_dev_open(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
                goto dev_stop;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(priv->wq, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));

        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);

        return 0;
dev_stop:
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);
        ipoib_ib_dev_stop(dev);
        return -1;
}

void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!(priv->pkey & 0x7fff) ||
            ib_find_pkey(priv->ca, priv->port, priv->pkey,
                         &priv->pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");
                return 0;
        }

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        ipoib_mcast_stop_thread(dev);
        ipoib_mcast_dev_flush(dev);

        ipoib_flush_paths(dev);

        return 0;
}

static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}

void ipoib_drain_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, n;

        /*
         * We call completion handling routines that expect to be
         * called from the BH-disabled NAPI poll context, so disable
         * BHs here too.
         */
        local_bh_disable();

        do {
                n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /*
                         * Convert any successful completions to flush
                         * errors to avoid passing packets up the
                         * stack after bringing the device down.
                         */
                        if (priv->ibwc[i].status == IB_WC_SUCCESS)
                                priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

                        if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
                                if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
                        } else
                                ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
                }
        } while (n == IPOIB_NUM_WC);

        while (poll_tx(priv))
                ; /* nothing */

        local_bh_enable();
}

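/*
 * Stop the IB side of the device: move the QP to the error state and
 * wait up to five seconds for all outstanding work requests to flush,
 * force-freeing anything still pending if the hardware appears wedged,
 * then reset the QP and flush the dead AH list.
 */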
int ipoib_ib_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_disable(&priv->napi);

        ipoib_cm_dev_stop(dev);

        /*
         * Move our QP to the error state and then reinitialize when
         * all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail, recvs_pending(dev));

                        /*
                         * assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
                                ipoib_dma_unmap_tx(priv, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                --priv->tx_outstanding;
                        }

                        for (i = 0; i < ipoib_recvq_size; ++i) {
                                struct ipoib_rx_buf *rx_req;

                                rx_req = &priv->rx_ring[i];
                                if (!rx_req->skb)
                                        continue;
                                ipoib_ud_dma_unmap_rx(priv,
                                                      priv->rx_ring[i].mapping);
                                dev_kfree_skb_any(rx_req->skb);
                                rx_req->skb = NULL;
                        }

                        goto timeout;
                }

                ipoib_drain_cq(dev);

                msleep(1);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        del_timer_sync(&priv->poll_timer);
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        ipoib_flush_ah(dev);

        ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

        return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        priv->ca = ca;
        priv->port = port;
        priv->qp = NULL;

        if (ipoib_transport_dev_init(dev, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
                return -ENODEV;
        }

        setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
                    (unsigned long) dev);

        if (dev->flags & IFF_UP) {
                if (ipoib_ib_dev_open(dev)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
        }

        return 0;
}

/*
 * Takes whatever value is in P_Key index 0 and updates priv->pkey.
 * Returns 0 if the P_Key value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
        int result;
        u16 prev_pkey;

        prev_pkey = priv->pkey;
        result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
        if (result) {
                ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
                           priv->port, result);
                return result;
        }

        priv->pkey |= 0x8000;

        if (prev_pkey != priv->pkey) {
                ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
                          prev_pkey, priv->pkey);
                /*
                 * Update the pkey in the broadcast address, while making sure to set
                 * the full membership bit, so that we join the right broadcast group.
                 */
                priv->dev->broadcast[8] = priv->pkey >> 8;
                priv->dev->broadcast[9] = priv->pkey & 0xff;
                return 0;
        }

        return 1;
}

/*
 * Returns 0 if the P_Key value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
        u16 old_index = priv->pkey_index;

        priv->pkey_index = 0;
        ipoib_pkey_dev_check_presence(priv->dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
            (old_index == priv->pkey_index))
                return 1;
        return 0;
}

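/*
 * Common flush implementation.  IPOIB_FLUSH_LIGHT invalidates paths
 * and flushes multicast groups and AHs; IPOIB_FLUSH_NORMAL additionally
 * takes the IB device down; IPOIB_FLUSH_HEAVY re-reads the P_Key and,
 * if it changed, restarts the QP by stopping and reopening the device.
 */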
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
                                 enum ipoib_flush_level level,
                                 int nesting)
{
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        int result;

        down_read_nested(&priv->vlan_rwsem, nesting);

        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                __ipoib_ib_dev_flush(cpriv, level, nesting + 1);

        up_read(&priv->vlan_rwsem);

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
            level != IPOIB_FLUSH_HEAVY) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                /* interface is down. update pkey and leave. */
                if (level == IPOIB_FLUSH_HEAVY) {
                        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
                                update_parent_pkey(priv);
                        else
                                update_child_pkey(priv);
                }
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                return;
        }

        if (level == IPOIB_FLUSH_HEAVY) {
                /* child devices chase their origin P_Key value, while non-child
                 * (parent) devices always take what is present in P_Key index 0
                 */
                if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                        result = update_child_pkey(priv);
                        if (result) {
                                /* restart QP only if P_Key index is changed */
                                ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
                                return;
                        }

                } else {
                        result = update_parent_pkey(priv);
                        /* restart QP only if P_Key value changed */
                        if (result) {
                                ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
                                return;
                        }
                }
        }

        if (level == IPOIB_FLUSH_LIGHT) {
                ipoib_mark_paths_invalid(dev);
                ipoib_mcast_dev_flush(dev);
                ipoib_flush_ah(dev);
        }

        if (level >= IPOIB_FLUSH_NORMAL)
                ipoib_ib_dev_down(dev);

        if (level == IPOIB_FLUSH_HEAVY) {
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                        ipoib_ib_dev_stop(dev);
                if (ipoib_ib_dev_open(dev) != 0)
                        return;
                if (netif_queue_stopped(dev))
                        netif_start_queue(dev);
        }

        /*
         * The device could have been brought down between the start and when
         * we get here, don't bring it back up if it's not configured up
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                if (level >= IPOIB_FLUSH_NORMAL)
                        ipoib_ib_dev_up(dev);
                ipoib_mcast_restart_task(&priv->restart_task);
        }
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_light);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_normal);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");
        /*
         * We must make sure there are no more (path) completions
         * that may wish to touch priv fields that are no longer valid
         */
        ipoib_flush_paths(dev);

        ipoib_mcast_stop_thread(dev);
        ipoib_mcast_dev_flush(dev);

        /*
         * All of our ah references aren't free until after
         * ipoib_mcast_dev_flush(), ipoib_flush_paths, and
         * the neighbor garbage collection is stopped and reaped.
         * That should all be done now, so make a final ah flush.
         */
        ipoib_stop_ah(dev);

        ipoib_transport_dev_cleanup(dev);
}