IB/ipoib: drop useless LIST_HEAD
drivers/infiniband/ulp/ipoib/ipoib_ib.c (linux-2.6-block.git)
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <rdma/ib_cache.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

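/*
 * Allocate an ipoib_ah and the underlying IB address handle.  On
 * failure of rdma_create_ah() the ERR_PTR is handed back to the
 * caller through the returned pointer.
 */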
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct rdma_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(vah)) {
		kfree(ah);
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

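/*
 * kref release callback for an ipoib_ah.  The AH is not destroyed
 * here; it is queued on priv->dead_ahs and freed by the AH reaper
 * once all sends referencing it have completed.
 */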
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	ib_dma_unmap_single(priv->ca, mapping[0],
			    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
			    DMA_FROM_DEVICE);
}

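/*
 * Post the receive buffer for ring slot @id to the QP.  On failure
 * the buffer is unmapped and freed, leaving the slot empty.
 */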
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

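/*
 * Allocate and DMA-map a receive skb for ring slot @id.  Returns the
 * skb, or NULL if the allocation or the mapping failed.
 */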
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
	if (unlikely(!skb))
		return NULL;

	/*
	 * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which
	 * is 64-byte aligned.
	 */
	skb_reserve(skb, sizeof(struct ipoib_pseudo_header));

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	priv->rx_ring[id].skb = skb;
	return skb;
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

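/*
 * Handle one UD receive completion: replenish the ring slot, classify
 * the packet (host/broadcast/multicast) from the GRH dgid, drop our
 * own multicast echoes, and hand the skb to GRO.
 */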
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;
	union ib_gid *sgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv,
				   "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof(*mapping));

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);

	skb_put(skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	sgid = &((struct ib_grh *)skb->data)->sgid;

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
		int need_repost = 1;

		if ((wc->wc_flags & IB_WC_GRH) &&
		    sgid->global.interface_id != priv->local_gid.global.interface_id)
			need_repost = 0;

		if (need_repost) {
			dev_kfree_skb_any(skb);
			goto repost;
		}
	}

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_add_pseudo_hdr(skb);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		dev->stats.multicast++;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) &&
	    likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->recv_napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

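/*
 * DMA-map a transmit skb: the linear header (if any) with
 * ib_dma_map_single() and each page fragment with ib_dma_map_page().
 * On a partial failure everything mapped so far is unwound and -EIO
 * is returned.
 */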
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						   skb_frag_page(frag),
						   frag->page_offset, skb_frag_size(frag),
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
			struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(priv->ca, mapping[i + off],
				  skb_frag_size(frag), DMA_TO_DEVICE);
	}
}

/*
 * As a result of a completion error the QP can be transitioned to the
 * SQE state.  This function checks whether the (send) QP is in SQE
 * state and, if so, moves it back to RTS to make it functional again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
	struct ipoib_qp_state_validate *qp_work =
		container_of(work, struct ipoib_qp_state_validate, work);

	struct ipoib_dev_priv *priv = qp_work->priv;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
			   __func__, ret);
		goto free_res;
	}
	pr_info("%s: QP: 0x%x is in state: %d\n",
		__func__, priv->qp->qp_num, qp_attr.qp_state);

	/* currently we support only the SQE->RTS transition */
	if (qp_attr.qp_state == IB_QPS_SQE) {
		qp_attr.qp_state = IB_QPS_RTS;

		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
		if (ret) {
			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
				ret, priv->qp->qp_num);
			goto free_res;
		}
		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
			__func__, priv->qp->qp_num);
	} else {
		pr_warn("QP (%d) will stay in state: %d\n",
			priv->qp->qp_num, qp_attr.qp_state);
	}

free_res:
	kfree(qp_work);
}

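/*
 * Handle one send completion: unmap and free the skb, advance
 * tx_tail, wake the netdev queue if appropriate, and on a non-flush
 * error schedule ipoib_qp_state_validate_work() to recover the QP.
 */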
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;

	if (unlikely(netif_queue_stopped(dev) &&
		     ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
		     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_qp_state_validate *qp_work;
		ipoib_warn(priv,
			   "failed send event (status=%d, wrid=%d vend_err %#x)\n",
			   wc->status, wr_id, wc->vendor_err);
		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
		if (!qp_work)
			return;

		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
		qp_work->priv = priv;
		queue_work(priv->wq, &qp_work->work);
	}
}

static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;
	struct ib_wc *wc;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i) {
		wc = priv->send_wc + i;
		if (wc->wr_id & IPOIB_OP_CM)
			ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
		else
			ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
	}
	return n == MAX_SEND_CQE;
}

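/*
 * NAPI receive poll: drain up to @budget completions from the recv
 * CQ, dispatching each to the connected-mode or datagram handler,
 * then re-arm the CQ and complete NAPI once the CQ runs dry.
 */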
int ipoib_rx_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv =
		container_of(napi, struct ipoib_dev_priv, recv_napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else {
				pr_warn("%s: Got unexpected wqe id\n", __func__);
			}
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

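/*
 * NAPI send poll: reap send completions in batches of MAX_SEND_CQE,
 * then re-arm the send CQ, rescheduling if events were missed.
 */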
int ipoib_tx_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
						   send_napi);
	struct net_device *dev = priv->dev;
	int n, i;
	struct ib_wc *wc;

poll_more:
	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);

	for (i = 0; i < n; i++) {
		wc = priv->send_wc + i;
		if (wc->wr_id & IPOIB_OP_CM)
			ipoib_cm_handle_tx_wc(dev, wc);
		else
			ipoib_ib_handle_tx_wc(dev, wc);
	}

	if (n < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}
	return n < 0 ? 0 : n;
}

void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
{
	struct ipoib_dev_priv *priv = ctx_ptr;

	napi_schedule(&priv->recv_napi);
}

void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
{
	struct ipoib_dev_priv *priv = ctx_ptr;

	napi_schedule(&priv->send_napi);
}

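/*
 * Build the gather list for @tx_req and post a UD send work request,
 * using an LSO work request when a GSO header (@head) is supplied.
 */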
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 dqpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct sk_buff *skb = tx_req->skb;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id	= wr_id;
	priv->tx_wr.remote_qpn	= dqpn;
	priv->tx_wr.ah		= address;

	if (head) {
		priv->tx_wr.mss		= skb_shinfo(skb)->gso_size;
		priv->tx_wr.header	= head;
		priv->tx_wr.hlen	= hlen;
		priv->tx_wr.wr.opcode	= IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode	= IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
}

int ipoib_send(struct net_device *dev, struct sk_buff *skb,
	       struct ib_ah *address, u32 dqpn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;
	unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return -1;
		}
		phead = NULL;
		hlen  = 0;
	}
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
		/* Does skb_linearize return ok without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	}

	ipoib_dbg_data(priv,
		       "sending packet, length=%d address=%p dqpn=0x%06x\n",
		       skb->len, address, dqpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
	/* tx_head is advanced only after a successful post_send(), but it
	 * is used here to decide whether the queue must be stopped */
	if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	if (netif_queue_stopped(dev))
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
				     IB_CQ_REPORT_MISSED_EVENTS) < 0)
			ipoib_warn(priv, "request notify on send CQ failed\n");

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address, dqpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		rc = 0;
	} else {
		netif_trans_update(dev);

		rc = priv->tx_head;
		++priv->tx_head;
	}
	return rc;
}

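/*
 * Free any address handles on the dead_ahs list whose last_send has
 * already been completed (tx_tail has caught up with it).
 */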
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_ah *ah, *tah;
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			rdma_destroy_ah(ah->ah, 0);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(priv->wq, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_flush_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(priv->wq);
	ipoib_reap_ah(&priv->ah_reap_task.work);
}

static void ipoib_stop_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	ipoib_flush_ah(dev);
}

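/* Count receive-ring slots that still have an skb posted. */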
static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
					struct ib_qp *qp,
					enum ib_qp_state new_state)
{
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
		return;
	}
	/* print according to the new state and the previous state */
	if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
		ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
	else
		ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
			   new_state, qp_attr.qp_state);
}

static void ipoib_napi_enable(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	napi_enable(&priv->recv_napi);
	napi_enable(&priv->send_napi);
}

static void ipoib_napi_disable(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	napi_disable(&priv->recv_napi);
	napi_disable(&priv->send_napi);
}

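/*
 * Default stop path: disable NAPI, move the QP to the error state,
 * wait (up to five seconds) for all posted work requests to complete
 * or be flushed, then reset the QP.  If the hardware appears wedged,
 * the remaining send and receive buffers are reclaimed by hand.
 */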
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		ipoib_napi_disable(dev);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize when
	 * all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv,
				   "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail,
				   recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		usleep_range(1000, 2000);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->rn_ops->ndo_stop(dev);

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	ipoib_flush_ah(dev);

	return 0;
}

int ipoib_ib_dev_open_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		goto out;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		goto out;
	}

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		ipoib_napi_enable(dev);

	return 0;
out:
	return -1;
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
			   (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(priv->wq, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (priv->rn_ops->ndo_open(dev)) {
		pr_warn("%s: Failed to open dev\n", dev->name);
		goto dev_stop;
	}

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;

dev_stop:
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	ipoib_ib_dev_stop(dev);
	return -1;
}

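/*
 * Look up priv->pkey in the local P_Key table.  If found, hand the
 * index to the rdma_netdev via rn->set_id() and set
 * IPOIB_PKEY_ASSIGNED; otherwise clear the flag.
 */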
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);

	if (!(priv->pkey & 0x7fff) ||
	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
			 &priv->pkey_index)) {
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	} else {
		if (rn->set_id)
			rn->set_id(dev, priv->pkey_index);
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	}
}

void ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	ipoib_mcast_start_thread(dev);
}

void ipoib_ib_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);
}

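/*
 * Drain the receive CQ while the device is going down.  Successful
 * completions are converted to flush errors so no packets are passed
 * up the stack, and the send CQ is polled until it is empty.
 */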
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else {
				pr_warn("%s: Got unexpected wqe id\n", __func__);
			}
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

/*
 * Takes whatever value is in pkey index 0 and updates priv->pkey;
 * returns 0 if the pkey value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
	int result;
	u16 prev_pkey;

	prev_pkey = priv->pkey;
	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
	if (result) {
		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
			   priv->port, result);
		return result;
	}

	priv->pkey |= 0x8000;

	if (prev_pkey != priv->pkey) {
		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
			  prev_pkey, priv->pkey);
		/*
		 * Update the pkey in the broadcast address, while making sure to set
		 * the full membership bit, so that we join the right broadcast group.
		 */
		priv->dev->broadcast[8] = priv->pkey >> 8;
		priv->dev->broadcast[9] = priv->pkey & 0xff;
		return 0;
	}

	return 1;
}

/*
 * Returns 0 if the pkey value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
	u16 old_index = priv->pkey_index;

	priv->pkey_index = 0;
	ipoib_pkey_dev_check_presence(priv->dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
	    (old_index == priv->pkey_index))
		return 1;
	return 0;
}

/*
 * Returns true if the device address of the ipoib interface has changed and the
 * new address is a valid one (i.e. it is in the gid table), false otherwise.
 */
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
	union ib_gid search_gid;
	union ib_gid gid0;
	union ib_gid *netdev_gid;
	int err;
	u16 index;
	u8 port;
	bool ret = false;

	netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
	if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
		return false;

	netif_addr_lock_bh(priv->dev);

	/* The subnet prefix may have changed, update it now so we won't have
	 * to do it later
	 */
	priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
	netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
	search_gid.global.subnet_prefix = gid0.global.subnet_prefix;

	search_gid.global.interface_id = priv->local_gid.global.interface_id;

	netif_addr_unlock_bh(priv->dev);

	err = ib_find_gid(priv->ca, &search_gid, &port, &index);

	netif_addr_lock_bh(priv->dev);

	if (search_gid.global.interface_id !=
	    priv->local_gid.global.interface_id)
		/* There was a change while we were looking up the gid, bail
		 * here and let the next work sort this out
		 */
		goto out;

	/* The next section of code needs some background:
	 * Per IB spec the port GUID can't change if the HCA is powered on.
	 * The port GUID is the basis for the GID at index 0, which is the
	 * basis for the default device address of an ipoib interface.
	 *
	 * So it seems the flow should be:
	 * if user_changed_dev_addr && gid in gid tbl
	 *	set bit dev_addr_set
	 *	return true
	 * else
	 *	return false
	 *
	 * The issue is that there are devices that don't follow the spec;
	 * they change the port GUID when the HCA is powered, so in order
	 * not to break userspace applications we need to check if the
	 * user wanted to control the device address, and we assume that
	 * if he sets the device address back to be based on GID index 0,
	 * he no longer wishes to control it.
	 *
	 * If the user doesn't control the device address,
	 * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed, it means
	 * the port GUID has changed and the GID at index 0 has changed,
	 * so we need to change priv->local_gid and priv->dev->dev_addr
	 * to reflect the new GID.
	 */
	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		if (!err && port == priv->port) {
			set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
			if (index == 0)
				clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
					  &priv->flags);
			else
				set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
			ret = true;
		} else {
			ret = false;
		}
	} else {
		if (!err && port == priv->port) {
			ret = true;
		} else {
			if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
				memcpy(&priv->local_gid, &gid0,
				       sizeof(priv->local_gid));
				memcpy(priv->dev->dev_addr + 4, &gid0,
				       sizeof(priv->local_gid));
				ret = true;
			}
		}
	}

out:
	netif_addr_unlock_bh(priv->dev);

	return ret;
}

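/*
 * Core flush routine, run at three levels.  LIGHT flushes paths,
 * multicast groups and AHs and revalidates the device address;
 * NORMAL additionally brings the IB device down; HEAVY also restarts
 * the QP via ipoib_ib_dev_stop()/ipoib_ib_dev_open() (e.g. after a
 * P_Key change).  Child interfaces are flushed recursively first.
 */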
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level,
				 int nesting)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	int result;

	down_read_nested(&priv->vlan_rwsem, nesting);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level, nesting + 1);

	up_read(&priv->vlan_rwsem);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
	    level != IPOIB_FLUSH_HEAVY) {
		/* Make sure the dev_addr is set even if not flushing */
		if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		/* interface is down; update the pkey and leave */
		if (level == IPOIB_FLUSH_HEAVY) {
			if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
				update_parent_pkey(priv);
			else
				update_child_pkey(priv);
		} else if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		/* child devices chase their origin pkey value, while non-child
		 * (parent) devices should always take what is present in
		 * pkey index 0
		 */
		if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
			result = update_child_pkey(priv);
			if (result) {
				/* restart QP only if P_Key index is changed */
				ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
				return;
			}

		} else {
			result = update_parent_pkey(priv);
			/* restart QP only if P_Key value changed */
			if (result) {
				ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
				return;
			}
		}
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		int oper_up;
		ipoib_mark_paths_invalid(dev);
		/* Mark IPoIB operation as down to prevent races between
		 * the flush flow, which leaves the MCG, and on-the-fly
		 * joins that can happen during that time.  The mcast
		 * restart task should deal with join requests we missed.
		 */
		oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_mcast_dev_flush(dev);
		if (oper_up)
			set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_flush_ah(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev);

	if (level == IPOIB_FLUSH_HEAVY) {
		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
			ipoib_ib_dev_stop(dev);

		if (ipoib_ib_dev_open(dev))
			return;

		if (netif_queue_stopped(dev))
			netif_start_queue(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here; don't bring it back up if it's not configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		if (ipoib_dev_addr_changed_valid(priv))
			ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	rtnl_lock();
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
	rtnl_unlock();
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");
	/*
	 * We must make sure there are no more (path) completions
	 * that may wish to touch priv fields that are no longer valid
	 */
	ipoib_flush_paths(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	/*
	 * None of our ah references are freed until after
	 * ipoib_mcast_dev_flush(), ipoib_flush_paths() and the
	 * neighbor garbage collection have stopped and been reaped.
	 * That should all be done now, so make a final ah flush.
	 */
	ipoib_stop_ah(dev);

	clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	priv->rn_ops->ndo_uninit(dev);

	if (priv->pd) {
		ib_dealloc_pd(priv->pd);
		priv->pd = NULL;
	}
}
1da177e4 1315