/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		return NULL;
	}

	ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

	return ah;
}

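/*
 * Address handles are not destroyed directly: posted sends may still
 * reference an AH, so ipoib_free_ah() just moves it to the dead_ahs
 * list and lets the periodic reaper destroy it once tx_tail has passed
 * the AH's last_send counter.
 */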
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
				    DMA_FROM_DEVICE);
		ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
				  DMA_FROM_DEVICE);
	} else
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
				    DMA_FROM_DEVICE);
}

static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
				   struct sk_buff *skb,
				   unsigned int length)
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
		unsigned int size;
		/*
		 * Only two buffers are needed for max_payload = 4K;
		 * the first buffer is of size IPOIB_UD_HEAD_SIZE.
		 */
		skb->tail += IPOIB_UD_HEAD_SIZE;
		skb->len  += length;

		size = length - IPOIB_UD_HEAD_SIZE;

		skb_frag_size_set(frag, size);
		skb->data_len += size;
		skb->truesize += size;
	} else
		skb_put(skb, length);
}

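/*
 * Repost receive buffer 'id'.  The IPOIB_OP_RECV bit is folded into the
 * work request ID so that the completion path can tell datagram receive
 * completions apart from connected-mode completions on the same CQ.
 */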
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

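/*
 * Allocate and DMA-map a receive skb.  For IB MTUs that do not fit in a
 * single page, the skb carries IPOIB_UD_HEAD_SIZE bytes of linear data
 * plus one full page as a fragment; otherwise a single linear buffer of
 * the full UD size is used.
 */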
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	if (ipoib_ud_need_sg(priv->max_ib_mtu))
		buf_size = IPOIB_UD_HEAD_SIZE;
	else
		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + 4);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		struct page *page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
		mapping[1] =
			ib_dma_map_page(priv->ca, page,
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
			goto partial_error;
	}

	priv->rx_ring[id].skb = skb;
	return skb;

partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/*
	 * Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);
	ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) && likely(wc->csum_ok))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

static int ipoib_dma_map_tx(struct ib_device *ca,
			    struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						   skb_frag_page(frag),
						   frag->page_offset, skb_frag_size(frag),
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

static void ipoib_dma_unmap_tx(struct ib_device *ca,
			       struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
				  DMA_TO_DEVICE);
	}
}

static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

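/*
 * Poll up to MAX_SEND_CQE send completions in one batch.  Returns
 * nonzero when a full batch was drained, i.e. more completions may
 * still be pending on the send CQ.
 */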
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done  = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	napi_schedule(&priv->napi);
}

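/*
 * Send completions are not handled in the NAPI poll loop; they are
 * drained here, under the TX lock, from the poll_timer armed by
 * ipoib_send_comp_handler().  If the queue is still stopped, re-arm
 * the timer to try again on the next jiffy.
 */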
static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}

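/*
 * Build the gather list for one UD send from the pre-computed DMA
 * mapping and post it.  A non-NULL 'head' selects the LSO path: the
 * protocol headers are passed separately and IB_WR_LSO lets the HCA
 * segment the payload using gso_size as the MSS.
 */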
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	int i, off;
	struct sk_buff *skb = tx_req->skb;
	skb_frag_t *frags = skb_shinfo(skb)->frags;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	u64 *mapping = tx_req->mapping;

	if (skb_headlen(skb)) {
		priv->tx_sge[0].addr   = mapping[0];
		priv->tx_sge[0].length = skb_headlen(skb);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + off].addr   = mapping[i + off];
		priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
	}
	priv->tx_wr.num_sge	     = nr_frags + off;
	priv->tx_wr.wr_id	     = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah	     = address;

	if (head) {
		priv->tx_wr.wr.ud.mss	 = skb_shinfo(skb)->gso_size;
		priv->tx_wr.wr.ud.header = head;
		priv->tx_wr.wr.ud.hlen	 = hlen;
		priv->tx_wr.opcode	 = IB_WR_LSO;
	} else
		priv->tx_wr.opcode	 = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen  = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address->ah, qpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */
}

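/*
 * Free dead AHs whose last_send has been overtaken by tx_tail.  The
 * signed subtraction keeps the comparison correct across wraparound of
 * the send serial numbers.
 */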
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *) ctx);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
}

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize in
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) dev);

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

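/*
 * Common flush worker.  IPOIB_FLUSH_LIGHT invalidates paths and flushes
 * multicast state; IPOIB_FLUSH_NORMAL additionally takes the IB side of
 * the device down and back up; IPOIB_FLUSH_HEAVY also restarts the QP,
 * e.g. to pick up a changed P_Key index.
 */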
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
			ipoib_ib_dev_stop(dev, 0);
			if (ipoib_pkey_dev_delay_open(dev))
				return;
		}

		/* restart QP only if P_Key index is changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(dev);
		ipoib_mcast_dev_flush(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev, 0);

	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Look for the interface pkey value in the IB Port P_Key table and */
	/* set the interface pkey assignment flag */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}