1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
10 * vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
14 * The module loadable parameters that are supported by the driver and a brief
15 * explanation of all the variables:
16 * vlan_tag_strip:
17 *     Strip VLAN Tag enable/disable. Instructs the device to remove
18 *     the VLAN tag from all received tagged frames that are not
19 *     replicated at the internal L2 switch.
20 *         0 - Do not strip the VLAN tag.
21 *         1 - Strip the VLAN tag.
22 *
23 * addr_learn_en:
24 *     Enable learning the mac address of the guest OS interface in
25 *     a virtualization environment.
26 *         0 - DISABLE
27 *         1 - ENABLE
28 *
29 * max_config_port:
30 *     Maximum number of ports to be supported.
31 *         MIN - 1 and MAX - 2
32 *
33 * max_config_vpath:
34 *     This configures the maximum number of vpaths configured for each
35 *     device function.
36 *         MIN - 1 and MAX - 17
37 *
38 * max_config_dev:
39 *     This configures the maximum number of device functions to be enabled.
40 *         MIN - 1 and MAX - 17
41 *
42 ******************************************************************************/
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46 #include <linux/bitops.h>
47 #include <linux/if_vlan.h>
48 #include <linux/interrupt.h>
49 #include <linux/pci.h>
50 #include <linux/slab.h>
51 #include <linux/tcp.h>
53 #include <linux/netdevice.h>
54 #include <linux/etherdevice.h>
55 #include <linux/firmware.h>
56 #include <linux/net_tstamp.h>
57 #include <linux/prefetch.h>
58 #include <linux/module.h>
59 #include "vxge-main.h"
60 #include "vxge-reg.h"
62 MODULE_LICENSE("Dual BSD/GPL");
63 MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
64 "Virtualized Server Adapter");
66 static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
67 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
68 PCI_ANY_ID},
69 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
70 PCI_ANY_ID},
71 {0}
72 };
74 MODULE_DEVICE_TABLE(pci, vxge_id_table);
76 VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
77 VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
78 VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
79 VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
80 VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
81 VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
83 static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
84 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
85 static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
86 {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
87 module_param_array(bw_percentage, uint, NULL, 0);
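/* Usage note: the parameters above are set at module load time. A purely
 * illustrative invocation (the values here are hypothetical) might be:
 *   modprobe vxge vlan_tag_strip=1 max_config_vpath=4 bw_percentage=25,25,25,25
 * bw_percentage defaults to 0xFF (no bandwidth restriction) for every vpath.
 */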
89 static struct vxge_drv_config *driver_config;
91 static inline int is_vxge_card_up(struct vxgedev *vdev)
92 {
93 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
94 }
96 static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
97 {
98 struct sk_buff **skb_ptr = NULL;
99 struct sk_buff **temp;
100 #define NR_SKB_COMPLETED 128
101 struct sk_buff *completed[NR_SKB_COMPLETED];
102 int more;
104 do {
105 more = 0;
106 skb_ptr = completed;
108 if (__netif_tx_trylock(fifo->txq)) {
109 vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
110 NR_SKB_COMPLETED, &more);
111 __netif_tx_unlock(fifo->txq);
112 }
114 /* free SKBs */
115 for (temp = completed; temp != skb_ptr; temp++)
116 dev_kfree_skb_irq(*temp);
117 } while (more);
118 }
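/* Note the two-phase pattern above: completed skbs are collected into the
 * local array while the Tx queue lock is held, but freed only after the lock
 * is dropped, which keeps the locked section short. The loop repeats while
 * the HW reports more completions than fit in one NR_SKB_COMPLETED batch.
 */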
120 static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
121 {
122 int i;
124 /* Complete all transmits */
125 for (i = 0; i < vdev->no_of_vpath; i++)
126 VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
127 }
129 static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
130 {
131 int i;
132 struct vxge_ring *ring;
134 /* Complete all receives */
135 for (i = 0; i < vdev->no_of_vpath; i++) {
136 ring = &vdev->vpaths[i].ring;
137 vxge_hw_vpath_poll_rx(ring->handle);
138 }
139 }
142 * vxge_callback_link_up
144 * This function is called during interrupt context to notify link up state
145 * change. */
147 static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
149 struct net_device *dev = hldev->ndev;
150 struct vxgedev *vdev = netdev_priv(dev);
152 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
153 vdev->ndev->name, __func__, __LINE__);
154 netdev_notice(vdev->ndev, "Link Up\n");
155 vdev->stats.link_up++;
157 netif_carrier_on(vdev->ndev);
158 netif_tx_wake_all_queues(vdev->ndev);
160 vxge_debug_entryexit(VXGE_TRACE,
161 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
165 * vxge_callback_link_down
167 * This function is called during interrupt context to notify link down state
168 * change. */
170 static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
172 struct net_device *dev = hldev->ndev;
173 struct vxgedev *vdev = netdev_priv(dev);
175 vxge_debug_entryexit(VXGE_TRACE,
176 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
177 netdev_notice(vdev->ndev, "Link Down\n");
179 vdev->stats.link_down++;
180 netif_carrier_off(vdev->ndev);
181 netif_tx_stop_all_queues(vdev->ndev);
183 vxge_debug_entryexit(VXGE_TRACE,
184 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
192 static struct sk_buff *
193 vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
195 struct net_device *dev;
197 struct vxge_rx_priv *rx_priv;
200 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
201 ring->ndev->name, __func__, __LINE__);
203 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
205 /* try to allocate skb first. this one may fail */
206 skb = netdev_alloc_skb(dev, skb_size +
207 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
209 vxge_debug_mem(VXGE_ERR,
210 "%s: out of memory to allocate SKB", dev->name);
211 ring->stats.skb_alloc_fail++;
215 vxge_debug_mem(VXGE_TRACE,
216 "%s: %s:%d Skb : 0x%p", ring->ndev->name,
217 __func__, __LINE__, skb);
219 skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
222 rx_priv->skb_data = NULL;
223 rx_priv->data_size = skb_size;
224 vxge_debug_entryexit(VXGE_TRACE,
225 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
233 static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
235 struct vxge_rx_priv *rx_priv;
238 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
239 ring->ndev->name, __func__, __LINE__);
240 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
242 rx_priv->skb_data = rx_priv->skb->data;
243 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
244 rx_priv->data_size, PCI_DMA_FROMDEVICE);
246 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
247 ring->stats.pci_map_fail++;
250 vxge_debug_mem(VXGE_TRACE,
251 "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
252 ring->ndev->name, __func__, __LINE__,
253 (unsigned long long)dma_addr);
254 vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
256 rx_priv->data_dma = dma_addr;
257 vxge_debug_entryexit(VXGE_TRACE,
258 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
264 * vxge_rx_initial_replenish
265 * Allocation of RxD as an initial replenish procedure. */
267 static enum vxge_hw_status
268 vxge_rx_initial_replenish(void *dtrh, void *userdata)
270 struct vxge_ring *ring = (struct vxge_ring *)userdata;
271 struct vxge_rx_priv *rx_priv;
273 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
274 ring->ndev->name, __func__, __LINE__);
275 if (vxge_rx_alloc(dtrh, ring,
276 VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
279 if (vxge_rx_map(dtrh, ring)) {
280 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
281 dev_kfree_skb(rx_priv->skb);
285 vxge_debug_entryexit(VXGE_TRACE,
286 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
292 vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
293 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
296 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
297 ring->ndev->name, __func__, __LINE__);
298 skb_record_rx_queue(skb, ring->driver_id);
299 skb->protocol = eth_type_trans(skb, ring->ndev);
301 u64_stats_update_begin(&ring->stats.syncp);
302 ring->stats.rx_frms++;
303 ring->stats.rx_bytes += pkt_length;
305 if (skb->pkt_type == PACKET_MULTICAST)
306 ring->stats.rx_mcast++;
307 u64_stats_update_end(&ring->stats.syncp);
309 vxge_debug_rx(VXGE_TRACE,
310 "%s: %s:%d skb protocol = %d",
311 ring->ndev->name, __func__, __LINE__, skb->protocol);
313 if (ext_info->vlan &&
314 ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
315 __vlan_hwaccel_put_tag(skb, ext_info->vlan);
316 napi_gro_receive(ring->napi_p, skb);
318 vxge_debug_entryexit(VXGE_TRACE,
319 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
322 static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
323 struct vxge_rx_priv *rx_priv)
325 pci_dma_sync_single_for_device(ring->pdev,
326 rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
328 vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
329 vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
332 static inline void vxge_post(int *dtr_cnt, void **first_dtr,
333 void *post_dtr, struct __vxge_hw_ring *ringh)
335 int dtr_count = *dtr_cnt;
336 if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
338 vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
339 *first_dtr = post_dtr;
341 vxge_hw_ring_rxd_post_post(ringh, post_dtr);
343 *dtr_cnt = dtr_count;
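/* vxge_post() batches descriptor posts: most RxDs are posted without a
 * doorbell, and only every VXGE_HW_RXSYNC_FREQ_CNT-th descriptor is posted
 * with a write memory barrier (post_post_wmb), amortizing the doorbell cost
 * over a burst of descriptors.
 */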
349 * If the interrupt is because of a received frame or if the receive ring
350 * contains fresh, as yet unprocessed frames, this function is called.
351 */
352 static enum vxge_hw_status
353 vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
354 u8 t_code, void *userdata)
356 struct vxge_ring *ring = (struct vxge_ring *)userdata;
357 struct net_device *dev = ring->ndev;
358 unsigned int dma_sizes;
359 void *first_dtr = NULL;
365 struct vxge_rx_priv *rx_priv;
366 struct vxge_hw_ring_rxd_info ext_info;
367 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
368 ring->ndev->name, __func__, __LINE__);
371 prefetch((char *)dtr + L1_CACHE_BYTES);
372 rx_priv = vxge_hw_ring_rxd_private_get(dtr);
374 data_size = rx_priv->data_size;
375 data_dma = rx_priv->data_dma;
376 prefetch(rx_priv->skb_data);
378 vxge_debug_rx(VXGE_TRACE,
379 "%s: %s:%d skb = 0x%p",
380 ring->ndev->name, __func__, __LINE__, skb);
382 vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
383 pkt_length = dma_sizes;
385 pkt_length -= ETH_FCS_LEN;
387 vxge_debug_rx(VXGE_TRACE,
388 "%s: %s:%d Packet Length = %d",
389 ring->ndev->name, __func__, __LINE__, pkt_length);
391 vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
393 /* check skb validity */
396 prefetch((char *)skb + L1_CACHE_BYTES);
397 if (unlikely(t_code)) {
398 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
401 ring->stats.rx_errors++;
402 vxge_debug_rx(VXGE_TRACE,
403 "%s: %s :%d Rx T_code is %d",
404 ring->ndev->name, __func__,
407 /* If the t_code is not supported and if the
408 * t_code is other than 0x5 (unparseable packet
409 * such as an unknown IPv6 header), drop it!
411 vxge_re_pre_post(dtr, ring, rx_priv);
413 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
414 ring->stats.rx_dropped++;
419 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
420 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
421 if (!vxge_rx_map(dtr, ring)) {
422 skb_put(skb, pkt_length);
424 pci_unmap_single(ring->pdev, data_dma,
425 data_size, PCI_DMA_FROMDEVICE);
427 vxge_hw_ring_rxd_pre_post(ringh, dtr);
428 vxge_post(&dtr_cnt, &first_dtr, dtr,
431 dev_kfree_skb(rx_priv->skb);
433 rx_priv->data_size = data_size;
434 vxge_re_pre_post(dtr, ring, rx_priv);
436 vxge_post(&dtr_cnt, &first_dtr, dtr,
438 ring->stats.rx_dropped++;
442 vxge_re_pre_post(dtr, ring, rx_priv);
444 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
445 ring->stats.rx_dropped++;
449 struct sk_buff *skb_up;
451 skb_up = netdev_alloc_skb(dev, pkt_length +
452 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
453 if (skb_up != NULL) {
455 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
457 pci_dma_sync_single_for_cpu(ring->pdev,
461 vxge_debug_mem(VXGE_TRACE,
462 "%s: %s:%d skb_up = %p",
463 ring->ndev->name, __func__,
465 memcpy(skb_up->data, skb->data, pkt_length);
467 vxge_re_pre_post(dtr, ring, rx_priv);
469 vxge_post(&dtr_cnt, &first_dtr, dtr,
471 /* hand the small copied SKB up the stack instead */
472 skb = skb_up;
473 skb_put(skb, pkt_length);
475 vxge_re_pre_post(dtr, ring, rx_priv);
477 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
478 vxge_debug_rx(VXGE_ERR,
479 "%s: vxge_rx_1b_compl: out of "
480 "memory", dev->name);
481 ring->stats.skb_alloc_fail++;
486 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
487 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
488 (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
489 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
490 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
491 skb->ip_summed = CHECKSUM_UNNECESSARY;
493 skb_checksum_none_assert(skb);
497 struct skb_shared_hwtstamps *skb_hwts;
498 u32 ns = *(u32 *)(skb->head + pkt_length);
500 skb_hwts = skb_hwtstamps(skb);
501 skb_hwts->hwtstamp = ns_to_ktime(ns);
502 skb_hwts->syststamp.tv64 = 0;
505 /* rth_hash_type and rth_it_hit are non-zero regardless of
506 * whether rss is enabled. Only the rth_value is zero/non-zero
507 * if rss is disabled/enabled, so key off of that.
509 if (ext_info.rth_value)
510 skb->rxhash = ext_info.rth_value;
512 vxge_rx_complete(ring, skb, ext_info.vlan,
513 pkt_length, &ext_info);
516 ring->pkts_processed++;
520 } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
521 &t_code) == VXGE_HW_OK);
524 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
526 vxge_debug_entryexit(VXGE_TRACE,
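/* Summary of the three Rx completion paths above: (1) a bad t_code recycles
 * the buffer and drops the frame; (2) frames longer than
 * VXGE_LL_RX_COPY_THRESHOLD get a freshly allocated replacement buffer and
 * the original skb is passed up; (3) shorter frames are copied into a small
 * new skb ("copy-break") so the original DMA buffer can be reposted at once.
 */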
535 * If an interrupt was raised to indicate DMA complete of the Tx packet,
536 * this function is called. It identifies the last TxD whose buffer was
537 * freed and frees all skbs whose data have already been DMA'ed into the
538 * NIC's internal memory. */
540 static enum vxge_hw_status
541 vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
542 enum vxge_hw_fifo_tcode t_code, void *userdata,
543 struct sk_buff ***skb_ptr, int nr_skb, int *more)
545 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
546 struct sk_buff *skb, **done_skb = *skb_ptr;
549 vxge_debug_entryexit(VXGE_TRACE,
550 "%s:%d Entered....", __func__, __LINE__);
556 struct vxge_tx_priv *txd_priv =
557 vxge_hw_fifo_txdl_private_get(dtr);
560 frg_cnt = skb_shinfo(skb)->nr_frags;
561 frag = &skb_shinfo(skb)->frags[0];
563 vxge_debug_tx(VXGE_TRACE,
564 "%s: %s:%d fifo_hw = %p dtr = %p "
565 "tcode = 0x%x", fifo->ndev->name, __func__,
566 __LINE__, fifo_hw, dtr, t_code);
567 /* check skb validity */
569 vxge_debug_tx(VXGE_TRACE,
570 "%s: %s:%d skb = %p txd_priv = %p frg_cnt = %d",
571 fifo->ndev->name, __func__, __LINE__,
572 skb, txd_priv, frg_cnt);
573 if (unlikely(t_code)) {
574 fifo->stats.tx_errors++;
575 vxge_debug_tx(VXGE_ERR,
576 "%s: tx: dtr %p completed due to "
577 "error t_code %01x", fifo->ndev->name,
579 vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
582 /* for unfragmented skb */
583 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
584 skb_headlen(skb), PCI_DMA_TODEVICE);
586 for (j = 0; j < frg_cnt; j++) {
587 pci_unmap_page(fifo->pdev,
588 txd_priv->dma_buffers[i++],
589 skb_frag_size(frag), PCI_DMA_TODEVICE);
593 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
595 /* Updating the statistics block */
596 u64_stats_update_begin(&fifo->stats.syncp);
597 fifo->stats.tx_frms++;
598 fifo->stats.tx_bytes += skb->len;
599 u64_stats_update_end(&fifo->stats.syncp);
609 if (pkt_cnt > fifo->indicate_max_pkts)
612 } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
613 &dtr, &t_code) == VXGE_HW_OK);
616 if (netif_tx_queue_stopped(fifo->txq))
617 netif_tx_wake_queue(fifo->txq);
619 vxge_debug_entryexit(VXGE_TRACE,
620 "%s: %s:%d Exiting...",
621 fifo->ndev->name, __func__, __LINE__);
625 /* select a vpath to transmit the packet */
626 static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
628 u16 queue_len, counter = 0;
629 if (skb->protocol == htons(ETH_P_IP)) {
635 if (!ip_is_fragment(ip)) {
636 th = (struct tcphdr *)(((unsigned char *)ip) +
639 queue_len = vdev->no_of_vpath;
640 counter = (ntohs(th->source) +
641 ntohs(th->dest)) &
642 vdev->vpath_selector[queue_len - 1];
643 if (counter >= queue_len)
644 counter = queue_len - 1;
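/* Worked example (hypothetical ports): with 4 vpaths, vpath_selector[3] = 3,
 * so a TCP flow with source port 1000 and destination port 2000 hashes to
 * (1000 + 2000) & 3 = vpath 0. Non-TCP or fragmented IPv4 traffic leaves
 * 'counter' at 0 and always uses the first vpath.
 */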
650 static enum vxge_hw_status vxge_search_mac_addr_in_list(
651 struct vxge_vpath *vpath, u64 del_mac)
653 struct list_head *entry, *next;
654 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
655 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
661 static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
663 struct vxge_mac_addrs *new_mac_entry;
664 u8 *mac_address = NULL;
666 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
669 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
670 if (!new_mac_entry) {
671 vxge_debug_mem(VXGE_ERR,
672 "%s: memory allocation failed",
677 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
679 /* Copy the new mac address to the list */
680 mac_address = (u8 *)&new_mac_entry->macaddr;
681 memcpy(mac_address, mac->macaddr, ETH_ALEN);
683 new_mac_entry->state = mac->state;
684 vpath->mac_addr_cnt++;
686 if (is_multicast_ether_addr(mac->macaddr))
687 vpath->mcast_addr_cnt++;
692 /* Add a mac address to DA table */
693 static enum vxge_hw_status
694 vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
696 enum vxge_hw_status status = VXGE_HW_OK;
697 struct vxge_vpath *vpath;
698 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
700 if (is_multicast_ether_addr(mac->macaddr))
701 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
703 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
705 vpath = &vdev->vpaths[mac->vpath_no];
706 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
707 mac->macmask, duplicate_mode);
708 if (status != VXGE_HW_OK) {
709 vxge_debug_init(VXGE_ERR,
710 "DA config add entry failed for vpath:%d",
713 if (FALSE == vxge_mac_list_add(vpath, mac))
719 static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
721 struct macInfo mac_info;
722 u8 *mac_address = NULL;
723 u64 mac_addr = 0, vpath_vector = 0;
725 enum vxge_hw_status status = VXGE_HW_OK;
726 struct vxge_vpath *vpath = NULL;
727 struct __vxge_hw_device *hldev;
729 hldev = pci_get_drvdata(vdev->pdev);
731 mac_address = (u8 *)&mac_addr;
732 memcpy(mac_address, mac_header, ETH_ALEN);
734 /* Is this mac address already in the list? */
735 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
736 vpath = &vdev->vpaths[vpath_idx];
737 if (vxge_search_mac_addr_in_list(vpath, mac_addr))
741 memset(&mac_info, 0, sizeof(struct macInfo));
742 memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
744 /* Any vpath has room to add mac address to its da table? */
745 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
746 vpath = &vdev->vpaths[vpath_idx];
747 if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
748 /* Add this mac address to this vpath */
749 mac_info.vpath_no = vpath_idx;
750 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
751 status = vxge_add_mac_addr(vdev, &mac_info);
752 if (status != VXGE_HW_OK)
758 mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
760 mac_info.vpath_no = vpath_idx;
761 /* Is the first vpath already selected as catch-basin ? */
762 vpath = &vdev->vpaths[vpath_idx];
763 if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
764 /* Add this mac address to this vpath */
765 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
770 /* Select first vpath as catch-basin */
771 vpath_vector = vxge_mBIT(vpath->device_id);
772 status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
773 vxge_hw_mgmt_reg_type_mrpcim,
776 struct vxge_hw_mrpcim_reg,
779 if (status != VXGE_HW_OK) {
780 vxge_debug_tx(VXGE_ERR,
781 "%s: Unable to set the vpath-%d in catch-basin mode",
782 VXGE_DRIVER_NAME, vpath->device_id);
786 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
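/* Catch-basin mode, as used above: once every vpath's DA table is full, the
 * first vpath is programmed through the mrpcim register space to receive
 * frames that match no DA entry, and further learned addresses are tracked
 * only in the driver's software list.
 */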
794 * @skb : the socket buffer containing the Tx data.
795 * @dev : device pointer.
797 * This function is the Tx entry point of the driver. The Neterion NIC supports
798 * certain protocol-assist features on the Tx side, namely CSO, S/G, LSO. */
800 static netdev_tx_t
801 vxge_xmit(struct sk_buff *skb, struct net_device *dev)
803 struct vxge_fifo *fifo = NULL;
806 struct vxgedev *vdev = NULL;
807 enum vxge_hw_status status;
808 int frg_cnt, first_frg_len;
810 int i = 0, j = 0, avail;
812 struct vxge_tx_priv *txdl_priv = NULL;
813 struct __vxge_hw_fifo *fifo_hw;
817 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
818 dev->name, __func__, __LINE__);
820 /* A buffer with no data will be dropped */
821 if (unlikely(skb->len <= 0)) {
822 vxge_debug_tx(VXGE_ERR,
823 "%s: Buffer has no data..", dev->name);
828 vdev = netdev_priv(dev);
830 if (unlikely(!is_vxge_card_up(vdev))) {
831 vxge_debug_tx(VXGE_ERR,
832 "%s: vdev not initialized", dev->name);
837 if (vdev->config.addr_learn_en) {
838 vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
839 if (vpath_no == -EPERM) {
840 vxge_debug_tx(VXGE_ERR,
841 "%s: Failed to store the mac address",
848 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
849 vpath_no = skb_get_queue_mapping(skb);
850 else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
851 vpath_no = vxge_get_vpath_no(vdev, skb);
853 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
855 if (vpath_no >= vdev->no_of_vpath)
858 fifo = &vdev->vpaths[vpath_no].fifo;
859 fifo_hw = fifo->handle;
861 if (netif_tx_queue_stopped(fifo->txq))
862 return NETDEV_TX_BUSY;
864 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
866 vxge_debug_tx(VXGE_ERR,
867 "%s: No free TXDs available", dev->name);
868 fifo->stats.txd_not_free++;
872 /* Last TXD? Stop tx queue to avoid dropping packets. TX
873 * completion will resume the queue.
876 netif_tx_stop_queue(fifo->txq);
878 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
879 if (unlikely(status != VXGE_HW_OK)) {
880 vxge_debug_tx(VXGE_ERR,
881 "%s: Out of descriptors.", dev->name);
882 fifo->stats.txd_out_of_desc++;
886 vxge_debug_tx(VXGE_TRACE,
887 "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
888 dev->name, __func__, __LINE__,
889 fifo_hw, dtr, dtr_priv);
891 if (vlan_tx_tag_present(skb)) {
892 u16 vlan_tag = vlan_tx_tag_get(skb);
893 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
896 first_frg_len = skb_headlen(skb);
898 dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
901 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
902 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
903 fifo->stats.pci_map_fail++;
907 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
908 txdl_priv->skb = skb;
909 txdl_priv->dma_buffers[j] = dma_pointer;
911 frg_cnt = skb_shinfo(skb)->nr_frags;
912 vxge_debug_tx(VXGE_TRACE,
913 "%s: %s:%d skb = %p txdl_priv = %p "
914 "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
915 __func__, __LINE__, skb, txdl_priv,
916 frg_cnt, (unsigned long long)dma_pointer);
918 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
921 frag = &skb_shinfo(skb)->frags[0];
922 for (i = 0; i < frg_cnt; i++) {
923 /* ignore 0 length fragment */
924 if (!skb_frag_size(frag))
927 dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
928 0, skb_frag_size(frag),
931 if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
933 vxge_debug_tx(VXGE_TRACE,
934 "%s: %s:%d frag = %d dma_pointer = 0x%llx",
935 dev->name, __func__, __LINE__, i,
936 (unsigned long long)dma_pointer);
938 txdl_priv->dma_buffers[j] = dma_pointer;
939 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
940 skb_frag_size(frag));
944 offload_type = vxge_offload_type(skb);
946 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
947 int mss = vxge_tcp_mss(skb);
949 vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
950 dev->name, __func__, __LINE__, mss);
951 vxge_hw_fifo_txdl_mss_set(dtr, mss);
953 vxge_assert(skb->len <=
954 dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
960 if (skb->ip_summed == CHECKSUM_PARTIAL)
961 vxge_hw_fifo_txdl_cksum_set_bits(dtr,
962 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
963 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
964 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
966 vxge_hw_fifo_txdl_post(fifo_hw, dtr);
968 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
969 dev->name, __func__, __LINE__);
973 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
976 frag = &skb_shinfo(skb)->frags[0];
978 pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
979 skb_headlen(skb), PCI_DMA_TODEVICE);
982 pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
983 skb_frag_size(frag), PCI_DMA_TODEVICE);
987 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
989 netif_tx_stop_queue(fifo->txq);
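/* The error unwinding above mirrors the setup order: on a failed fragment
 * mapping, the already mapped header and fragments are unmapped, the
 * descriptor is returned with vxge_hw_fifo_txdl_free(), and the queue is
 * stopped so that Tx completion can later restart it.
 */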
998 * Function will be called by the hw function to abort all outstanding
999 * receive descriptors. */
1001 static void
1002 vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
1004 struct vxge_ring *ring = (struct vxge_ring *)userdata;
1005 struct vxge_rx_priv *rx_priv =
1006 vxge_hw_ring_rxd_private_get(dtrh);
1008 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
1009 ring->ndev->name, __func__, __LINE__);
1010 if (state != VXGE_HW_RXD_STATE_POSTED)
1013 pci_unmap_single(ring->pdev, rx_priv->data_dma,
1014 rx_priv->data_size, PCI_DMA_FROMDEVICE);
1016 dev_kfree_skb(rx_priv->skb);
1017 rx_priv->skb_data = NULL;
1019 vxge_debug_entryexit(VXGE_TRACE,
1020 "%s: %s:%d Exiting...",
1021 ring->ndev->name, __func__, __LINE__);
1027 * Function will be called to abort all outstanding tx descriptors. */
1029 static void
1030 vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1032 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
1034 int i = 0, j, frg_cnt;
1035 struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
1036 struct sk_buff *skb = txd_priv->skb;
1038 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1040 if (state != VXGE_HW_TXDL_STATE_POSTED)
1043 /* check skb validity */
1045 frg_cnt = skb_shinfo(skb)->nr_frags;
1046 frag = &skb_shinfo(skb)->frags[0];
1048 /* for unfragmented skb */
1049 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
1050 skb_headlen(skb), PCI_DMA_TODEVICE);
1052 for (j = 0; j < frg_cnt; j++) {
1053 pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
1054 skb_frag_size(frag), PCI_DMA_TODEVICE);
1060 vxge_debug_entryexit(VXGE_TRACE,
1061 "%s:%d Exiting...", __func__, __LINE__);
1064 static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1066 struct list_head *entry, *next;
1068 u8 *mac_address = (u8 *) (&del_mac);
1070 /* Copy the mac address to delete from the list */
1071 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1073 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1074 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1076 kfree((struct vxge_mac_addrs *)entry);
1077 vpath->mac_addr_cnt--;
1079 if (is_multicast_ether_addr(mac->macaddr))
1080 vpath->mcast_addr_cnt--;
1088 /* delete a mac address from DA table */
1089 static enum vxge_hw_status
1090 vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1092 enum vxge_hw_status status = VXGE_HW_OK;
1093 struct vxge_vpath *vpath;
1095 vpath = &vdev->vpaths[mac->vpath_no];
1096 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1098 if (status != VXGE_HW_OK) {
1099 vxge_debug_init(VXGE_ERR,
1100 "DA config delete entry failed for vpath:%d",
1103 vxge_mac_list_del(vpath, mac);
1108 * vxge_set_multicast
1109 * @dev: pointer to the device structure
1111 * Entry point for multicast address enable/disable
1112 * This function is a driver entry point which gets called by the kernel
1113 * whenever multicast addresses must be enabled/disabled. This also gets
1114 * called to set/reset promiscuous mode. Depending on the device flags, we
1115 * determine whether multicast addresses must be enabled or whether
1116 * promiscuous mode is to be disabled, etc.
1117 */
1118 static void vxge_set_multicast(struct net_device *dev)
1120 struct netdev_hw_addr *ha;
1121 struct vxgedev *vdev;
1122 int i, mcast_cnt = 0;
1123 struct __vxge_hw_device *hldev;
1124 struct vxge_vpath *vpath;
1125 enum vxge_hw_status status = VXGE_HW_OK;
1126 struct macInfo mac_info;
1128 struct vxge_mac_addrs *mac_entry;
1129 struct list_head *list_head;
1130 struct list_head *entry, *next;
1131 u8 *mac_address = NULL;
1133 vxge_debug_entryexit(VXGE_TRACE,
1134 "%s:%d", __func__, __LINE__);
1136 vdev = netdev_priv(dev);
1137 hldev = (struct __vxge_hw_device *)vdev->devh;
1139 if (unlikely(!is_vxge_card_up(vdev)))
1142 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1143 for (i = 0; i < vdev->no_of_vpath; i++) {
1144 vpath = &vdev->vpaths[i];
1145 vxge_assert(vpath->is_open);
1146 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1147 if (status != VXGE_HW_OK)
1148 vxge_debug_init(VXGE_ERR, "failed to enable "
1149 "multicast, status %d", status);
1150 vdev->all_multi_flg = 1;
1152 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
1153 for (i = 0; i < vdev->no_of_vpath; i++) {
1154 vpath = &vdev->vpaths[i];
1155 vxge_assert(vpath->is_open);
1156 status = vxge_hw_vpath_mcast_disable(vpath->handle);
1157 if (status != VXGE_HW_OK)
1158 vxge_debug_init(VXGE_ERR, "failed to disable "
1159 "multicast, status %d", status);
1160 vdev->all_multi_flg = 0;
1165 if (!vdev->config.addr_learn_en) {
1166 for (i = 0; i < vdev->no_of_vpath; i++) {
1167 vpath = &vdev->vpaths[i];
1168 vxge_assert(vpath->is_open);
1170 if (dev->flags & IFF_PROMISC)
1171 status = vxge_hw_vpath_promisc_enable(
1174 status = vxge_hw_vpath_promisc_disable(
1176 if (status != VXGE_HW_OK)
1177 vxge_debug_init(VXGE_ERR, "failed to %s promisc"
1178 ", status %d", dev->flags&IFF_PROMISC ?
1179 "enable" : "disable", status);
1183 memset(&mac_info, 0, sizeof(struct macInfo));
1184 /* Update individual M_CAST address list */
1185 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1186 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1187 list_head = &vdev->vpaths[0].mac_addr_list;
1188 if ((netdev_mc_count(dev) +
1189 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1190 vdev->vpaths[0].max_mac_addr_cnt)
1191 goto _set_all_mcast;
1193 /* Delete previous MC's */
1194 for (i = 0; i < mcast_cnt; i++) {
1195 list_for_each_safe(entry, next, list_head) {
1196 mac_entry = (struct vxge_mac_addrs *)entry;
1197 /* Copy the mac address to delete */
1198 mac_address = (u8 *)&mac_entry->macaddr;
1199 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1201 if (is_multicast_ether_addr(mac_info.macaddr)) {
1202 for (vpath_idx = 0; vpath_idx <
1205 mac_info.vpath_no = vpath_idx;
1206 status = vxge_del_mac_addr(
1215 netdev_for_each_mc_addr(ha, dev) {
1216 memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
1217 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1219 mac_info.vpath_no = vpath_idx;
1220 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1221 status = vxge_add_mac_addr(vdev, &mac_info);
1222 if (status != VXGE_HW_OK) {
1223 vxge_debug_init(VXGE_ERR,
1224 "%s:%d Setting individual "
1225 "multicast address failed",
1226 __func__, __LINE__);
1227 goto _set_all_mcast;
1234 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1235 /* Delete previous MC's */
1236 for (i = 0; i < mcast_cnt; i++) {
1237 list_for_each_safe(entry, next, list_head) {
1238 mac_entry = (struct vxge_mac_addrs *)entry;
1239 /* Copy the mac address to delete */
1240 mac_address = (u8 *)&mac_entry->macaddr;
1241 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1243 if (is_multicast_ether_addr(mac_info.macaddr))
1247 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1249 mac_info.vpath_no = vpath_idx;
1250 status = vxge_del_mac_addr(vdev, &mac_info);
1254 /* Enable all multicast */
1255 for (i = 0; i < vdev->no_of_vpath; i++) {
1256 vpath = &vdev->vpaths[i];
1257 vxge_assert(vpath->is_open);
1259 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1260 if (status != VXGE_HW_OK) {
1261 vxge_debug_init(VXGE_ERR,
1262 "%s:%d Enabling all multicasts failed",
1263 __func__, __LINE__);
1265 vdev->all_multi_flg = 1;
1267 dev->flags |= IFF_ALLMULTI;
1270 vxge_debug_entryexit(VXGE_TRACE,
1271 "%s:%d Exiting...", __func__, __LINE__);
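/* The multicast update strategy above is delete-then-add: previously
 * programmed multicast DA entries are removed and the kernel's current list
 * is reprogrammed on every vpath. If the list would overflow the DA table,
 * the driver falls back to all-multicast and sets IFF_ALLMULTI.
 */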
1276 * @dev: pointer to the device structure
1278 * Update entry "0" (default MAC addr)
1280 static int vxge_set_mac_addr(struct net_device *dev, void *p)
1282 struct sockaddr *addr = p;
1283 struct vxgedev *vdev;
1284 struct __vxge_hw_device *hldev;
1285 enum vxge_hw_status status = VXGE_HW_OK;
1286 struct macInfo mac_info_new, mac_info_old;
1289 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1291 vdev = netdev_priv(dev);
1294 if (!is_valid_ether_addr(addr->sa_data))
1297 memset(&mac_info_new, 0, sizeof(struct macInfo));
1298 memset(&mac_info_old, 0, sizeof(struct macInfo));
1300 vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
1301 __func__, __LINE__);
1303 /* Get the old address */
1304 memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
1306 /* Copy the new address */
1307 memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
1309 /* First delete the old mac address from all the vpaths
1310 as we can't specify the index while adding new mac address */
1311 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1312 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
1313 if (!vpath->is_open) {
1314 /* This can happen when this interface is added to/removed
1315 from the bonding interface. Delete this station address
1316 from the linked list */
1317 vxge_mac_list_del(vpath, &mac_info_old);
1319 /* Add this new address to the linked list
1320 for later restoring */
1321 vxge_mac_list_add(vpath, &mac_info_new);
1325 /* Delete the station address */
1326 mac_info_old.vpath_no = vpath_idx;
1327 status = vxge_del_mac_addr(vdev, &mac_info_old);
1330 if (unlikely(!is_vxge_card_up(vdev))) {
1331 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1335 /* Set this mac address to all the vpaths */
1336 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1337 mac_info_new.vpath_no = vpath_idx;
1338 mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1339 status = vxge_add_mac_addr(vdev, &mac_info_new);
1340 if (status != VXGE_HW_OK)
1344 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1350 * vxge_vpath_intr_enable
1351 * @vdev: pointer to vdev
1352 * @vp_id: vpath for which to enable the interrupts
1354 * Enables the interrupts for the vpath
1356 static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1358 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1360 int tim_msix_id[4] = {0, 1, 0, 0};
1361 int alarm_msix_id = VXGE_ALARM_MSIX_ID;
1363 vxge_hw_vpath_intr_enable(vpath->handle);
1365 if (vdev->config.intr_type == INTA)
1366 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1368 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1371 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1372 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1373 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1375 /* enable the alarm vector */
1376 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1377 VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
1378 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
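/* MSI-X vector layout assumed above: each vpath owns a block of
 * VXGE_HW_VPATH_MSIX_ACTIVE vectors (Tx at offset 0, Rx at offset 1), and a
 * single alarm vector (VXGE_ALARM_MSIX_ID) in the first vpath's block is
 * shared for error and alarm events.
 */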
1383 * vxge_vpath_intr_disable
1384 * @vdev: pointer to vdev
1385 * @vp_id: vpath for which to disable the interrupts
1387 * Disables the interrupts for the vpath
1389 static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1391 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1392 struct __vxge_hw_device *hldev;
1395 hldev = pci_get_drvdata(vdev->pdev);
1397 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1399 vxge_hw_vpath_intr_disable(vpath->handle);
1401 if (vdev->config.intr_type == INTA)
1402 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1404 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1405 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1406 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1408 /* disable the alarm vector */
1409 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1410 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
1411 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1415 /* Search for a mac address in the DA table */
1416 static enum vxge_hw_status
1417 vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
1419 enum vxge_hw_status status = VXGE_HW_OK;
1420 unsigned char macmask[ETH_ALEN];
1421 unsigned char macaddr[ETH_ALEN];
1423 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1425 if (status != VXGE_HW_OK) {
1426 vxge_debug_init(VXGE_ERR,
1427 "DA config list entry failed for vpath:%d",
1432 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1433 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1435 if (status != VXGE_HW_OK)
1442 /* Store all mac addresses from the list to the DA table */
1443 static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1445 enum vxge_hw_status status = VXGE_HW_OK;
1446 struct macInfo mac_info;
1447 u8 *mac_address = NULL;
1448 struct list_head *entry, *next;
1450 memset(&mac_info, 0, sizeof(struct macInfo));
1452 if (vpath->is_open) {
1453 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1456 ((struct vxge_mac_addrs *)entry)->macaddr;
1457 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1458 ((struct vxge_mac_addrs *)entry)->state =
1459 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1460 /* does this mac address already exist in da table? */
1461 status = vxge_search_mac_addr_in_da_table(vpath,
1463 if (status != VXGE_HW_OK) {
1464 /* Add this mac address to the DA table */
1465 status = vxge_hw_vpath_mac_addr_add(
1466 vpath->handle, mac_info.macaddr,
1468 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1469 if (status != VXGE_HW_OK) {
1470 vxge_debug_init(VXGE_ERR,
1471 "DA add entry failed for vpath:%d",
1473 ((struct vxge_mac_addrs *)entry)->state
1474 = VXGE_LL_MAC_ADDR_IN_LIST;
1483 /* Store all vlan ids from the list to the vid table */
1484 static enum vxge_hw_status
1485 vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1487 enum vxge_hw_status status = VXGE_HW_OK;
1488 struct vxgedev *vdev = vpath->vdev;
1491 if (!vpath->is_open)
1494 for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
1495 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1502 * @vdev: pointer to vdev
1503 * @vp_id: vpath to reset
1507 static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1509 enum vxge_hw_status status = VXGE_HW_OK;
1510 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1513 /* check if device is down already */
1514 if (unlikely(!is_vxge_card_up(vdev)))
1517 /* is device reset already scheduled */
1518 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1521 if (vpath->handle) {
1522 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1523 if (is_vxge_card_up(vdev) &&
1524 vxge_hw_vpath_recover_from_reset(vpath->handle)
1526 vxge_debug_init(VXGE_ERR,
1527 "vxge_hw_vpath_recover_from_reset"
1528 "failed for vpath:%d", vp_id);
1532 vxge_debug_init(VXGE_ERR,
1533 "vxge_hw_vpath_reset failed for"
1538 return VXGE_HW_FAIL;
1540 vxge_restore_vpath_mac_addr(vpath);
1541 vxge_restore_vpath_vid_table(vpath);
1543 /* Enable all broadcast */
1544 vxge_hw_vpath_bcast_enable(vpath->handle);
1546 /* Enable all multicast */
1547 if (vdev->all_multi_flg) {
1548 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1549 if (status != VXGE_HW_OK)
1550 vxge_debug_init(VXGE_ERR,
1551 "%s:%d Enabling multicast failed",
1552 __func__, __LINE__);
1555 /* Enable the interrupts */
1556 vxge_vpath_intr_enable(vdev, vp_id);
1560 /* Enable the flow of traffic through the vpath */
1561 vxge_hw_vpath_enable(vpath->handle);
1564 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
1565 vpath->ring.last_status = VXGE_HW_OK;
1567 /* Vpath reset done */
1568 clear_bit(vp_id, &vdev->vp_reset);
1570 /* Start the vpath queue */
1571 if (netif_tx_queue_stopped(vpath->fifo.txq))
1572 netif_tx_wake_queue(vpath->fifo.txq);
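/* Per-vpath reset sequence above: HW reset and recovery, restore of the MAC
 * and VLAN tables, re-enabling of broadcast/multicast and interrupts, then
 * traffic enable and an Rx doorbell re-init before the Tx queue is woken.
 */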
1578 static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1582 /* Enable CI for RTI */
1583 if (vdev->config.intr_type == MSI_X) {
1584 for (i = 0; i < vdev->no_of_vpath; i++) {
1585 struct __vxge_hw_ring *hw_ring;
1587 hw_ring = vdev->vpaths[i].ring.handle;
1588 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1592 /* Enable CI for TTI */
1593 for (i = 0; i < vdev->no_of_vpath; i++) {
1594 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1595 vxge_hw_vpath_tti_ci_set(hw_fifo);
1597 * For Inta (with or without napi), Set CI ON for only one
1598 * vpath. (Have only one free running timer).
1600 if ((vdev->config.intr_type == INTA) && (i == 0))
1607 static int do_vxge_reset(struct vxgedev *vdev, int event)
1609 enum vxge_hw_status status;
1610 int ret = 0, vp_id, i;
1612 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1614 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
1615 /* check if device is down already */
1616 if (unlikely(!is_vxge_card_up(vdev)))
1619 /* is reset already scheduled */
1620 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1624 if (event == VXGE_LL_FULL_RESET) {
1625 netif_carrier_off(vdev->ndev);
1627 /* wait for all the vpath reset to complete */
1628 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1629 while (test_bit(vp_id, &vdev->vp_reset))
1633 netif_carrier_on(vdev->ndev);
1635 /* if execution mode is set to debug, don't reset the adapter */
1636 if (unlikely(vdev->exec_mode)) {
1637 vxge_debug_init(VXGE_ERR,
1638 "%s: execution mode is debug, returning..",
1640 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1641 netif_tx_stop_all_queues(vdev->ndev);
1646 if (event == VXGE_LL_FULL_RESET) {
1647 vxge_hw_device_wait_receive_idle(vdev->devh);
1648 vxge_hw_device_intr_disable(vdev->devh);
1650 switch (vdev->cric_err_event) {
1651 case VXGE_HW_EVENT_UNKNOWN:
1652 netif_tx_stop_all_queues(vdev->ndev);
1653 vxge_debug_init(VXGE_ERR,
1654 "fatal: %s: Disabling device due to"
1659 case VXGE_HW_EVENT_RESET_START:
1661 case VXGE_HW_EVENT_RESET_COMPLETE:
1662 case VXGE_HW_EVENT_LINK_DOWN:
1663 case VXGE_HW_EVENT_LINK_UP:
1664 case VXGE_HW_EVENT_ALARM_CLEARED:
1665 case VXGE_HW_EVENT_ECCERR:
1666 case VXGE_HW_EVENT_MRPCIM_ECCERR:
1669 case VXGE_HW_EVENT_FIFO_ERR:
1670 case VXGE_HW_EVENT_VPATH_ERR:
1672 case VXGE_HW_EVENT_CRITICAL_ERR:
1673 netif_tx_stop_all_queues(vdev->ndev);
1674 vxge_debug_init(VXGE_ERR,
1675 "fatal: %s: Disabling device due to"
1678 /* SOP or device reset required */
1679 /* This event is not currently used */
1682 case VXGE_HW_EVENT_SERR:
1683 netif_tx_stop_all_queues(vdev->ndev);
1684 vxge_debug_init(VXGE_ERR,
1685 "fatal: %s: Disabling device due to"
1690 case VXGE_HW_EVENT_SRPCIM_SERR:
1691 case VXGE_HW_EVENT_MRPCIM_SERR:
1694 case VXGE_HW_EVENT_SLOT_FREEZE:
1695 netif_tx_stop_all_queues(vdev->ndev);
1696 vxge_debug_init(VXGE_ERR,
1697 "fatal: %s: Disabling device due to"
1708 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
1709 netif_tx_stop_all_queues(vdev->ndev);
1711 if (event == VXGE_LL_FULL_RESET) {
1712 status = vxge_reset_all_vpaths(vdev);
1713 if (status != VXGE_HW_OK) {
1714 vxge_debug_init(VXGE_ERR,
1715 "fatal: %s: can not reset vpaths",
1722 if (event == VXGE_LL_COMPL_RESET) {
1723 for (i = 0; i < vdev->no_of_vpath; i++)
1724 if (vdev->vpaths[i].handle) {
1725 if (vxge_hw_vpath_recover_from_reset(
1726 vdev->vpaths[i].handle)
1728 vxge_debug_init(VXGE_ERR,
1729 "vxge_hw_vpath_recover_"
1730 "from_reset failed for vpath: "
1736 vxge_debug_init(VXGE_ERR,
1737 "vxge_hw_vpath_reset failed for "
1744 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
1745 /* Reprogram the DA table with populated mac addresses */
1746 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1747 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1748 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1751 /* enable vpath interrupts */
1752 for (i = 0; i < vdev->no_of_vpath; i++)
1753 vxge_vpath_intr_enable(vdev, i);
1755 vxge_hw_device_intr_enable(vdev->devh);
1759 /* Indicate card up */
1760 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1762 /* Get the traffic to flow through the vpaths */
1763 for (i = 0; i < vdev->no_of_vpath; i++) {
1764 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1766 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1769 netif_tx_wake_all_queues(vdev->ndev);
1773 vxge_config_ci_for_tti_rti(vdev);
1776 vxge_debug_entryexit(VXGE_TRACE,
1777 "%s:%d Exiting...", __func__, __LINE__);
1779 /* Indicate reset done */
1780 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
1781 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
1787 * @vdev: pointer to ll device
1789 * driver may reset the chip on events of serr, eccerr, etc
1791 static void vxge_reset(struct work_struct *work)
1793 struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1795 if (!netif_running(vdev->ndev))
1798 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1802 * vxge_poll - Receive handler when Receive Polling is used.
1803 * @dev: pointer to the device structure.
1804 * @budget: Number of packets budgeted to be processed in this iteration.
1806 * This function comes into the picture only if the receive side is being
1807 * handled through polling (called NAPI in Linux). It mostly does what the normal
1808 * Rx interrupt handler does in terms of descriptor and packet processing
1809 * but not in an interrupt context. Also it will process a specified number
1810 * of packets at most in one iteration. This value is passed down by the
1811 * kernel as the function argument 'budget'.
1813 static int vxge_poll_msix(struct napi_struct *napi, int budget)
1815 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1817 int budget_org = budget;
1819 ring->budget = budget;
1820 ring->pkts_processed = 0;
1821 vxge_hw_vpath_poll_rx(ring->handle);
1822 pkts_processed = ring->pkts_processed;
1824 if (ring->pkts_processed < budget_org) {
1825 napi_complete(napi);
1827 /* Re enable the Rx interrupts for the vpath */
1828 vxge_hw_channel_msix_unmask(
1829 (struct __vxge_hw_channel *)ring->handle,
1830 ring->rx_vector_no);
1834 /* We copy and return the local variable because, right after the MSI-X
1835 * interrupt is cleared above, a new interrupt may fire and preempt this
1836 * NAPI thread */
1837 return pkts_processed;
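/* NAPI contract: processing fewer packets than 'budget' lets the handler
 * call napi_complete() and re-enable the Rx vector above, while returning a
 * count equal to 'budget' keeps the poll scheduled with interrupts masked.
 */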
1840 static int vxge_poll_inta(struct napi_struct *napi, int budget)
1842 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1843 int pkts_processed = 0;
1845 int budget_org = budget;
1846 struct vxge_ring *ring;
1848 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
1850 for (i = 0; i < vdev->no_of_vpath; i++) {
1851 ring = &vdev->vpaths[i].ring;
1852 ring->budget = budget;
1853 ring->pkts_processed = 0;
1854 vxge_hw_vpath_poll_rx(ring->handle);
1855 pkts_processed += ring->pkts_processed;
1856 budget -= ring->pkts_processed;
1861 VXGE_COMPLETE_ALL_TX(vdev);
1863 if (pkts_processed < budget_org) {
1864 napi_complete(napi);
1865 /* Re enable the Rx interrupts for the ring */
1866 vxge_hw_device_unmask_all(hldev);
1867 vxge_hw_device_flush_io(hldev);
1870 return pkts_processed;
1873 #ifdef CONFIG_NET_POLL_CONTROLLER
1875 * vxge_netpoll - netpoll event handler entry point
1876 * @dev : pointer to the device structure.
1878 * This function will be called by upper layer to check for events on the
1879 * interface in situations where interrupts are disabled. It is used for
1880 * specific in-kernel networking tasks, such as remote consoles and kernel
1881 * debugging over the network (for example, netdump in Red Hat).
1883 static void vxge_netpoll(struct net_device *dev)
1885 struct __vxge_hw_device *hldev;
1886 struct vxgedev *vdev;
1888 vdev = netdev_priv(dev);
1889 hldev = pci_get_drvdata(vdev->pdev);
1891 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1893 if (pci_channel_offline(vdev->pdev))
1896 disable_irq(dev->irq);
1897 vxge_hw_device_clear_tx_rx(hldev);
1900 VXGE_COMPLETE_ALL_RX(vdev);
1901 VXGE_COMPLETE_ALL_TX(vdev);
1903 enable_irq(dev->irq);
1905 vxge_debug_entryexit(VXGE_TRACE,
1906 "%s:%d Exiting...", __func__, __LINE__);
1910 /* RTH configuration */
1911 static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1913 enum vxge_hw_status status = VXGE_HW_OK;
1914 struct vxge_hw_rth_hash_types hash_types;
1915 u8 itable[256] = {0}; /* indirection table */
1916 u8 mtable[256] = {0}; /* CPU to vpath mapping */
1917 int index;
1919 /* Filling:
1920 *	- itable with bucket numbers
1921 *	- mtable with bucket-to-vpath mapping */
1924 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1925 itable[index] = index;
1926 mtable[index] = index % vdev->no_of_vpath;
1929 /* set indirection table, bucket-to-vpath mapping */
1930 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1933 vdev->config.rth_bkt_sz);
1934 if (status != VXGE_HW_OK) {
1935 vxge_debug_init(VXGE_ERR,
1936 "RTH indirection table configuration failed "
1937 "for vpath:%d", vdev->vpaths[0].device_id);
1941 /* Fill RTH hash types */
1942 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1943 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1944 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1945 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1946 hash_types.hash_type_tcpipv6ex_en =
1947 vdev->config.rth_hash_type_tcpipv6ex;
1948 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1951 * Because the itable_set() method uses the active_table field
1952 * for the target virtual path the RTH config should be updated
1953 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1954 * when steering frames.
1956 for (index = 0; index < vdev->no_of_vpath; index++) {
1957 status = vxge_hw_vpath_rts_rth_set(
1958 vdev->vpaths[index].handle,
1959 vdev->config.rth_algorithm,
1961 vdev->config.rth_bkt_sz);
1962 if (status != VXGE_HW_OK) {
1963 vxge_debug_init(VXGE_ERR,
1964 "RTH configuration failed for vpath:%d",
1965 vdev->vpaths[index].device_id);
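/* Worked example (hypothetical configuration): with rth_bkt_sz = 2 there are
 * 1 << 2 = 4 buckets, and with two vpaths the tables become
 * itable[] = {0, 1, 2, 3} and mtable[] = {0, 1, 0, 1}, i.e. the hash buckets
 * alternate between the two vpaths.
 */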
1974 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1976 enum vxge_hw_status status = VXGE_HW_OK;
1977 struct vxge_vpath *vpath;
1980 for (i = 0; i < vdev->no_of_vpath; i++) {
1981 vpath = &vdev->vpaths[i];
1982 if (vpath->handle) {
1983 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1984 if (is_vxge_card_up(vdev) &&
1985 vxge_hw_vpath_recover_from_reset(
1986 vpath->handle) != VXGE_HW_OK) {
1987 vxge_debug_init(VXGE_ERR,
1988 "vxge_hw_vpath_recover_"
1989 "from_reset failed for vpath: "
1994 vxge_debug_init(VXGE_ERR,
1995 "vxge_hw_vpath_reset failed for "
2006 static void vxge_close_vpaths(struct vxgedev *vdev, int index)
2008 struct vxge_vpath *vpath;
2011 for (i = index; i < vdev->no_of_vpath; i++) {
2012 vpath = &vdev->vpaths[i];
2014 if (vpath->handle && vpath->is_open) {
2015 vxge_hw_vpath_close(vpath->handle);
2016 vdev->stats.vpaths_open--;
2019 vpath->handle = NULL;
2024 static int vxge_open_vpaths(struct vxgedev *vdev)
2026 struct vxge_hw_vpath_attr attr;
2027 enum vxge_hw_status status;
2028 struct vxge_vpath *vpath;
2032 for (i = 0; i < vdev->no_of_vpath; i++) {
2033 vpath = &vdev->vpaths[i];
2034 vxge_assert(vpath->is_configured);
2036 if (!vdev->titan1) {
2037 struct vxge_hw_vp_config *vcfg;
2038 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2040 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2041 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2042 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2043 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2044 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2045 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2046 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2047 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2048 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2051 attr.vp_id = vpath->device_id;
2052 attr.fifo_attr.callback = vxge_xmit_compl;
2053 attr.fifo_attr.txdl_term = vxge_tx_term;
2054 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
2055 attr.fifo_attr.userdata = &vpath->fifo;
2057 attr.ring_attr.callback = vxge_rx_1b_compl;
2058 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2059 attr.ring_attr.rxd_term = vxge_rx_term;
2060 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
2061 attr.ring_attr.userdata = &vpath->ring;
2063 vpath->ring.ndev = vdev->ndev;
2064 vpath->ring.pdev = vdev->pdev;
2066 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
2067 if (status == VXGE_HW_OK) {
2068 vpath->fifo.handle =
2069 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
2070 vpath->ring.handle =
2071 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
2072 vpath->fifo.tx_steering_type =
2073 vdev->config.tx_steering_type;
2074 vpath->fifo.ndev = vdev->ndev;
2075 vpath->fifo.pdev = vdev->pdev;
2076 if (vdev->config.tx_steering_type)
2078 netdev_get_tx_queue(vdev->ndev, i);
2081 netdev_get_tx_queue(vdev->ndev, 0);
2082 vpath->fifo.indicate_max_pkts =
2083 vdev->config.fifo_indicate_max_pkts;
2084 vpath->fifo.tx_vector_no = 0;
2085 vpath->ring.rx_vector_no = 0;
2086 vpath->ring.rx_hwts = vdev->rx_hwts;
2088 vdev->vp_handles[i] = vpath->handle;
2089 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
2090 vdev->stats.vpaths_open++;
2092 vdev->stats.vpath_open_fail++;
2093 vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
2094 "open with status: %d",
2095 vdev->ndev->name, vpath->device_id,
2097 vxge_close_vpaths(vdev, 0);
2101 vp_id = vpath->handle->vpath->vp_id;
2102 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2109 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2110 * if the interrupts are not within a range
2111 * @fifo: pointer to transmit fifo structure
2112 * Description: The function changes the boundary timer and restriction
2113 * timer values depending on the traffic.
2114 * Return Value: None
2115 */
2116 static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2118 fifo->interrupt_count++;
2119 if (jiffies > fifo->jiffies + HZ / 100) {
2120 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2122 fifo->jiffies = jiffies;
2123 if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2124 hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2125 hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2126 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2127 } else if (hw_fifo->rtimer != 0) {
2128 hw_fifo->rtimer = 0;
2129 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2131 fifo->interrupt_count = 0;
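/* The adaptation above runs at most once per HZ/100 jiffies (roughly 10 ms):
 * if more than VXGE_T1A_MAX_TX_INTERRUPT_COUNT interrupts arrived in that
 * window, the restriction timer is raised to VXGE_TTI_RTIMER_ADAPT_VAL to
 * throttle them; otherwise it is reset to 0.
 */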
2136 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2137 * if the interrupts are not within a range
2138 * @ring: pointer to receive ring structure
2139 * Description: The function increases or decreases the packet count within
2140 * the ranges of traffic utilization, if the interrupts due to this ring are
2141 * not within a fixed range.
2142 * Return Value: Nothing
2143 */
2144 static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2146 ring->interrupt_count++;
2147 if (jiffies > ring->jiffies + HZ / 100) {
2148 struct __vxge_hw_ring *hw_ring = ring->handle;
2150 ring->jiffies = jiffies;
2151 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2152 hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2153 hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2154 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2155 } else if (hw_ring->rtimer != 0) {
2156 hw_ring->rtimer = 0;
2157 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2159 ring->interrupt_count = 0;
2165 * @irq: the irq of the device.
2166 * @dev_id: a void pointer to the vxgedev structure of the Titan device
2169 * This function is the ISR handler of the device when napi is enabled. It
2170 * identifies the reason for the interrupt and calls the relevant service
2173 static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2175 struct net_device *dev;
2176 struct __vxge_hw_device *hldev;
2178 enum vxge_hw_status status;
2179 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2181 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2184 hldev = pci_get_drvdata(vdev->pdev);
2186 if (pci_channel_offline(vdev->pdev))
2189 if (unlikely(!is_vxge_card_up(vdev)))
2192 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
2193 if (status == VXGE_HW_OK) {
2194 vxge_hw_device_mask_all(hldev);
2197 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2198 vdev->vpaths_deployed >>
2199 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2201 vxge_hw_device_clear_tx_rx(hldev);
2202 napi_schedule(&vdev->napi);
2203 vxge_debug_intr(VXGE_TRACE,
2204 "%s:%d Exiting...", __func__, __LINE__);
2207 vxge_hw_device_unmask_all(hldev);
2208 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2209 (status == VXGE_HW_ERR_CRITICAL) ||
2210 (status == VXGE_HW_ERR_FIFO))) {
2211 vxge_hw_device_mask_all(hldev);
2212 vxge_hw_device_flush_io(hldev);
2214 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2217 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2221 #ifdef CONFIG_PCI_MSI
2223 static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
2225 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2227 adaptive_coalesce_tx_interrupts(fifo);
2229 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2230 fifo->tx_vector_no);
2232 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2233 fifo->tx_vector_no);
2235 VXGE_COMPLETE_VPATH_TX(fifo);
2237 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2238 fifo->tx_vector_no);
2245 static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
2247 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2249 adaptive_coalesce_rx_interrupts(ring);
2251 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2252 ring->rx_vector_no);
2254 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2255 ring->rx_vector_no);
2257 napi_schedule(&ring->napi);
2262 vxge_alarm_msix_handle(int irq, void *dev_id)
2265 enum vxge_hw_status status;
2266 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2267 struct vxgedev *vdev = vpath->vdev;
2268 int msix_id = (vpath->handle->vpath->vp_id *
2269 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2271 for (i = 0; i < vdev->no_of_vpath; i++) {
2272 /* Reduce the chance of losing alarm interrupts by masking
2273 * the vector. A pending bit will be set if an alarm is
2274 * generated and, on unmask, the interrupt will fire.
2276 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2277 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2280 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2282 if (status == VXGE_HW_OK) {
2283 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2288 vxge_debug_intr(VXGE_ERR,
2289 "%s: vxge_hw_vpath_alarm_process failed %x ",
2290 VXGE_DRIVER_NAME, status);
2295 static int vxge_alloc_msix(struct vxgedev *vdev)
2298 int msix_intr_vect = 0, temp;
2302 /* Tx/Rx MSIX Vectors count */
2303 vdev->intr_cnt = vdev->no_of_vpath * 2;
2305 /* Alarm MSIX Vectors count */
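	/* e.g. with 4 vpaths: 4 * 2 Tx/Rx vectors + 1 alarm vector = 9
	 * MSI-X entries to request (illustrative count) */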
2308 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2310 if (!vdev->entries) {
2311 vxge_debug_init(VXGE_ERR,
2312 "%s: memory allocation failed",
2315 goto alloc_entries_failed;
2318 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2319 sizeof(struct vxge_msix_entry),
2321 if (!vdev->vxge_entries) {
2322 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2325 goto alloc_vxge_entries_failed;
2328 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
2330 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2332 /* Initialize the fifo vector */
2333 vdev->entries[j].entry = msix_intr_vect;
2334 vdev->vxge_entries[j].entry = msix_intr_vect;
2335 vdev->vxge_entries[j].in_use = 0;
2338 /* Initialize the ring vector */
2339 vdev->entries[j].entry = msix_intr_vect + 1;
2340 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2341 vdev->vxge_entries[j].in_use = 0;
2345 /* Initialize the alarm vector */
2346 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2347 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2348 vdev->vxge_entries[j].in_use = 0;
2350 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
2352 vxge_debug_init(VXGE_ERR,
2353 "%s: MSI-X enable failed for %d vectors, ret: %d",
2354 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2355 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
2357 goto enable_msix_failed;
2360 kfree(vdev->entries);
2361 kfree(vdev->vxge_entries);
2362 vdev->entries = NULL;
2363 vdev->vxge_entries = NULL;
2364 /* Try with fewer vectors by reducing the vpath count */
2366 vxge_close_vpaths(vdev, temp);
2367 vdev->no_of_vpath = temp;
2369 } else if (ret < 0) {
2371 goto enable_msix_failed;
2376 kfree(vdev->vxge_entries);
2377 alloc_vxge_entries_failed:
2378 kfree(vdev->entries);
2379 alloc_entries_failed:
2383 static int vxge_enable_msix(struct vxgedev *vdev)
2387 /* 0 - Tx, 1 - Rx */
2388 int tim_msix_id[4] = {0, 1, 0, 0};
2392 /* allocate msix vectors */
2393 ret = vxge_alloc_msix(vdev);
2395 for (i = 0; i < vdev->no_of_vpath; i++) {
2396 struct vxge_vpath *vpath = &vdev->vpaths[i];
2398 /* If the fifo or ring is not enabled, the MSI-X vector for
2399 * it should be set to 0.
2401 vpath->ring.rx_vector_no = (vpath->device_id *
2402 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2404 vpath->fifo.tx_vector_no = (vpath->device_id *
2405 VXGE_HW_VPATH_MSIX_ACTIVE);
2407 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2408 VXGE_ALARM_MSIX_ID);
2415 static void vxge_rem_msix_isr(struct vxgedev *vdev)
2419 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2421 if (vdev->vxge_entries[intr_cnt].in_use) {
2422 synchronize_irq(vdev->entries[intr_cnt].vector);
2423 free_irq(vdev->entries[intr_cnt].vector,
2424 vdev->vxge_entries[intr_cnt].arg);
2425 vdev->vxge_entries[intr_cnt].in_use = 0;
2429 kfree(vdev->entries);
2430 kfree(vdev->vxge_entries);
2431 vdev->entries = NULL;
2432 vdev->vxge_entries = NULL;
2434 if (vdev->config.intr_type == MSI_X)
2435 pci_disable_msix(vdev->pdev);
2439 static void vxge_rem_isr(struct vxgedev *vdev)
2441 struct __vxge_hw_device *hldev;
2442 hldev = pci_get_drvdata(vdev->pdev);
2444 #ifdef CONFIG_PCI_MSI
2445 if (vdev->config.intr_type == MSI_X) {
2446 vxge_rem_msix_isr(vdev);
2449 if (vdev->config.intr_type == INTA) {
2450 synchronize_irq(vdev->pdev->irq);
2451 free_irq(vdev->pdev->irq, vdev);
2455 static int vxge_add_isr(struct vxgedev *vdev)
2458 #ifdef CONFIG_PCI_MSI
2459 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2460 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2462 if (vdev->config.intr_type == MSI_X)
2463 ret = vxge_enable_msix(vdev);
2466 vxge_debug_init(VXGE_ERR,
2467 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2468 vxge_debug_init(VXGE_ERR,
2469 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2470 vdev->config.intr_type = INTA;
2473 if (vdev->config.intr_type == MSI_X) {
2475 intr_idx < (vdev->no_of_vpath *
2476 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2478 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2483 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2484 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2486 vdev->entries[intr_cnt].entry,
2489 vdev->entries[intr_cnt].vector,
2490 vxge_tx_msix_handle, 0,
2491 vdev->desc[intr_cnt],
2492 &vdev->vpaths[vp_idx].fifo);
2493 vdev->vxge_entries[intr_cnt].arg =
2494 &vdev->vpaths[vp_idx].fifo;
2498 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2499 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2501 vdev->entries[intr_cnt].entry,
2504 vdev->entries[intr_cnt].vector,
2505 vxge_rx_msix_napi_handle,
2507 vdev->desc[intr_cnt],
2508 &vdev->vpaths[vp_idx].ring);
2509 vdev->vxge_entries[intr_cnt].arg =
2510 &vdev->vpaths[vp_idx].ring;
2516 vxge_debug_init(VXGE_ERR,
2517 "%s: MSIX - %d Registration failed",
2518 vdev->ndev->name, intr_cnt);
2519 vxge_rem_msix_isr(vdev);
2520 vdev->config.intr_type = INTA;
2521 vxge_debug_init(VXGE_ERR,
2522 "%s: Defaulting to INTA"
2523 , vdev->ndev->name);
2528 /* We requested this MSI-X interrupt */
2529 vdev->vxge_entries[intr_cnt].in_use = 1;
2530 msix_idx += vdev->vpaths[vp_idx].device_id *
2531 VXGE_HW_VPATH_MSIX_ACTIVE;
2532 vxge_hw_vpath_msix_unmask(
2533 vdev->vpaths[vp_idx].handle,
2538 /* Point to next vpath handler */
2539 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2540 (vp_idx < (vdev->no_of_vpath - 1)))
2544 intr_cnt = vdev->no_of_vpath * 2;
2545 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2546 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2548 vdev->entries[intr_cnt].entry,
2550 /* For Alarm interrupts */
2551 ret = request_irq(vdev->entries[intr_cnt].vector,
2552 vxge_alarm_msix_handle, 0,
2553 vdev->desc[intr_cnt],
2556 vxge_debug_init(VXGE_ERR,
2557 "%s: MSIX - %d Registration failed",
2558 vdev->ndev->name, intr_cnt);
2559 vxge_rem_msix_isr(vdev);
2560 vdev->config.intr_type = INTA;
2561 vxge_debug_init(VXGE_ERR,
2562 "%s: Defaulting to INTA",
2567 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2568 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2569 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2571 vdev->vxge_entries[intr_cnt].in_use = 1;
2572 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2577 if (vdev->config.intr_type == INTA) {
2578 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2579 "%s:vxge:INTA", vdev->ndev->name);
2580 vxge_hw_device_set_intr_type(vdev->devh,
2581 VXGE_HW_INTR_MODE_IRQLINE);
2583 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2585 ret = request_irq((int) vdev->pdev->irq,
2587 IRQF_SHARED, vdev->desc[0], vdev);
2589 vxge_debug_init(VXGE_ERR,
2590 "%s %s-%d: ISR registration failed",
2591 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2594 vxge_debug_init(VXGE_TRACE,
2595 "new %s-%d line allocated",
2596 "IRQ", vdev->pdev->irq);
2602 static void vxge_poll_vp_reset(unsigned long data)
2604 struct vxgedev *vdev = (struct vxgedev *)data;
2607 for (i = 0; i < vdev->no_of_vpath; i++) {
2608 if (test_bit(i, &vdev->vp_reset)) {
2609 vxge_reset_vpath(vdev, i);
2613 if (j && (vdev->config.intr_type != MSI_X)) {
2614 vxge_hw_device_unmask_all(vdev->devh);
2615 vxge_hw_device_flush_io(vdev->devh);
2618 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2621 static void vxge_poll_vp_lockup(unsigned long data)
2623 struct vxgedev *vdev = (struct vxgedev *)data;
2624 enum vxge_hw_status status = VXGE_HW_OK;
2625 struct vxge_vpath *vpath;
2626 struct vxge_ring *ring;
2628 unsigned long rx_frms;
2630 for (i = 0; i < vdev->no_of_vpath; i++) {
2631 ring = &vdev->vpaths[i].ring;
2633 /* Truncated to machine word size number of frames */
2634 rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
2636 /* Did this vpath receive any packets? */
2637 if (ring->stats.prev_rx_frms == rx_frms) {
2638 status = vxge_hw_vpath_check_leak(ring->handle);
2640 /* Did it receive any packets last time? */
2641 if ((VXGE_HW_FAIL == status) &&
2642 (VXGE_HW_FAIL == ring->last_status)) {
2644 /* schedule vpath reset */
2645 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2646 vpath = &vdev->vpaths[i];
2648 /* disable interrupts for this vpath */
2649 vxge_vpath_intr_disable(vdev, i);
2651 /* stop the queue for this vpath */
2652 netif_tx_stop_queue(vpath->fifo.txq);
2657 ring->stats.prev_rx_frms = rx_frms;
2658 ring->last_status = status;
2661 /* Check every millisecond */
2662 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
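	/* Note (added observation): integer division makes HZ / 1000
	 * evaluate to 0 when HZ < 1000, so on such kernels the timer is
	 * re-armed for the next tick rather than a true 1 ms interval. */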
2665 static netdev_features_t vxge_fix_features(struct net_device *dev,
2666 netdev_features_t features)
2668 netdev_features_t changed = dev->features ^ features;
2670 /* Enabling RTH requires some of the logic in vxge_device_register and a
2671 * vpath reset. Due to these restrictions, only allow modification
2672 * while the interface is down.
2674 if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2675 features ^= NETIF_F_RXHASH;
2680 static int vxge_set_features(struct net_device *dev, netdev_features_t features)
2682 struct vxgedev *vdev = netdev_priv(dev);
2683 netdev_features_t changed = dev->features ^ features;
2685 if (!(changed & NETIF_F_RXHASH))
2688 /* !netif_running() ensured by vxge_fix_features() */
2690 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2691 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2692 dev->features = features ^ NETIF_F_RXHASH;
2693 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
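/*
 * Usage note (illustrative, not driver code): because vxge_fix_features()
 * rejects NETIF_F_RXHASH changes on a running interface, toggling receive
 * hashing from user space only takes effect while the link is down, e.g.:
 *
 *	# ip link set eth0 down
 *	# ethtool -K eth0 rxhash on
 *	# ip link set eth0 up
 */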
2702 * @dev: pointer to the device structure.
2704 * This function is the open entry point of the driver. It mainly calls a
2705 * function to allocate Rx buffers and inserts them into the buffer
2706 * descriptors and then enables the Rx part of the NIC.
2707 * Return value: '0' on success and an appropriate (-)ve integer as
2708 * defined in errno.h file on failure.
2710 static int vxge_open(struct net_device *dev)
2712 enum vxge_hw_status status;
2713 struct vxgedev *vdev;
2714 struct __vxge_hw_device *hldev;
2715 struct vxge_vpath *vpath;
2718 u64 val64, function_mode;
2720 vxge_debug_entryexit(VXGE_TRACE,
2721 "%s: %s:%d", dev->name, __func__, __LINE__);
2723 vdev = netdev_priv(dev);
2724 hldev = pci_get_drvdata(vdev->pdev);
2725 function_mode = vdev->config.device_hw_info.function_mode;
2727 /* make sure the link is off by default every time the NIC is enabled */
2729 netif_carrier_off(dev);
2732 status = vxge_open_vpaths(vdev);
2733 if (status != VXGE_HW_OK) {
2734 vxge_debug_init(VXGE_ERR,
2735 "%s: fatal: Vpath open failed", vdev->ndev->name);
2740 vdev->mtu = dev->mtu;
2742 status = vxge_add_isr(vdev);
2743 if (status != VXGE_HW_OK) {
2744 vxge_debug_init(VXGE_ERR,
2745 "%s: fatal: ISR add failed", dev->name);
2750 if (vdev->config.intr_type != MSI_X) {
2751 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2752 vdev->config.napi_weight);
2753 napi_enable(&vdev->napi);
2754 for (i = 0; i < vdev->no_of_vpath; i++) {
2755 vpath = &vdev->vpaths[i];
2756 vpath->ring.napi_p = &vdev->napi;
2759 for (i = 0; i < vdev->no_of_vpath; i++) {
2760 vpath = &vdev->vpaths[i];
2761 netif_napi_add(dev, &vpath->ring.napi,
2762 vxge_poll_msix, vdev->config.napi_weight);
2763 napi_enable(&vpath->ring.napi);
2764 vpath->ring.napi_p = &vpath->ring.napi;
2769 if (vdev->config.rth_steering) {
2770 status = vxge_rth_configure(vdev);
2771 if (status != VXGE_HW_OK) {
2772 vxge_debug_init(VXGE_ERR,
2773 "%s: fatal: RTH configuration failed",
2779 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2780 hldev->config.rth_en ? "enabled" : "disabled");
2782 for (i = 0; i < vdev->no_of_vpath; i++) {
2783 vpath = &vdev->vpaths[i];
2785 /* set initial mtu before enabling the device */
2786 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
2787 if (status != VXGE_HW_OK) {
2788 vxge_debug_init(VXGE_ERR,
2789 "%s: fatal: can not set new MTU", dev->name);
2795 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2796 vxge_debug_init(vdev->level_trace,
2797 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2798 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2800 /* Restore the DA, VID table and also multicast and promiscuous mode states */
2803 if (vdev->all_multi_flg) {
2804 for (i = 0; i < vdev->no_of_vpath; i++) {
2805 vpath = &vdev->vpaths[i];
2806 vxge_restore_vpath_mac_addr(vpath);
2807 vxge_restore_vpath_vid_table(vpath);
2809 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2810 if (status != VXGE_HW_OK)
2811 vxge_debug_init(VXGE_ERR,
2812 "%s:%d Enabling multicast failed",
2813 __func__, __LINE__);
2817 /* Enable vpaths to sniff all unicast/multicast traffic that is not
2818 * addressed to them. We allow promiscuous mode for the PF only
2822 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2823 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2825 vxge_hw_mgmt_reg_write(vdev->devh,
2826 vxge_hw_mgmt_reg_type_mrpcim,
2828 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2829 rxmac_authorize_all_addr),
2832 vxge_hw_mgmt_reg_write(vdev->devh,
2833 vxge_hw_mgmt_reg_type_mrpcim,
2835 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2836 rxmac_authorize_all_vid),
2839 vxge_set_multicast(dev);
2841 /* Enable broadcast and multicast for all vpaths */
2842 for (i = 0; i < vdev->no_of_vpath; i++) {
2843 vpath = &vdev->vpaths[i];
2844 status = vxge_hw_vpath_bcast_enable(vpath->handle);
2845 if (status != VXGE_HW_OK)
2846 vxge_debug_init(VXGE_ERR,
2847 "%s : Can not enable bcast for vpath "
2848 "id %d", dev->name, i);
2849 if (vdev->config.addr_learn_en) {
2850 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2851 if (status != VXGE_HW_OK)
2852 vxge_debug_init(VXGE_ERR,
2853 "%s : Can not enable mcast for vpath "
2854 "id %d", dev->name, i);
2858 vxge_hw_device_setpause_data(vdev->devh, 0,
2859 vdev->config.tx_pause_enable,
2860 vdev->config.rx_pause_enable);
2862 if (vdev->vp_reset_timer.function == NULL)
2863 vxge_os_timer(vdev->vp_reset_timer,
2864 vxge_poll_vp_reset, vdev, (HZ/2));
2866 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2867 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2868 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2871 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2875 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2876 netif_carrier_on(vdev->ndev);
2877 netdev_notice(vdev->ndev, "Link Up\n");
2878 vdev->stats.link_up++;
2881 vxge_hw_device_intr_enable(vdev->devh);
2885 for (i = 0; i < vdev->no_of_vpath; i++) {
2886 vpath = &vdev->vpaths[i];
2888 vxge_hw_vpath_enable(vpath->handle);
2890 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
2893 netif_tx_start_all_queues(vdev->ndev);
2896 vxge_config_ci_for_tti_rti(vdev);
2904 if (vdev->config.intr_type != MSI_X)
2905 napi_disable(&vdev->napi);
2907 for (i = 0; i < vdev->no_of_vpath; i++)
2908 napi_disable(&vdev->vpaths[i].ring.napi);
2912 vxge_close_vpaths(vdev, 0);
2914 vxge_debug_entryexit(VXGE_TRACE,
2915 "%s: %s:%d Exiting...",
2916 dev->name, __func__, __LINE__);
2920 /* Loop through the mac address list and delete all the entries */
2921 static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2924 struct list_head *entry, *next;
2925 if (list_empty(&vpath->mac_addr_list))
2928 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2930 kfree((struct vxge_mac_addrs *)entry);
2934 static void vxge_napi_del_all(struct vxgedev *vdev)
2937 if (vdev->config.intr_type != MSI_X)
2938 netif_napi_del(&vdev->napi);
2940 for (i = 0; i < vdev->no_of_vpath; i++)
2941 netif_napi_del(&vdev->vpaths[i].ring.napi);
2945 static int do_vxge_close(struct net_device *dev, int do_io)
2947 enum vxge_hw_status status;
2948 struct vxgedev *vdev;
2949 struct __vxge_hw_device *hldev;
2951 u64 val64, vpath_vector;
2952 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2953 dev->name, __func__, __LINE__);
2955 vdev = netdev_priv(dev);
2956 hldev = pci_get_drvdata(vdev->pdev);
2958 if (unlikely(!is_vxge_card_up(vdev)))
2961 /* If vxge_handle_crit_err task is executing,
2962 * wait till it completes. */
2963 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2967 /* Put the vpath back in normal mode */
2968 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2969 status = vxge_hw_mgmt_reg_read(vdev->devh,
2970 vxge_hw_mgmt_reg_type_mrpcim,
2973 struct vxge_hw_mrpcim_reg,
2974 rts_mgr_cbasin_cfg),
2976 if (status == VXGE_HW_OK) {
2977 val64 &= ~vpath_vector;
2978 status = vxge_hw_mgmt_reg_write(vdev->devh,
2979 vxge_hw_mgmt_reg_type_mrpcim,
2982 struct vxge_hw_mrpcim_reg,
2983 rts_mgr_cbasin_cfg),
2987 /* Remove function 0 from promiscuous mode */
2988 vxge_hw_mgmt_reg_write(vdev->devh,
2989 vxge_hw_mgmt_reg_type_mrpcim,
2991 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2992 rxmac_authorize_all_addr),
2995 vxge_hw_mgmt_reg_write(vdev->devh,
2996 vxge_hw_mgmt_reg_type_mrpcim,
2998 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2999 rxmac_authorize_all_vid),
3006 del_timer_sync(&vdev->vp_lockup_timer);
3008 del_timer_sync(&vdev->vp_reset_timer);
3011 vxge_hw_device_wait_receive_idle(hldev);
3013 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3016 if (vdev->config.intr_type != MSI_X)
3017 napi_disable(&vdev->napi);
3019 for (i = 0; i < vdev->no_of_vpath; i++)
3020 napi_disable(&vdev->vpaths[i].ring.napi);
3023 netif_carrier_off(vdev->ndev);
3024 netdev_notice(vdev->ndev, "Link Down\n");
3025 netif_tx_stop_all_queues(vdev->ndev);
3027 /* Note that at this point xmit() is stopped by the upper layer */
3029 vxge_hw_device_intr_disable(vdev->devh);
3033 vxge_napi_del_all(vdev);
3036 vxge_reset_all_vpaths(vdev);
3038 vxge_close_vpaths(vdev, 0);
3040 vxge_debug_entryexit(VXGE_TRACE,
3041 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
3043 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
3050 * @dev: device pointer.
3052 * This is the stop entry point of the driver. It needs to undo exactly
3053 * whatever was done by the open entry point, thus it's usually referred to
3054 * as the close function. Among other things this function mainly stops the
3055 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3056 * Return value: '0' on success and an appropriate (-)ve integer as
3057 * defined in errno.h file on failure.
3059 static int vxge_close(struct net_device *dev)
3061 do_vxge_close(dev, 1);
3067 * @dev: net device pointer.
3068 * @new_mtu :the new MTU size for the device.
3070 * A driver entry point to change MTU size for the device. Before changing
3071 * the MTU the device must be stopped.
3073 static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3075 struct vxgedev *vdev = netdev_priv(dev);
3077 vxge_debug_entryexit(vdev->level_trace,
3078 "%s:%d", __func__, __LINE__);
3079 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
3080 vxge_debug_init(vdev->level_err,
3081 "%s: mtu size is invalid", dev->name);
3085 /* check if device is down already */
3086 if (unlikely(!is_vxge_card_up(vdev))) {
3087 /* just store the new value; it will be used later in open() */
3089 vxge_debug_init(vdev->level_err,
3090 "%s", "device is down on MTU change");
3094 vxge_debug_init(vdev->level_trace,
3095 "trying to apply new MTU %d", new_mtu);
3097 if (vxge_close(dev))
3101 vdev->mtu = new_mtu;
3106 vxge_debug_init(vdev->level_trace,
3107 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3109 vxge_debug_entryexit(vdev->level_trace,
3110 "%s:%d Exiting...", __func__, __LINE__);
3117 * @dev: pointer to the device structure
3118 * @stats: pointer to struct rtnl_link_stats64
3121 static struct rtnl_link_stats64 *
3122 vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3124 struct vxgedev *vdev = netdev_priv(dev);
3127 /* net_stats already zeroed by caller */
3128 for (k = 0; k < vdev->no_of_vpath; k++) {
3129 struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
3130 struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
3132 u64 packets, bytes, multicast;
3135 start = u64_stats_fetch_begin(&rxstats->syncp);
3137 packets = rxstats->rx_frms;
3138 multicast = rxstats->rx_mcast;
3139 bytes = rxstats->rx_bytes;
3140 } while (u64_stats_fetch_retry(&rxstats->syncp, start));
3142 net_stats->rx_packets += packets;
3143 net_stats->rx_bytes += bytes;
3144 net_stats->multicast += multicast;
3146 net_stats->rx_errors += rxstats->rx_errors;
3147 net_stats->rx_dropped += rxstats->rx_dropped;
3150 start = u64_stats_fetch_begin(&txstats->syncp);
3152 packets = txstats->tx_frms;
3153 bytes = txstats->tx_bytes;
3154 } while (u64_stats_fetch_retry(&txstats->syncp, start));
3156 net_stats->tx_packets += packets;
3157 net_stats->tx_bytes += bytes;
3158 net_stats->tx_errors += txstats->tx_errors;
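/*
 * For reference, a minimal sketch (assumed, not shown in this file) of the
 * writer side of the u64_stats seqcount pattern read above; the rx/tx
 * completion paths would update the counters like so:
 *
 *	u64_stats_update_begin(&rxstats->syncp);
 *	rxstats->rx_frms++;
 *	rxstats->rx_bytes += skb->len;
 *	u64_stats_update_end(&rxstats->syncp);
 */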
3164 static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
3166 enum vxge_hw_status status;
3169 /* Timestamp is passed to the driver via the FCS, therefore we
3170 * must disable the FCS stripping by the adapter. Since this is
3171 * required for the driver to load (due to a hardware bug),
3172 * there is no need to do anything special here.
3174 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3175 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3176 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3178 status = vxge_hw_mgmt_reg_write(devh,
3179 vxge_hw_mgmt_reg_type_mrpcim,
3181 offsetof(struct vxge_hw_mrpcim_reg,
3184 vxge_hw_device_flush_io(devh);
3185 devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
3189 static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3191 struct hwtstamp_config config;
3194 if (copy_from_user(&config, data, sizeof(config)))
3197 /* reserved for future extensions */
3201 /* Transmit HW Timestamp not supported */
3202 switch (config.tx_type) {
3203 case HWTSTAMP_TX_OFF:
3205 case HWTSTAMP_TX_ON:
3210 switch (config.rx_filter) {
3211 case HWTSTAMP_FILTER_NONE:
3213 config.rx_filter = HWTSTAMP_FILTER_NONE;
3216 case HWTSTAMP_FILTER_ALL:
3217 case HWTSTAMP_FILTER_SOME:
3218 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3219 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3220 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3221 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3222 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3223 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3224 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3225 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3226 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3227 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3228 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3229 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3230 if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
3234 config.rx_filter = HWTSTAMP_FILTER_ALL;
3241 for (i = 0; i < vdev->no_of_vpath; i++)
3242 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3244 if (copy_to_user(data, &config, sizeof(config)))
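/*
 * Illustrative user-space counterpart (assumes <linux/net_tstamp.h>,
 * <linux/sockios.h>, <string.h> and an open AF_INET socket fd): the
 * handler above is reached through the standard SIOCSHWTSTAMP request:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_OFF,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */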
3252 * @dev: Device pointer.
3253 * @ifr: An IOCTL specific structure, that can contain a pointer to
3254 * a proprietary structure used to pass information to the driver.
3255 * @cmd: This is used to distinguish between the different commands that
3256 * can be passed to the IOCTL functions.
3258 * Entry point for the Ioctl.
3260 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3262 struct vxgedev *vdev = netdev_priv(dev);
3267 ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
3280 * @dev: pointer to net device structure
3282 * Watchdog for transmit side.
3283 * This function is triggered if the Tx Queue is stopped
3284 * for a pre-defined amount of time when the Interface is still up.
3286 static void vxge_tx_watchdog(struct net_device *dev)
3288 struct vxgedev *vdev;
3290 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3292 vdev = netdev_priv(dev);
3294 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3296 schedule_work(&vdev->reset_task);
3297 vxge_debug_entryexit(VXGE_TRACE,
3298 "%s:%d Exiting...", __func__, __LINE__);
3302 * vxge_vlan_rx_add_vid
3303 * @dev: net device pointer.
3306 * Add the vlan id to the devices vlan id table
3309 vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3311 struct vxgedev *vdev = netdev_priv(dev);
3312 struct vxge_vpath *vpath;
3315 /* Add this vlan id to the vid table */
3316 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3317 vpath = &vdev->vpaths[vp_id];
3318 if (!vpath->is_open)
3320 vxge_hw_vpath_vid_add(vpath->handle, vid);
3322 set_bit(vid, vdev->active_vlans);
3326 * vxge_vlan_rx_kill_vid
3327 * @dev: net device pointer.
3330 * Remove the vlan id from the device's vlan id table
3333 vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3335 struct vxgedev *vdev = netdev_priv(dev);
3336 struct vxge_vpath *vpath;
3339 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3341 /* Delete this vlan from the vid table */
3342 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3343 vpath = &vdev->vpaths[vp_id];
3344 if (!vpath->is_open)
3346 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3348 vxge_debug_entryexit(VXGE_TRACE,
3349 "%s:%d Exiting...", __func__, __LINE__);
3350 clear_bit(vid, vdev->active_vlans);
3353 static const struct net_device_ops vxge_netdev_ops = {
3354 .ndo_open = vxge_open,
3355 .ndo_stop = vxge_close,
3356 .ndo_get_stats64 = vxge_get_stats64,
3357 .ndo_start_xmit = vxge_xmit,
3358 .ndo_validate_addr = eth_validate_addr,
3359 .ndo_set_rx_mode = vxge_set_multicast,
3360 .ndo_do_ioctl = vxge_ioctl,
3361 .ndo_set_mac_address = vxge_set_mac_addr,
3362 .ndo_change_mtu = vxge_change_mtu,
3363 .ndo_fix_features = vxge_fix_features,
3364 .ndo_set_features = vxge_set_features,
3365 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3366 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3367 .ndo_tx_timeout = vxge_tx_watchdog,
3368 #ifdef CONFIG_NET_POLL_CONTROLLER
3369 .ndo_poll_controller = vxge_netpoll,
3373 static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3374 struct vxge_config *config,
3375 int high_dma, int no_of_vpath,
3376 struct vxgedev **vdev_out)
3378 struct net_device *ndev;
3379 enum vxge_hw_status status = VXGE_HW_OK;
3380 struct vxgedev *vdev;
3381 int ret = 0, no_of_queue = 1;
3385 if (config->tx_steering_type)
3386 no_of_queue = no_of_vpath;
3388 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3392 vxge_hw_device_trace_level_get(hldev),
3393 "%s : device allocation failed", __func__);
3398 vxge_debug_entryexit(
3399 vxge_hw_device_trace_level_get(hldev),
3400 "%s: %s:%d Entering...",
3401 ndev->name, __func__, __LINE__);
3403 vdev = netdev_priv(ndev);
3404 memset(vdev, 0, sizeof(struct vxgedev));
3408 vdev->pdev = hldev->pdev;
3409 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3411 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3413 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3415 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3416 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3417 NETIF_F_TSO | NETIF_F_TSO6 |
3419 if (vdev->config.rth_steering != NO_STEERING)
3420 ndev->hw_features |= NETIF_F_RXHASH;
3422 ndev->features |= ndev->hw_features |
3423 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3425 /* Driver entry points */
3426 ndev->irq = vdev->pdev->irq;
3427 ndev->base_addr = (unsigned long) hldev->bar0;
3429 ndev->netdev_ops = &vxge_netdev_ops;
3431 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3432 INIT_WORK(&vdev->reset_task, vxge_reset);
3434 vxge_initialize_ethtool_ops(ndev);
3436 /* Allocate memory for vpath */
3437 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3438 no_of_vpath, GFP_KERNEL);
3439 if (!vdev->vpaths) {
3440 vxge_debug_init(VXGE_ERR,
3441 "%s: vpath memory allocation failed",
3447 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3448 "%s : checksuming enabled", __func__);
3451 ndev->features |= NETIF_F_HIGHDMA;
3452 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3453 "%s : using High DMA", __func__);
3456 ret = register_netdev(ndev);
3458 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3459 "%s: %s : device registration failed!",
3460 ndev->name, __func__);
3464 /* Set the factory defined MAC address initially */
3465 ndev->addr_len = ETH_ALEN;
3467 /* Make Link state as off at this point, when the Link change
3468 * interrupt comes the state will be automatically changed to
3471 netif_carrier_off(ndev);
3473 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3474 "%s: Ethernet device registered",
3480 /* Resetting the Device stats */
3481 status = vxge_hw_mrpcim_stats_access(
3483 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3488 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3490 vxge_hw_device_trace_level_get(hldev),
3491 "%s: device stats clear returns"
3492 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3494 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3495 "%s: %s:%d Exiting...",
3496 ndev->name, __func__, __LINE__);
3500 kfree(vdev->vpaths);
3508 * vxge_device_unregister
3510 * This function will unregister and free the network device.
3512 static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3514 struct vxgedev *vdev;
3515 struct net_device *dev;
3519 vdev = netdev_priv(dev);
3521 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3522 __func__, __LINE__);
3524 strncpy(buf, dev->name, IFNAMSIZ);
3526 flush_work_sync(&vdev->reset_task);
3528 /* in 2.6 will call stop() if device is up */
3529 unregister_netdev(dev);
3531 kfree(vdev->vpaths);
3533 /* we are safe to free it now */
3536 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3538 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3539 __func__, __LINE__);
3543 * vxge_callback_crit_err
3545 * This function is called by the alarm handler in interrupt context.
3546 * Driver must analyze it based on the event type.
3549 vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3550 enum vxge_hw_event type, u64 vp_id)
3552 struct net_device *dev = hldev->ndev;
3553 struct vxgedev *vdev = netdev_priv(dev);
3554 struct vxge_vpath *vpath = NULL;
3557 vxge_debug_entryexit(vdev->level_trace,
3558 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3560 /* Note: This event type should be used for device wide
3561 * indications only - Serious errors, Slot freeze and critical errors
3563 vdev->cric_err_event = type;
3565 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3566 vpath = &vdev->vpaths[vpath_idx];
3567 if (vpath->device_id == vp_id)
3571 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3572 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3573 vxge_debug_init(VXGE_ERR,
3574 "%s: Slot is frozen", vdev->ndev->name);
3575 } else if (type == VXGE_HW_EVENT_SERR) {
3576 vxge_debug_init(VXGE_ERR,
3577 "%s: Encountered Serious Error",
3579 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3580 vxge_debug_init(VXGE_ERR,
3581 "%s: Encountered Critical Error",
3585 if ((type == VXGE_HW_EVENT_SERR) ||
3586 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3587 if (unlikely(vdev->exec_mode))
3588 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3589 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3590 vxge_hw_device_mask_all(hldev);
3591 if (unlikely(vdev->exec_mode))
3592 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3593 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3594 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3596 if (unlikely(vdev->exec_mode))
3597 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3599 /* check if this vpath is already set for reset */
3600 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3602 /* disable interrupts for this vpath */
3603 vxge_vpath_intr_disable(vdev, vpath_idx);
3605 /* stop the queue for this vpath */
3606 netif_tx_stop_queue(vpath->fifo.txq);
3611 vxge_debug_entryexit(vdev->level_trace,
3612 "%s: %s:%d Exiting...",
3613 vdev->ndev->name, __func__, __LINE__);
3616 static void verify_bandwidth(void)
3618 int i, band_width, total = 0, equal_priority = 0;
3620 /* 1. If user enters 0 for some fifo, give equal priority to all */
3621 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3622 if (bw_percentage[i] == 0) {
3628 if (!equal_priority) {
3629 /* 2. If sum exceeds 100, give equal priority to all */
3630 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3631 if (bw_percentage[i] == 0xFF)
3634 total += bw_percentage[i];
3635 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3642 if (!equal_priority) {
3643 /* Is all the bandwidth consumed? */
3644 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3645 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3646 /* Split rest of bw equally among next VPs*/
3648 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3649 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3650 if (band_width < 2) /* min of 2% */
3653 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3659 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3663 if (equal_priority) {
3664 vxge_debug_init(VXGE_ERR,
3665 "%s: Assigning equal bandwidth to all the vpaths",
3667 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3668 VXGE_HW_MAX_VIRTUAL_PATHS;
3669 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3670 bw_percentage[i] = bw_percentage[0];
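/*
 * Worked example (illustrative): with 17 vpaths and the user supplying
 * bw_percentage=40,40 on the module command line, total = 80, so the
 * remaining 20% is split across the other 15 vpaths. 20 / 15 rounds
 * below the 2% floor, so each of them is clamped up to 2%.
 */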
3675 * Vpath configuration
3677 static int __devinit vxge_config_vpaths(
3678 struct vxge_hw_device_config *device_config,
3679 u64 vpath_mask, struct vxge_config *config_param)
3681 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3682 u32 txdl_size, txdl_per_memblock;
3684 temp = driver_config->vpath_per_dev;
3685 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3686 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3687 /* No more CPUs. Return the vpath count as zero. */
3688 if (driver_config->g_no_cpus == -1)
3691 if (!driver_config->g_no_cpus)
3692 driver_config->g_no_cpus = num_online_cpus();
3694 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3695 if (!driver_config->vpath_per_dev)
3696 driver_config->vpath_per_dev = 1;
3698 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3699 if (!vxge_bVALn(vpath_mask, i, 1))
3703 if (default_no_vpath < driver_config->vpath_per_dev)
3704 driver_config->vpath_per_dev = default_no_vpath;
3706 driver_config->g_no_cpus = driver_config->g_no_cpus -
3707 (driver_config->vpath_per_dev * 2);
3708 if (driver_config->g_no_cpus <= 0)
3709 driver_config->g_no_cpus = -1;
3712 if (driver_config->vpath_per_dev == 1) {
3713 vxge_debug_ll_config(VXGE_TRACE,
3714 "%s: Disable tx and rx steering, "
3715 "as single vpath is configured", VXGE_DRIVER_NAME);
3716 config_param->rth_steering = NO_STEERING;
3717 config_param->tx_steering_type = NO_STEERING;
3718 device_config->rth_en = 0;
3721 /* configure bandwidth */
3722 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3723 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3725 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3726 device_config->vp_config[i].vp_id = i;
3727 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3728 if (no_of_vpaths < driver_config->vpath_per_dev) {
3729 if (!vxge_bVALn(vpath_mask, i, 1)) {
3730 vxge_debug_ll_config(VXGE_TRACE,
3731 "%s: vpath: %d is not available",
3732 VXGE_DRIVER_NAME, i);
3735 vxge_debug_ll_config(VXGE_TRACE,
3736 "%s: vpath: %d available",
3737 VXGE_DRIVER_NAME, i);
3741 vxge_debug_ll_config(VXGE_TRACE,
3742 "%s: vpath: %d is not configured, "
3743 "max_config_vpath exceeded",
3744 VXGE_DRIVER_NAME, i);
3748 /* Configure Tx fifo's */
3749 device_config->vp_config[i].fifo.enable =
3750 VXGE_HW_FIFO_ENABLE;
3751 device_config->vp_config[i].fifo.max_frags =
3753 device_config->vp_config[i].fifo.memblock_size =
3754 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3756 txdl_size = device_config->vp_config[i].fifo.max_frags *
3757 sizeof(struct vxge_hw_fifo_txd);
3758 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3760 device_config->vp_config[i].fifo.fifo_blocks =
3761 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3763 device_config->vp_config[i].fifo.intr =
3764 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3766 /* Configure tti properties */
3767 device_config->vp_config[i].tti.intr_enable =
3768 VXGE_HW_TIM_INTR_ENABLE;
3770 device_config->vp_config[i].tti.btimer_val =
3771 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
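		/* The TIM timers appear to tick in 272 ns units (an
		 * assumption inferred from this scaling): a microsecond
		 * *_VAL becomes ns via * 1000, then ticks via / 272. */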
3773 device_config->vp_config[i].tti.timer_ac_en =
3774 VXGE_HW_TIM_TIMER_AC_ENABLE;
3776 /* For msi-x with napi (each vector has a handler of its own) -
3777 * Set CI to OFF for all vpaths
3779 device_config->vp_config[i].tti.timer_ci_en =
3780 VXGE_HW_TIM_TIMER_CI_DISABLE;
3782 device_config->vp_config[i].tti.timer_ri_en =
3783 VXGE_HW_TIM_TIMER_RI_DISABLE;
3785 device_config->vp_config[i].tti.util_sel =
3786 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3788 device_config->vp_config[i].tti.ltimer_val =
3789 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3791 device_config->vp_config[i].tti.rtimer_val =
3792 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3794 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3795 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3796 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3797 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3798 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3799 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3800 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3802 /* Configure Rx rings */
3803 device_config->vp_config[i].ring.enable =
3804 VXGE_HW_RING_ENABLE;
3806 device_config->vp_config[i].ring.ring_blocks =
3807 VXGE_HW_DEF_RING_BLOCKS;
3809 device_config->vp_config[i].ring.buffer_mode =
3810 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3812 device_config->vp_config[i].ring.rxds_limit =
3813 VXGE_HW_DEF_RING_RXDS_LIMIT;
3815 device_config->vp_config[i].ring.scatter_mode =
3816 VXGE_HW_RING_SCATTER_MODE_A;
3818 /* Configure rti properties */
3819 device_config->vp_config[i].rti.intr_enable =
3820 VXGE_HW_TIM_INTR_ENABLE;
3822 device_config->vp_config[i].rti.btimer_val =
3823 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3825 device_config->vp_config[i].rti.timer_ac_en =
3826 VXGE_HW_TIM_TIMER_AC_ENABLE;
3828 device_config->vp_config[i].rti.timer_ci_en =
3829 VXGE_HW_TIM_TIMER_CI_DISABLE;
3831 device_config->vp_config[i].rti.timer_ri_en =
3832 VXGE_HW_TIM_TIMER_RI_DISABLE;
3834 device_config->vp_config[i].rti.util_sel =
3835 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3837 device_config->vp_config[i].rti.urange_a =
3839 device_config->vp_config[i].rti.urange_b =
3841 device_config->vp_config[i].rti.urange_c =
3843 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3844 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3845 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3846 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3848 device_config->vp_config[i].rti.rtimer_val =
3849 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3851 device_config->vp_config[i].rti.ltimer_val =
3852 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3854 device_config->vp_config[i].rpa_strip_vlan_tag =
3858 driver_config->vpath_per_dev = temp;
3859 return no_of_vpaths;
3862 /* initialize device configurations */
3863 static void __devinit vxge_device_config_init(
3864 struct vxge_hw_device_config *device_config,
3867 /* Used for CQRQ/SRQ. */
3868 device_config->dma_blockpool_initial =
3869 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3871 device_config->dma_blockpool_max =
3872 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3874 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3875 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3877 #ifndef CONFIG_PCI_MSI
3878 vxge_debug_init(VXGE_ERR,
3879 "%s: This Kernel does not support "
3880 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3884 /* Configure whether MSI-X or IRQ line (INTA). */
3885 switch (*intr_type) {
3887 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3891 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3895 /* Timer period between device poll */
3896 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3898 /* Configure MAC-based steering. */
3899 device_config->rts_mac_en = addr_learn_en;
3901 /* Configure Vpaths */
3902 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3904 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3906 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3907 device_config->intr_mode);
3908 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3909 device_config->device_poll_millis);
3910 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3911 device_config->rth_en);
3912 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3913 device_config->rth_it_type);
3916 static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3920 vxge_debug_init(VXGE_TRACE,
3921 "%s: %d Vpath(s) opened",
3922 vdev->ndev->name, vdev->no_of_vpath);
3924 switch (vdev->config.intr_type) {
3926 vxge_debug_init(VXGE_TRACE,
3927 "%s: Interrupt type INTA", vdev->ndev->name);
3931 vxge_debug_init(VXGE_TRACE,
3932 "%s: Interrupt type MSI-X", vdev->ndev->name);
3936 if (vdev->config.rth_steering) {
3937 vxge_debug_init(VXGE_TRACE,
3938 "%s: RTH steering enabled for TCP_IPV4",
3941 vxge_debug_init(VXGE_TRACE,
3942 "%s: RTH steering disabled", vdev->ndev->name);
3945 switch (vdev->config.tx_steering_type) {
3947 vxge_debug_init(VXGE_TRACE,
3948 "%s: Tx steering disabled", vdev->ndev->name);
3950 case TX_PRIORITY_STEERING:
3951 vxge_debug_init(VXGE_TRACE,
3952 "%s: Unsupported tx steering option",
3954 vxge_debug_init(VXGE_TRACE,
3955 "%s: Tx steering disabled", vdev->ndev->name);
3956 vdev->config.tx_steering_type = 0;
3958 case TX_VLAN_STEERING:
3959 vxge_debug_init(VXGE_TRACE,
3960 "%s: Unsupported tx steering option",
3962 vxge_debug_init(VXGE_TRACE,
3963 "%s: Tx steering disabled", vdev->ndev->name);
3964 vdev->config.tx_steering_type = 0;
3966 case TX_MULTIQ_STEERING:
3967 vxge_debug_init(VXGE_TRACE,
3968 "%s: Tx multiqueue steering enabled",
3971 case TX_PORT_STEERING:
3972 vxge_debug_init(VXGE_TRACE,
3973 "%s: Tx port steering enabled",
3977 vxge_debug_init(VXGE_ERR,
3978 "%s: Unsupported tx steering type",
3980 vxge_debug_init(VXGE_TRACE,
3981 "%s: Tx steering disabled", vdev->ndev->name);
3982 vdev->config.tx_steering_type = 0;
3985 if (vdev->config.addr_learn_en)
3986 vxge_debug_init(VXGE_TRACE,
3987 "%s: MAC Address learning enabled", vdev->ndev->name);
3989 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3990 if (!vxge_bVALn(vpath_mask, i, 1))
3992 vxge_debug_ll_config(VXGE_TRACE,
3993 "%s: MTU size - %d", vdev->ndev->name,
3994 ((struct __vxge_hw_device *)(vdev->devh))->
3995 config.vp_config[i].mtu);
3996 vxge_debug_init(VXGE_TRACE,
3997 "%s: VLAN tag stripping %s", vdev->ndev->name,
3998 ((struct __vxge_hw_device *)(vdev->devh))->
3999 config.vp_config[i].rpa_strip_vlan_tag
4000 ? "Enabled" : "Disabled");
4001 vxge_debug_ll_config(VXGE_TRACE,
4002 "%s: Max frags : %d", vdev->ndev->name,
4003 ((struct __vxge_hw_device *)(vdev->devh))->
4004 config.vp_config[i].fifo.max_frags);
4011 * vxge_pm_suspend - vxge power management suspend entry point
4014 static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
4019 * vxge_pm_resume - vxge power management resume entry point
4022 static int vxge_pm_resume(struct pci_dev *pdev)
4030 * vxge_io_error_detected - called when PCI error is detected
4031 * @pdev: Pointer to PCI device
4032 * @state: The current pci connection state
4034 * This function is called after a PCI bus error affecting
4035 * this device has been detected.
4037 static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
4038 pci_channel_state_t state)
4040 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4041 struct net_device *netdev = hldev->ndev;
4043 netif_device_detach(netdev);
4045 if (state == pci_channel_io_perm_failure)
4046 return PCI_ERS_RESULT_DISCONNECT;
4048 if (netif_running(netdev)) {
4049 /* Bring down the card, while avoiding PCI I/O */
4050 do_vxge_close(netdev, 0);
4053 pci_disable_device(pdev);
4055 return PCI_ERS_RESULT_NEED_RESET;
4059 * vxge_io_slot_reset - called after the pci bus has been reset.
4060 * @pdev: Pointer to PCI device
4062 * Restart the card from scratch, as if from a cold-boot.
4063 * At this point, the card has experienced a hard reset,
4064 * followed by fixups by BIOS, and has its config space
4065 * set up identically to what it was at cold boot.
4067 static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
4069 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4070 struct net_device *netdev = hldev->ndev;
4072 struct vxgedev *vdev = netdev_priv(netdev);
4074 if (pci_enable_device(pdev)) {
4075 netdev_err(netdev, "Cannot re-enable device after reset\n");
4076 return PCI_ERS_RESULT_DISCONNECT;
4079 pci_set_master(pdev);
4080 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
4082 return PCI_ERS_RESULT_RECOVERED;
4086 * vxge_io_resume - called when traffic can start flowing again.
4087 * @pdev: Pointer to PCI device
4089 * This callback is called when the error recovery driver tells
4090 * us that it's OK to resume normal operation.
4092 static void vxge_io_resume(struct pci_dev *pdev)
4094 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4095 struct net_device *netdev = hldev->ndev;
4097 if (netif_running(netdev)) {
4098 if (vxge_open(netdev)) {
4100 "Can't bring device back up after reset\n");
4105 netif_device_attach(netdev);
4108 static inline u32 vxge_get_num_vfs(u64 function_mode)
4110 u32 num_functions = 0;
4112 switch (function_mode) {
4113 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4114 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
4117 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4120 case VXGE_HW_FUNCTION_MODE_SRIOV:
4121 case VXGE_HW_FUNCTION_MODE_MRIOV:
4122 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
4125 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
4128 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
4131 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
4132 num_functions = 8; /* TODO */
4135 return num_functions;
4138 int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4140 struct __vxge_hw_device *hldev = vdev->devh;
4141 u32 maj, min, bld, cmaj, cmin, cbld;
4142 enum vxge_hw_status status;
4143 const struct firmware *fw;
4146 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4148 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4149 VXGE_DRIVER_NAME, fw_name);
4153 /* Load the new firmware onto the adapter */
4154 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4155 if (status != VXGE_HW_OK) {
4156 vxge_debug_init(VXGE_ERR,
4157 "%s: FW image download to adapter failed '%s'.",
4158 VXGE_DRIVER_NAME, fw_name);
4163 /* Read the version of the new firmware */
4164 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4165 if (status != VXGE_HW_OK) {
4166 vxge_debug_init(VXGE_ERR,
4167 "%s: Upgrade read version failed '%s'.",
4168 VXGE_DRIVER_NAME, fw_name);
4173 cmaj = vdev->config.device_hw_info.fw_version.major;
4174 cmin = vdev->config.device_hw_info.fw_version.minor;
4175 cbld = vdev->config.device_hw_info.fw_version.build;
4176 /* It's possible the version in /lib/firmware is not the latest version.
4177 * If so, we could get into a loop of trying to upgrade to the latest
4178 * and flashing the older version.
4180 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4186 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4189 /* Flash the adapter with the new firmware */
4190 status = vxge_hw_flash_fw(hldev);
4191 if (status != VXGE_HW_OK) {
4192 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4193 VXGE_DRIVER_NAME, fw_name);
4198 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4199 "hard reset before using, thus requiring a system reboot or a "
4200 "hotplug event.\n");
4203 release_firmware(fw);
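/*
 * For reference: VXGE_FW_VER() (defined in vxge-main.h) packs major,
 * minor and build into a single integer so the plain comparisons above
 * order versions correctly; conceptually something like
 *
 *	#define VXGE_FW_VER(maj, min, bld) \
 *		(((maj) << 16) + ((min) << 8) + (bld))
 *
 * (the exact shift widths are an assumption for illustration).
 */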
4207 static int vxge_probe_fw_update(struct vxgedev *vdev)
4213 maj = vdev->config.device_hw_info.fw_version.major;
4214 min = vdev->config.device_hw_info.fw_version.minor;
4215 bld = vdev->config.device_hw_info.fw_version.build;
4217 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4220 /* Ignore the build number when determining if the current firmware is
4221 * "too new" to load the driver
4223 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4224 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4225 "version, unable to load driver\n",
4230 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4231 * work with this driver.
4233 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4234 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4235 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4239 /* If no file is specified, determine whether the image is gPXE or not */
4240 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4242 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4243 if (vdev->devh->eprom_versions[i]) {
4249 fw_name = "vxge/X3fw-pxe.ncf";
4251 fw_name = "vxge/X3fw.ncf";
4253 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4254 /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4255 * probe, so ignore them
4257 if (ret != -EINVAL && ret != -ENOENT)
4262 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4263 VXGE_FW_VER(maj, min, 0)) {
4264 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4265 " be used with this driver.\n"
4266 "Please get the latest version from "
4267 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4268 VXGE_DRIVER_NAME, maj, min, bld);
4275 static int __devinit is_sriov_initialized(struct pci_dev *pdev)
4280 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4282 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4283 if (ctrl & PCI_SRIOV_CTRL_VFE)
4289 static const struct vxge_hw_uld_cbs vxge_callbacks = {
4290 .link_up = vxge_callback_link_up,
4291 .link_down = vxge_callback_link_down,
4292 .crit_err = vxge_callback_crit_err,
4297 * @pdev : structure containing the PCI related information of the device.
4298 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4300 * This function is called when a new PCI device gets detected and initializes
4303 * returns 0 on success and negative on failure.
4306 static int __devinit
4307 vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4309 struct __vxge_hw_device *hldev;
4310 enum vxge_hw_status status;
4314 struct vxgedev *vdev;
4315 struct vxge_config *ll_config = NULL;
4316 struct vxge_hw_device_config *device_config = NULL;
4317 struct vxge_hw_device_attr attr;
4318 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4320 struct vxge_mac_addrs *entry;
4321 static int bus = -1, device = -1;
4324 enum vxge_hw_status is_privileged;
4328 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4331 /* In SRIOV-17 mode, functions of the same adapter
4332 * can be deployed on different buses
4334 if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4338 bus = pdev->bus->number;
4339 device = PCI_SLOT(pdev->devfn);
4342 if (driver_config->config_dev_cnt &&
4343 (driver_config->config_dev_cnt !=
4344 driver_config->total_dev_cnt))
4345 vxge_debug_init(VXGE_ERR,
4346 "%s: Configured %d of %d devices",
4348 driver_config->config_dev_cnt,
4349 driver_config->total_dev_cnt);
4350 driver_config->config_dev_cnt = 0;
4351 driver_config->total_dev_cnt = 0;
4354 /* Now make the CPU-based vpath count calculation
4355 * applicable to individual functions as well.
4357 driver_config->g_no_cpus = 0;
4358 driver_config->vpath_per_dev = max_config_vpath;
4360 driver_config->total_dev_cnt++;
4361 if (++driver_config->config_dev_cnt > max_config_dev) {
4366 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4368 if (!device_config) {
4370 vxge_debug_init(VXGE_ERR,
4371 "device_config : malloc failed %s %d",
4372 __FILE__, __LINE__);
4376 ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
4379 vxge_debug_init(VXGE_ERR,
4380 "device_config : malloc failed %s %d",
4381 __FILE__, __LINE__);
4384 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4385 ll_config->intr_type = MSI_X;
4386 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4387 ll_config->rth_steering = RTH_STEERING;
4389 /* get the default configuration parameters */
4390 vxge_hw_device_config_default_get(device_config);
4392 /* initialize configuration parameters */
4393 vxge_device_config_init(device_config, &ll_config->intr_type);
4395 ret = pci_enable_device(pdev);
4397 vxge_debug_init(VXGE_ERR,
4398 "%s : can not enable PCI device", __func__);
4402 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4403 vxge_debug_ll_config(VXGE_TRACE,
4404 "%s : using 64bit DMA", __func__);
4408 if (pci_set_consistent_dma_mask(pdev,
4409 DMA_BIT_MASK(64))) {
4410 vxge_debug_init(VXGE_ERR,
4411 "%s : unable to obtain 64bit DMA for "
4412 "consistent allocations", __func__);
4416 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4417 vxge_debug_ll_config(VXGE_TRACE,
4418 "%s : using 32bit DMA", __func__);
4424 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4426 vxge_debug_init(VXGE_ERR,
4427 "%s : request regions failed", __func__);
4431 pci_set_master(pdev);
4433 attr.bar0 = pci_ioremap_bar(pdev, 0);
4435 vxge_debug_init(VXGE_ERR,
4436 "%s : cannot remap io memory bar0", __func__);
4440 vxge_debug_ll_config(VXGE_TRACE,
4441 "pci ioremap bar0: %p:0x%llx",
4443 (unsigned long long)pci_resource_start(pdev, 0));
4445 status = vxge_hw_device_hw_info_get(attr.bar0,
4446 &ll_config->device_hw_info);
4447 if (status != VXGE_HW_OK) {
4448 vxge_debug_init(VXGE_ERR,
4449 "%s: Reading of hardware info failed."
4450 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4455 vpath_mask = ll_config->device_hw_info.vpath_mask;
4456 if (vpath_mask == 0) {
4457 vxge_debug_ll_config(VXGE_TRACE,
4458 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4463 vxge_debug_ll_config(VXGE_TRACE,
4464 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4465 (unsigned long long)vpath_mask);
4467 function_mode = ll_config->device_hw_info.function_mode;
4468 host_type = ll_config->device_hw_info.host_type;
4469 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4470 ll_config->device_hw_info.func_id);
4472 /* Check how many vpaths are available */
4473 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4474 if (!((vpath_mask) & vxge_mBIT(i)))
4476 max_vpath_supported++;
4480 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4482 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4483 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4484 (ll_config->intr_type != INTA)) {
4485 ret = pci_enable_sriov(pdev, num_vfs);
4487 vxge_debug_ll_config(VXGE_ERR,
4488 "Failed in enabling SRIOV mode: %d\n", ret);
4489 /* No need to fail out, as an error here is non-fatal */
4493 * Configure vpaths and get the driver-configured number of vpaths,
4494 * which is less than or equal to the maximum vpaths per function.
4496 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4498 vxge_debug_ll_config(VXGE_ERR,
4499 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4504 /* Setting driver callbacks */
4505 attr.uld_callbacks = &vxge_callbacks;
4507 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4508 if (status != VXGE_HW_OK) {
4509 vxge_debug_init(VXGE_ERR,
4510 "Failed to initialize device (%d)", status);
	if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
			ll_config->device_hw_info.fw_version.minor,
			ll_config->device_hw_info.fw_version.build) >=
	    VXGE_EPROM_FW_VER) {
		struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];

		status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
					VXGE_DRIVER_NAME);
			/* This is a non-fatal error, continue */
		}

		for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
			hldev->eprom_versions[i] = img[i].version;
			if (!img[i].is_valid)
				break;
			vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
					"%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
					VXGE_EPROM_IMG_MAJOR(img[i].version),
					VXGE_EPROM_IMG_MINOR(img[i].version),
					VXGE_EPROM_IMG_FIX(img[i].version),
					VXGE_EPROM_IMG_BUILD(img[i].version));
		}
	}

	/* If FCS stripping is not disabled in the MAC, fail the driver load */
	status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC,"
				" failing driver load", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit4;
	}
	/*
	 * Always enable HWTS. This will always cause the FCS to be invalid,
	 * due to the fact that HWTS is using the FCS as the location of the
	 * timestamp. The HW FCS checking will still correctly determine if
	 * there is a valid checksum, and the FCS is being removed by the
	 * driver anyway. So no functionality is being lost. Since it is
	 * always enabled, we now simply use the ioctl call to set whether or
	 * not the driver should be paying attention to the HWTS.
	 */
	if (is_privileged == VXGE_HW_OK) {
		status = vxge_timestamp_config(hldev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
					VXGE_DRIVER_NAME);
			ret = -EFAULT;
			goto _exit4;
		}
	}
	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);

	/* set private device info */
	pci_set_drvdata(pdev, hldev);
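	/*
	 * Default link-layer configuration: Jenkins-hash RTH keyed on
	 * TCP/IPv4 only, with pause-frame flow control enabled in both
	 * directions.
	 */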
	ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
	ll_config->addr_learn_en = addr_learn_en;
	ll_config->rth_algorithm = RTH_ALG_JENKINS;
	ll_config->rth_hash_type_tcpipv4 = 1;
	ll_config->rth_hash_type_ipv4 = 0;
	ll_config->rth_hash_type_tcpipv6 = 0;
	ll_config->rth_hash_type_ipv6 = 0;
	ll_config->rth_hash_type_tcpipv6ex = 0;
	ll_config->rth_hash_type_ipv6ex = 0;
	ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
	ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
	ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
	ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
				   &vdev);
	if (ret) {
		ret = -EINVAL;
		goto _exit4;
	}

	ret = vxge_probe_fw_update(vdev);
	if (ret)
		goto _exit5;
	vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	/* set private HW device info */
	vdev->mtu = VXGE_HW_DEFAULT_MTU;
	vdev->bar0 = attr.bar0;
	vdev->max_vpath_supported = max_vpath_supported;
	vdev->no_of_vpath = no_of_vpath;
	/* Bind each configured virtual path (device index i) to a driver
	 * vpath slot (index j) */
	for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		if (j >= vdev->no_of_vpath)
			break;

		vdev->vpaths[j].is_configured = 1;
		vdev->vpaths[j].device_id = i;
		vdev->vpaths[j].ring.driver_id = j;
		vdev->vpaths[j].vdev = vdev;
		vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
		memcpy((u8 *)vdev->vpaths[j].macaddr,
				ll_config->device_hw_info.mac_addrs[i],
				ETH_ALEN);

		/* Initialize the mac address list header */
		INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);

		vdev->vpaths[j].mac_addr_cnt = 0;
		vdev->vpaths[j].mcast_addr_cnt = 0;
		j++;
	}

	vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
	vdev->max_config_port = max_config_port;

	vdev->vlan_tag_strip = vlan_tag_strip;

	/* map the hashing selector table to the configured vpaths */
	for (i = 0; i < vdev->no_of_vpath; i++)
		vdev->vpath_selector[i] = vpath_selector[i];

	macaddr = (u8 *)vdev->vpaths[0].macaddr;
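	/*
	 * NUL-terminate the identification strings read from hardware
	 * before passing them to the log messages below.
	 */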
	ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';

	vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
		vdev->ndev->name, ll_config->device_hw_info.serial_number);

	vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
		vdev->ndev->name, ll_config->device_hw_info.part_number);

	vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
		vdev->ndev->name, ll_config->device_hw_info.product_desc);

	vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
		vdev->ndev->name, macaddr);

	vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
		vdev->ndev->name, vxge_hw_device_link_width_get(hldev));

	vxge_debug_init(VXGE_TRACE,
		"%s: Firmware version : %s Date : %s", vdev->ndev->name,
		ll_config->device_hw_info.fw_version.version,
		ll_config->device_hw_info.fw_date.date);
	switch (ll_config->device_hw_info.function_mode) {
	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
		vxge_debug_init(VXGE_TRACE,
			"%s: Single Function Mode Enabled", vdev->ndev->name);
		break;
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
		vxge_debug_init(VXGE_TRACE,
			"%s: Multi Function Mode Enabled", vdev->ndev->name);
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV:
		vxge_debug_init(VXGE_TRACE,
			"%s: Single Root IOV Mode Enabled", vdev->ndev->name);
		break;
	case VXGE_HW_FUNCTION_MODE_MRIOV:
		vxge_debug_init(VXGE_TRACE,
			"%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
		break;
	}
	vxge_print_parm(vdev, vpath_mask);

	/* Store the fw version for ethtool option */
	strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
	memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
	memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
	/* Copy the station mac address to the list */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
		if (NULL == entry) {
			vxge_debug_init(VXGE_ERR,
				"%s: mac_addr_list : memory allocation failed",
				vdev->ndev->name);
			ret = -ENOMEM;
			goto _exit6;
		}
		macaddr = (u8 *)&entry->macaddr;
		memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
		list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
		vdev->vpaths[i].mac_addr_cnt = 1;
	}
	kfree(device_config);

	/*
	 * INTA is shared in multi-function mode. This is unlike the INTA
	 * implementation in MR mode, where each VH has its own INTA message.
	 * - INTA is masked (disabled) as long as at least one function sets
	 *   its TITAN_MASK_ALL_INT.ALARM bit.
	 * - INTA is unmasked (enabled) when all enabled functions have cleared
	 *   their own TITAN_MASK_ALL_INT.ALARM bit.
	 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
	 * Though this driver leaves the top level interrupts unmasked while
	 * leaving the required module interrupt bits masked on exit, there
	 * could be a rogue driver around that does not follow this procedure,
	 * resulting in a failure to generate interrupts. The following code is
	 * present to prevent such a failure.
	 */
	if (ll_config->device_hw_info.function_mode ==
	    VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
		if (vdev->config.intr_type == INTA)
			vxge_hw_device_unmask_all(hldev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	kfree(ll_config);
	return 0;
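	/*
	 * Error unwind: each label below releases what was acquired after
	 * the previous label's resource, in reverse order of setup.
	 */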
_exit6:
	for (i = 0; i < vdev->no_of_vpath; i++)
		vxge_free_mac_add_list(&vdev->vpaths[i]);
_exit5:
	vxge_device_unregister(hldev);
_exit4:
	pci_set_drvdata(pdev, NULL);
	vxge_hw_device_terminate(hldev);
	pci_disable_sriov(pdev);
_exit3:
	iounmap(attr.bar0);
_exit2:
	pci_release_region(pdev, 0);
_exit1:
	pci_disable_device(pdev);
_exit0:
	kfree(ll_config);
	kfree(device_config);
	driver_config->config_dev_cnt--;
	driver_config->total_dev_cnt--;
	return ret;
}
/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void __devexit vxge_remove(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;
	int i;

	hldev = pci_get_drvdata(pdev);
	if (hldev == NULL)
		return;

	vdev = netdev_priv(hldev->ndev);

	vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
	vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
			__func__);

	for (i = 0; i < vdev->no_of_vpath; i++)
		vxge_free_mac_add_list(&vdev->vpaths[i]);

	vxge_device_unregister(hldev);
	pci_set_drvdata(pdev, NULL);
	/* Do not call pci_disable_sriov here, as it will break child devices */
	vxge_hw_device_terminate(hldev);
	iounmap(vdev->bar0);
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
	driver_config->config_dev_cnt--;
	driver_config->total_dev_cnt--;

	vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
			__func__, __LINE__);
	vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
			     __LINE__);
}
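/* PCI error-recovery (AER) callbacks for this driver */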
static struct pci_error_handlers vxge_err_handler = {
	.error_detected = vxge_io_error_detected,
	.slot_reset = vxge_io_slot_reset,
	.resume = vxge_io_resume,
};
static struct pci_driver vxge_driver = {
	.name = VXGE_DRIVER_NAME,
	.id_table = vxge_id_table,
	.probe = vxge_probe,
	.remove = __devexit_p(vxge_remove),
#ifdef CONFIG_PM
	.suspend = vxge_pm_suspend,
	.resume = vxge_pm_resume,
#endif
	.err_handler = &vxge_err_handler,
};
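/*
 * Module entry point: allocate the global driver_config bookkeeping
 * structure, then register the PCI driver.
 */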
static int __init vxge_starter(void)
{
	int ret;

	pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
	pr_info("Driver version: %s\n", DRV_VERSION);

	driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
	if (!driver_config)
		return -ENOMEM;

	ret = pci_register_driver(&vxge_driver);
	if (ret) {
		kfree(driver_config);
		return ret;
	}

	if (driver_config->config_dev_cnt &&
	    (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
		vxge_debug_init(VXGE_ERR,
			"%s: Configured %d of %d devices",
			VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
			driver_config->total_dev_cnt);

	return ret;
}
static void __exit vxge_closer(void)
{
	pci_unregister_driver(&vxge_driver);
	kfree(driver_config);
}

module_init(vxge_starter);
module_exit(vxge_closer);