net: remove interrupt.h inclusion from netdevice.h
[linux-2.6-block.git] / drivers / net / vxge / vxge-main.c
1/******************************************************************************
2* This software may be used and distributed according to the terms of
3* the GNU General Public License (GPL), incorporated herein by reference.
4* Drivers based on or derived from this code fall under the GPL and must
5* retain the authorship, copyright and license notice. This file is not
6* a complete program and may only be used when the entire operating
7* system is licensed under the GPL.
8* See the file COPYING in this distribution for more information.
9*
10* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11* Virtualized Server Adapter.
12* Copyright(c) 2002-2010 Exar Corp.
13*
14* The module loadable parameters that are supported by the driver and a brief
15* explanation of all the variables:
16* vlan_tag_strip:
17* Strip VLAN Tag enable/disable. Instructs the device to remove
18* the VLAN tag from all received tagged frames that are not
19* replicated at the internal L2 switch.
20* 0 - Do not strip the VLAN tag.
21* 1 - Strip the VLAN tag.
22*
23* addr_learn_en:
24* Enable learning the MAC address of the guest OS interface in
25* a virtualization environment.
26* 0 - DISABLE
27* 1 - ENABLE
28*
29* max_config_port:
30* Maximum number of ports to be supported.
31* MIN - 1 and MAX - 2
32*
33* max_config_vpath:
34* This configures the maximum number of VPATHs configured for each
35* device function.
36* MIN - 1 and MAX - 17
37*
38* max_config_dev:
39* This configures the maximum number of device functions to be enabled.
40* MIN - 1 and MAX - 17
41*
42******************************************************************************/
43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/if_vlan.h>
47#include <linux/interrupt.h>
48#include <linux/pci.h>
49#include <linux/slab.h>
50#include <linux/tcp.h>
51#include <net/ip.h>
52#include <linux/netdevice.h>
53#include <linux/etherdevice.h>
54#include <linux/firmware.h>
55#include <linux/net_tstamp.h>
56#include <linux/prefetch.h>
57#include "vxge-main.h"
58#include "vxge-reg.h"
59
60MODULE_LICENSE("Dual BSD/GPL");
61MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
62 "Virtualized Server Adapter");
63
64static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
65 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
66 PCI_ANY_ID},
67 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
68 PCI_ANY_ID},
69 {0}
70};
71
72MODULE_DEVICE_TABLE(pci, vxge_id_table);
73
74VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
75VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
76VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
77VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
78VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
79VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
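/*
 * Illustrative note (not part of the original sources): VXGE_MODULE_PARAM_INT()
 * is assumed to declare a static int with the given default and register it via
 * module_param() (the real macro lives in vxge-main.h), so the knobs documented
 * in the header comment can be overridden at load time, e.g.
 * "modprobe vxge vlan_tag_strip=0 max_config_vpath=4".
 */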
80
81static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
82 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
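/*
 * Note (editorial): vpath_selector[n - 1] is used in vxge_get_vpath_no() below
 * as a mask when steering by TCP port sum; each entry is the smallest
 * power-of-two-minus-one mask that can index n virtual paths.
 */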
83static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
84 {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
85module_param_array(bw_percentage, uint, NULL, 0);
86
87static struct vxge_drv_config *driver_config;
88
89static inline int is_vxge_card_up(struct vxgedev *vdev)
90{
91 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
92}
93
94static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
95{
96 struct sk_buff **skb_ptr = NULL;
97 struct sk_buff **temp;
98#define NR_SKB_COMPLETED 128
99 struct sk_buff *completed[NR_SKB_COMPLETED];
100 int more;
101
102 do {
103 more = 0;
104 skb_ptr = completed;
105
106 if (__netif_tx_trylock(fifo->txq)) {
107 vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
108 NR_SKB_COMPLETED, &more);
109 __netif_tx_unlock(fifo->txq);
110 }
111
112 /* free SKBs */
113 for (temp = completed; temp != skb_ptr; temp++)
114 dev_kfree_skb_irq(*temp);
115 } while (more);
116}
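/*
 * Note (editorial): completions are collected in batches of NR_SKB_COMPLETED
 * under __netif_tx_trylock(), and the skbs are then freed outside the queue
 * lock with dev_kfree_skb_irq() since this helper may run in interrupt
 * context; the "more" flag makes the loop go around again whenever a batch
 * fills up.
 */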
117
118static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
119{
120 int i;
121
122 /* Complete all transmits */
123 for (i = 0; i < vdev->no_of_vpath; i++)
124 VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
125}
126
127static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
128{
129 int i;
130 struct vxge_ring *ring;
131
132 /* Complete all receives*/
133 for (i = 0; i < vdev->no_of_vpath; i++) {
134 ring = &vdev->vpaths[i].ring;
135 vxge_hw_vpath_poll_rx(ring->handle);
136 }
137}
138
703da5a1
RV
139/*
140 * vxge_callback_link_up
141 *
142 * This function is called during interrupt context to notify link up state
143 * change.
144 */
528f7272 145static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
703da5a1
RV
146{
147 struct net_device *dev = hldev->ndev;
5f54cebb 148 struct vxgedev *vdev = netdev_priv(dev);
703da5a1
RV
149
150 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
151 vdev->ndev->name, __func__, __LINE__);
75f5e1c6 152 netdev_notice(vdev->ndev, "Link Up\n");
703da5a1
RV
153 vdev->stats.link_up++;
154
155 netif_carrier_on(vdev->ndev);
d03848e0 156 netif_tx_wake_all_queues(vdev->ndev);
703da5a1
RV
157
158 vxge_debug_entryexit(VXGE_TRACE,
159 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
160}
161
162/*
163 * vxge_callback_link_down
164 *
165 * This function is called during interrupt context to notify link down state
166 * change.
167 */
528f7272 168static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
703da5a1
RV
169{
170 struct net_device *dev = hldev->ndev;
5f54cebb 171 struct vxgedev *vdev = netdev_priv(dev);
703da5a1
RV
172
173 vxge_debug_entryexit(VXGE_TRACE,
174 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
75f5e1c6 175 netdev_notice(vdev->ndev, "Link Down\n");
703da5a1
RV
176
177 vdev->stats.link_down++;
178 netif_carrier_off(vdev->ndev);
d03848e0 179 netif_tx_stop_all_queues(vdev->ndev);
703da5a1
RV
180
181 vxge_debug_entryexit(VXGE_TRACE,
182 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
183}
184
185/*
186 * vxge_rx_alloc
187 *
188 * Allocate SKB.
189 */
528f7272 190static struct sk_buff *
703da5a1
RV
191vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
192{
193 struct net_device *dev;
194 struct sk_buff *skb;
195 struct vxge_rx_priv *rx_priv;
196
197 dev = ring->ndev;
198 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
199 ring->ndev->name, __func__, __LINE__);
200
201 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
202
203 /* try to allocate skb first. this one may fail */
204 skb = netdev_alloc_skb(dev, skb_size +
205 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
206 if (skb == NULL) {
207 vxge_debug_mem(VXGE_ERR,
208 "%s: out of memory to allocate SKB", dev->name);
209 ring->stats.skb_alloc_fail++;
210 return NULL;
211 }
212
213 vxge_debug_mem(VXGE_TRACE,
214 "%s: %s:%d Skb : 0x%p", ring->ndev->name,
215 __func__, __LINE__, skb);
216
217 skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
218
219 rx_priv->skb = skb;
ea11bbe0 220 rx_priv->skb_data = NULL;
703da5a1
RV
221 rx_priv->data_size = skb_size;
222 vxge_debug_entryexit(VXGE_TRACE,
223 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
224
225 return skb;
226}
227
228/*
229 * vxge_rx_map
230 */
231static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
232{
233 struct vxge_rx_priv *rx_priv;
234 dma_addr_t dma_addr;
235
236 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
237 ring->ndev->name, __func__, __LINE__);
238 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
239
ea11bbe0
BL
240 rx_priv->skb_data = rx_priv->skb->data;
241 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
703da5a1
RV
242 rx_priv->data_size, PCI_DMA_FROMDEVICE);
243
fa15e99b 244 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
703da5a1
RV
245 ring->stats.pci_map_fail++;
246 return -EIO;
247 }
248 vxge_debug_mem(VXGE_TRACE,
249 "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
250 ring->ndev->name, __func__, __LINE__,
251 (unsigned long long)dma_addr);
252 vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
253
254 rx_priv->data_dma = dma_addr;
255 vxge_debug_entryexit(VXGE_TRACE,
256 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
257
258 return 0;
259}
260
261/*
262 * vxge_rx_initial_replenish
263 * Allocation of RxD as an initial replenish procedure.
264 */
265static enum vxge_hw_status
266vxge_rx_initial_replenish(void *dtrh, void *userdata)
267{
268 struct vxge_ring *ring = (struct vxge_ring *)userdata;
269 struct vxge_rx_priv *rx_priv;
270
271 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
272 ring->ndev->name, __func__, __LINE__);
273 if (vxge_rx_alloc(dtrh, ring,
274 VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
275 return VXGE_HW_FAIL;
276
277 if (vxge_rx_map(dtrh, ring)) {
278 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
279 dev_kfree_skb(rx_priv->skb);
280
281 return VXGE_HW_FAIL;
282 }
283 vxge_debug_entryexit(VXGE_TRACE,
284 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
285
286 return VXGE_HW_OK;
287}
288
289static inline void
290vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
291 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
292{
293
294 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
295 ring->ndev->name, __func__, __LINE__);
296 skb_record_rx_queue(skb, ring->driver_id);
297 skb->protocol = eth_type_trans(skb, ring->ndev);
298
299 ring->stats.rx_frms++;
300 ring->stats.rx_bytes += pkt_length;
301
302 if (skb->pkt_type == PACKET_MULTICAST)
303 ring->stats.rx_mcast++;
304
305 vxge_debug_rx(VXGE_TRACE,
306 "%s: %s:%d skb protocol = %d",
307 ring->ndev->name, __func__, __LINE__, skb->protocol);
308
feb990d4
MM
309 if (ring->vlgrp && ext_info->vlan &&
310 (ring->vlan_tag_strip ==
311 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
312 vlan_gro_receive(ring->napi_p, ring->vlgrp,
313 ext_info->vlan, skb);
314 else
315 napi_gro_receive(ring->napi_p, skb);
316
703da5a1
RV
317 vxge_debug_entryexit(VXGE_TRACE,
318 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
319}
320
321static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
322 struct vxge_rx_priv *rx_priv)
323{
324 pci_dma_sync_single_for_device(ring->pdev,
325 rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
326
327 vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
328 vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
329}
330
331static inline void vxge_post(int *dtr_cnt, void **first_dtr,
332 void *post_dtr, struct __vxge_hw_ring *ringh)
333{
334 int dtr_count = *dtr_cnt;
335 if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
336 if (*first_dtr)
337 vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
338 *first_dtr = post_dtr;
339 } else
340 vxge_hw_ring_rxd_post_post(ringh, post_dtr);
341 dtr_count++;
342 *dtr_cnt = dtr_count;
343}
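/*
 * Note (editorial): every VXGE_HW_RXSYNC_FREQ_CNT-th descriptor is held back
 * in *first_dtr and the previously held one is posted with a write memory
 * barrier (vxge_hw_ring_rxd_post_post_wmb); descriptors in between are posted
 * without the barrier, batching the expensive synchronisation. The caller
 * flushes the final held-back descriptor once the completion loop ends.
 */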
344
345/*
346 * vxge_rx_1b_compl
347 *
348 * If the interrupt is because of a received frame or if the receive ring
349 * contains fresh, as-yet unprocessed frames, this function is called.
350 */
42821a5b 351static enum vxge_hw_status
703da5a1
RV
352vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
353 u8 t_code, void *userdata)
354{
355 struct vxge_ring *ring = (struct vxge_ring *)userdata;
b81b3733 356 struct net_device *dev = ring->ndev;
703da5a1
RV
357 unsigned int dma_sizes;
358 void *first_dtr = NULL;
359 int dtr_cnt = 0;
360 int data_size;
361 dma_addr_t data_dma;
362 int pkt_length;
363 struct sk_buff *skb;
364 struct vxge_rx_priv *rx_priv;
365 struct vxge_hw_ring_rxd_info ext_info;
366 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
367 ring->ndev->name, __func__, __LINE__);
703da5a1
RV
368
369 do {
3f23e436 370 prefetch((char *)dtr + L1_CACHE_BYTES);
703da5a1
RV
371 rx_priv = vxge_hw_ring_rxd_private_get(dtr);
372 skb = rx_priv->skb;
373 data_size = rx_priv->data_size;
374 data_dma = rx_priv->data_dma;
ea11bbe0 375 prefetch(rx_priv->skb_data);
703da5a1
RV
376
377 vxge_debug_rx(VXGE_TRACE,
378 "%s: %s:%d skb = 0x%p",
379 ring->ndev->name, __func__, __LINE__, skb);
380
381 vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
382 pkt_length = dma_sizes;
383
22fa125e
SH
384 pkt_length -= ETH_FCS_LEN;
385
703da5a1
RV
386 vxge_debug_rx(VXGE_TRACE,
387 "%s: %s:%d Packet Length = %d",
388 ring->ndev->name, __func__, __LINE__, pkt_length);
389
390 vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
391
392 /* check skb validity */
393 vxge_assert(skb);
394
395 prefetch((char *)skb + L1_CACHE_BYTES);
396 if (unlikely(t_code)) {
703da5a1
RV
397 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
398 VXGE_HW_OK) {
399
400 ring->stats.rx_errors++;
401 vxge_debug_rx(VXGE_TRACE,
402 "%s: %s :%d Rx T_code is %d",
403 ring->ndev->name, __func__,
404 __LINE__, t_code);
405
406 /* If the t_code is not supported and if the
407 * t_code is other than 0x5 (unparseable packet
408 * such as an unknown IPv6 header), drop it !!!
409 */
410 vxge_re_pre_post(dtr, ring, rx_priv);
411
412 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
413 ring->stats.rx_dropped++;
414 continue;
415 }
416 }
417
418 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
703da5a1 419 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
703da5a1
RV
420 if (!vxge_rx_map(dtr, ring)) {
421 skb_put(skb, pkt_length);
422
423 pci_unmap_single(ring->pdev, data_dma,
424 data_size, PCI_DMA_FROMDEVICE);
425
426 vxge_hw_ring_rxd_pre_post(ringh, dtr);
427 vxge_post(&dtr_cnt, &first_dtr, dtr,
428 ringh);
429 } else {
430 dev_kfree_skb(rx_priv->skb);
431 rx_priv->skb = skb;
432 rx_priv->data_size = data_size;
433 vxge_re_pre_post(dtr, ring, rx_priv);
434
435 vxge_post(&dtr_cnt, &first_dtr, dtr,
436 ringh);
437 ring->stats.rx_dropped++;
438 break;
439 }
440 } else {
441 vxge_re_pre_post(dtr, ring, rx_priv);
442
443 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
444 ring->stats.rx_dropped++;
445 break;
446 }
447 } else {
448 struct sk_buff *skb_up;
449
450 skb_up = netdev_alloc_skb(dev, pkt_length +
451 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
452 if (skb_up != NULL) {
453 skb_reserve(skb_up,
454 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
455
456 pci_dma_sync_single_for_cpu(ring->pdev,
457 data_dma, data_size,
458 PCI_DMA_FROMDEVICE);
459
460 vxge_debug_mem(VXGE_TRACE,
461 "%s: %s:%d skb_up = %p",
462 ring->ndev->name, __func__,
463 __LINE__, skb);
464 memcpy(skb_up->data, skb->data, pkt_length);
465
466 vxge_re_pre_post(dtr, ring, rx_priv);
467
468 vxge_post(&dtr_cnt, &first_dtr, dtr,
469 ringh);
470 /* pass the small copied SKB up the stack instead */
471 skb = skb_up;
472 skb_put(skb, pkt_length);
473 } else {
474 vxge_re_pre_post(dtr, ring, rx_priv);
475
476 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
477 vxge_debug_rx(VXGE_ERR,
478 "%s: vxge_rx_1b_compl: out of "
479 "memory", dev->name);
480 ring->stats.skb_alloc_fail++;
481 break;
482 }
483 }
484
485 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
486 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
feb990d4 487 (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
703da5a1
RV
488 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
489 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
490 skb->ip_summed = CHECKSUM_UNNECESSARY;
491 else
bc8acf2c 492 skb_checksum_none_assert(skb);
703da5a1 493
b81b3733
JM
494
495 if (ring->rx_hwts) {
496 struct skb_shared_hwtstamps *skb_hwts;
497 u32 ns = *(u32 *)(skb->head + pkt_length);
498
499 skb_hwts = skb_hwtstamps(skb);
500 skb_hwts->hwtstamp = ns_to_ktime(ns);
501 skb_hwts->syststamp.tv64 = 0;
502 }
503
47f01db4
JM
504 /* rth_hash_type and rth_it_hit are non-zero regardless of
505 * whether rss is enabled. Only the rth_value is zero/non-zero
506 * if rss is disabled/enabled, so key off of that.
507 */
508 if (ext_info.rth_value)
509 skb->rxhash = ext_info.rth_value;
510
703da5a1
RV
511 vxge_rx_complete(ring, skb, ext_info.vlan,
512 pkt_length, &ext_info);
513
514 ring->budget--;
515 ring->pkts_processed++;
516 if (!ring->budget)
517 break;
518
519 } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
520 &t_code) == VXGE_HW_OK);
521
522 if (first_dtr)
523 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
524
703da5a1
RV
525 vxge_debug_entryexit(VXGE_TRACE,
526 "%s:%d Exiting...",
527 __func__, __LINE__);
528 return VXGE_HW_OK;
529}
530
531/*
532 * vxge_xmit_compl
533 *
534 * If an interrupt was raised to indicate DMA complete of the Tx packet,
535 * this function is called. It identifies the last TxD whose buffer was
536 * freed and frees all skbs whose data has already been DMA'ed into the NIC's
537 * internal memory.
538 */
42821a5b 539static enum vxge_hw_status
703da5a1
RV
540vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
541 enum vxge_hw_fifo_tcode t_code, void *userdata,
ff67df55 542 struct sk_buff ***skb_ptr, int nr_skb, int *more)
703da5a1
RV
543{
544 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
ff67df55 545 struct sk_buff *skb, **done_skb = *skb_ptr;
703da5a1
RV
546 int pkt_cnt = 0;
547
548 vxge_debug_entryexit(VXGE_TRACE,
549 "%s:%d Entered....", __func__, __LINE__);
550
551 do {
552 int frg_cnt;
553 skb_frag_t *frag;
554 int i = 0, j;
555 struct vxge_tx_priv *txd_priv =
556 vxge_hw_fifo_txdl_private_get(dtr);
557
558 skb = txd_priv->skb;
559 frg_cnt = skb_shinfo(skb)->nr_frags;
560 frag = &skb_shinfo(skb)->frags[0];
561
562 vxge_debug_tx(VXGE_TRACE,
563 "%s: %s:%d fifo_hw = %p dtr = %p "
564 "tcode = 0x%x", fifo->ndev->name, __func__,
565 __LINE__, fifo_hw, dtr, t_code);
566 /* check skb validity */
567 vxge_assert(skb);
568 vxge_debug_tx(VXGE_TRACE,
569 "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
570 fifo->ndev->name, __func__, __LINE__,
571 skb, txd_priv, frg_cnt);
572 if (unlikely(t_code)) {
573 fifo->stats.tx_errors++;
574 vxge_debug_tx(VXGE_ERR,
575 "%s: tx: dtr %p completed due to "
576 "error t_code %01x", fifo->ndev->name,
577 dtr, t_code);
578 vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
579 }
580
581 /* for unfragmented skb */
582 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
583 skb_headlen(skb), PCI_DMA_TODEVICE);
584
585 for (j = 0; j < frg_cnt; j++) {
586 pci_unmap_page(fifo->pdev,
587 txd_priv->dma_buffers[i++],
588 frag->size, PCI_DMA_TODEVICE);
589 frag += 1;
590 }
591
592 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
593
594 /* Updating the statistics block */
595 fifo->stats.tx_frms++;
596 fifo->stats.tx_bytes += skb->len;
597
ff67df55
BL
598 *done_skb++ = skb;
599
600 if (--nr_skb <= 0) {
601 *more = 1;
602 break;
603 }
703da5a1
RV
604
605 pkt_cnt++;
606 if (pkt_cnt > fifo->indicate_max_pkts)
607 break;
608
609 } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
610 &dtr, &t_code) == VXGE_HW_OK);
611
ff67df55 612 *skb_ptr = done_skb;
98f45da2
JM
613 if (netif_tx_queue_stopped(fifo->txq))
614 netif_tx_wake_queue(fifo->txq);
703da5a1 615
703da5a1
RV
616 vxge_debug_entryexit(VXGE_TRACE,
617 "%s: %s:%d Exiting...",
618 fifo->ndev->name, __func__, __LINE__);
619 return VXGE_HW_OK;
620}
621
28679751 622/* select a vpath to transmit the packet */
98f45da2 623static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
703da5a1
RV
624{
625 u16 queue_len, counter = 0;
626 if (skb->protocol == htons(ETH_P_IP)) {
627 struct iphdr *ip;
628 struct tcphdr *th;
629
630 ip = ip_hdr(skb);
631
632 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
633 th = (struct tcphdr *)(((unsigned char *)ip) +
634 ip->ihl*4);
635
636 queue_len = vdev->no_of_vpath;
637 counter = (ntohs(th->source) +
638 ntohs(th->dest)) &
639 vdev->vpath_selector[queue_len - 1];
640 if (counter >= queue_len)
641 counter = queue_len - 1;
703da5a1
RV
642 }
643 }
644 return counter;
645}
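/*
 * Example (illustrative, not from the original sources): with four open
 * vpaths, vpath_selector[3] == 3, so a TCP flow with source port 1000 and
 * destination port 2000 is steered to vpath (1000 + 2000) & 3 == 0.
 */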
646
647static enum vxge_hw_status vxge_search_mac_addr_in_list(
648 struct vxge_vpath *vpath, u64 del_mac)
649{
650 struct list_head *entry, *next;
651 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
652 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
653 return TRUE;
654 }
655 return FALSE;
656}
657
528f7272
JM
658static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
659{
660 struct vxge_mac_addrs *new_mac_entry;
661 u8 *mac_address = NULL;
662
663 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
664 return TRUE;
665
666 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
667 if (!new_mac_entry) {
668 vxge_debug_mem(VXGE_ERR,
669 "%s: memory allocation failed",
670 VXGE_DRIVER_NAME);
671 return FALSE;
672 }
673
674 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
675
676 /* Copy the new mac address to the list */
677 mac_address = (u8 *)&new_mac_entry->macaddr;
678 memcpy(mac_address, mac->macaddr, ETH_ALEN);
679
680 new_mac_entry->state = mac->state;
681 vpath->mac_addr_cnt++;
682
683 /* Is this a multicast address */
684 if (0x01 & mac->macaddr[0])
685 vpath->mcast_addr_cnt++;
686
687 return TRUE;
688}
689
690/* Add a mac address to DA table */
691static enum vxge_hw_status
692vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
693{
694 enum vxge_hw_status status = VXGE_HW_OK;
695 struct vxge_vpath *vpath;
696 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
697
698 if (0x01 & mac->macaddr[0]) /* multicast address */
699 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
700 else
701 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
702
703 vpath = &vdev->vpaths[mac->vpath_no];
704 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
705 mac->macmask, duplicate_mode);
706 if (status != VXGE_HW_OK) {
707 vxge_debug_init(VXGE_ERR,
708 "DA config add entry failed for vpath:%d",
709 vpath->device_id);
710 } else
711 if (FALSE == vxge_mac_list_add(vpath, mac))
712 status = -EPERM;
713
714 return status;
715}
716
703da5a1
RV
717static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
718{
719 struct macInfo mac_info;
720 u8 *mac_address = NULL;
721 u64 mac_addr = 0, vpath_vector = 0;
722 int vpath_idx = 0;
723 enum vxge_hw_status status = VXGE_HW_OK;
724 struct vxge_vpath *vpath = NULL;
725 struct __vxge_hw_device *hldev;
726
d8ee7071 727 hldev = pci_get_drvdata(vdev->pdev);
703da5a1
RV
728
729 mac_address = (u8 *)&mac_addr;
730 memcpy(mac_address, mac_header, ETH_ALEN);
731
732 /* Is this mac address already in the list? */
733 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
734 vpath = &vdev->vpaths[vpath_idx];
735 if (vxge_search_mac_addr_in_list(vpath, mac_addr))
736 return vpath_idx;
737 }
738
739 memset(&mac_info, 0, sizeof(struct macInfo));
740 memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
741
742 /* Any vpath has room to add mac address to its da table? */
743 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
744 vpath = &vdev->vpaths[vpath_idx];
745 if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
746 /* Add this mac address to this vpath */
747 mac_info.vpath_no = vpath_idx;
748 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
749 status = vxge_add_mac_addr(vdev, &mac_info);
750 if (status != VXGE_HW_OK)
751 return -EPERM;
752 return vpath_idx;
753 }
754 }
755
756 mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
757 vpath_idx = 0;
758 mac_info.vpath_no = vpath_idx;
759 /* Is the first vpath already selected as catch-basin ? */
760 vpath = &vdev->vpaths[vpath_idx];
761 if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
762 /* Add this mac address to this vpath */
763 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
764 return -EPERM;
765 return vpath_idx;
766 }
767
768 /* Select first vpath as catch-basin */
769 vpath_vector = vxge_mBIT(vpath->device_id);
770 status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
771 vxge_hw_mgmt_reg_type_mrpcim,
772 0,
773 (ulong)offsetof(
774 struct vxge_hw_mrpcim_reg,
775 rts_mgr_cbasin_cfg),
776 vpath_vector);
777 if (status != VXGE_HW_OK) {
778 vxge_debug_tx(VXGE_ERR,
779 "%s: Unable to set the vpath-%d in catch-basin mode",
780 VXGE_DRIVER_NAME, vpath->device_id);
781 return -EPERM;
782 }
783
784 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
785 return -EPERM;
786
787 return vpath_idx;
788}
789
790/**
791 * vxge_xmit
792 * @skb : the socket buffer containing the Tx data.
793 * @dev : device pointer.
794 *
795 * This function is the Tx entry point of the driver. Neterion NIC supports
796 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
703da5a1 797*/
61357325 798static netdev_tx_t
703da5a1
RV
799vxge_xmit(struct sk_buff *skb, struct net_device *dev)
800{
801 struct vxge_fifo *fifo = NULL;
802 void *dtr_priv;
803 void *dtr = NULL;
804 struct vxgedev *vdev = NULL;
805 enum vxge_hw_status status;
806 int frg_cnt, first_frg_len;
807 skb_frag_t *frag;
808 int i = 0, j = 0, avail;
809 u64 dma_pointer;
810 struct vxge_tx_priv *txdl_priv = NULL;
811 struct __vxge_hw_fifo *fifo_hw;
703da5a1 812 int offload_type;
703da5a1 813 int vpath_no = 0;
703da5a1
RV
814
815 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
816 dev->name, __func__, __LINE__);
817
818 /* A buffer with no data will be dropped */
819 if (unlikely(skb->len <= 0)) {
820 vxge_debug_tx(VXGE_ERR,
821 "%s: Buffer has no data..", dev->name);
822 dev_kfree_skb(skb);
823 return NETDEV_TX_OK;
824 }
825
5f54cebb 826 vdev = netdev_priv(dev);
703da5a1
RV
827
828 if (unlikely(!is_vxge_card_up(vdev))) {
829 vxge_debug_tx(VXGE_ERR,
830 "%s: vdev not initialized", dev->name);
831 dev_kfree_skb(skb);
832 return NETDEV_TX_OK;
833 }
834
835 if (vdev->config.addr_learn_en) {
836 vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
837 if (vpath_no == -EPERM) {
838 vxge_debug_tx(VXGE_ERR,
839 "%s: Failed to store the mac address",
840 dev->name);
841 dev_kfree_skb(skb);
842 return NETDEV_TX_OK;
843 }
844 }
845
846 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
847 vpath_no = skb_get_queue_mapping(skb);
848 else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
98f45da2 849 vpath_no = vxge_get_vpath_no(vdev, skb);
703da5a1
RV
850
851 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
852
853 if (vpath_no >= vdev->no_of_vpath)
854 vpath_no = 0;
855
856 fifo = &vdev->vpaths[vpath_no].fifo;
857 fifo_hw = fifo->handle;
858
98f45da2 859 if (netif_tx_queue_stopped(fifo->txq))
d03848e0 860 return NETDEV_TX_BUSY;
d03848e0 861
703da5a1
RV
862 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
863 if (avail == 0) {
864 vxge_debug_tx(VXGE_ERR,
865 "%s: No free TXDs available", dev->name);
866 fifo->stats.txd_not_free++;
98f45da2 867 goto _exit0;
703da5a1
RV
868 }
869
4403b371
BL
870 /* Last TXD? Stop tx queue to avoid dropping packets. TX
871 * completion will resume the queue.
872 */
873 if (avail == 1)
98f45da2 874 netif_tx_stop_queue(fifo->txq);
4403b371 875
703da5a1
RV
876 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
877 if (unlikely(status != VXGE_HW_OK)) {
878 vxge_debug_tx(VXGE_ERR,
879 "%s: Out of descriptors .", dev->name);
880 fifo->stats.txd_out_of_desc++;
98f45da2 881 goto _exit0;
703da5a1
RV
882 }
883
884 vxge_debug_tx(VXGE_TRACE,
885 "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
886 dev->name, __func__, __LINE__,
887 fifo_hw, dtr, dtr_priv);
888
eab6d18d 889 if (vlan_tx_tag_present(skb)) {
703da5a1
RV
890 u16 vlan_tag = vlan_tx_tag_get(skb);
891 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
892 }
893
894 first_frg_len = skb_headlen(skb);
895
896 dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
897 PCI_DMA_TODEVICE);
898
899 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
900 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
703da5a1 901 fifo->stats.pci_map_fail++;
98f45da2 902 goto _exit0;
703da5a1
RV
903 }
904
905 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
906 txdl_priv->skb = skb;
907 txdl_priv->dma_buffers[j] = dma_pointer;
908
909 frg_cnt = skb_shinfo(skb)->nr_frags;
910 vxge_debug_tx(VXGE_TRACE,
911 "%s: %s:%d skb = %p txdl_priv = %p "
912 "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
913 __func__, __LINE__, skb, txdl_priv,
914 frg_cnt, (unsigned long long)dma_pointer);
915
916 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
917 first_frg_len);
918
919 frag = &skb_shinfo(skb)->frags[0];
920 for (i = 0; i < frg_cnt; i++) {
921 /* ignore 0 length fragment */
922 if (!frag->size)
923 continue;
924
98f45da2 925 dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
703da5a1
RV
926 frag->page_offset, frag->size,
927 PCI_DMA_TODEVICE);
928
929 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
98f45da2 930 goto _exit2;
703da5a1
RV
931 vxge_debug_tx(VXGE_TRACE,
932 "%s: %s:%d frag = %d dma_pointer = 0x%llx",
933 dev->name, __func__, __LINE__, i,
934 (unsigned long long)dma_pointer);
935
936 txdl_priv->dma_buffers[j] = dma_pointer;
937 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
938 frag->size);
939 frag += 1;
940 }
941
942 offload_type = vxge_offload_type(skb);
943
944 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
703da5a1
RV
945 int mss = vxge_tcp_mss(skb);
946 if (mss) {
98f45da2 947 vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
703da5a1
RV
948 dev->name, __func__, __LINE__, mss);
949 vxge_hw_fifo_txdl_mss_set(dtr, mss);
950 } else {
951 vxge_assert(skb->len <=
952 dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
953 vxge_assert(0);
954 goto _exit1;
955 }
956 }
957
958 if (skb->ip_summed == CHECKSUM_PARTIAL)
959 vxge_hw_fifo_txdl_cksum_set_bits(dtr,
960 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
961 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
962 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
963
964 vxge_hw_fifo_txdl_post(fifo_hw, dtr);
703da5a1 965
703da5a1
RV
966 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
967 dev->name, __func__, __LINE__);
6ed10654 968 return NETDEV_TX_OK;
703da5a1 969
98f45da2 970_exit2:
703da5a1 971 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
703da5a1
RV
972_exit1:
973 j = 0;
974 frag = &skb_shinfo(skb)->frags[0];
975
976 pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
977 skb_headlen(skb), PCI_DMA_TODEVICE);
978
979 for (; j < i; j++) {
980 pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
981 frag->size, PCI_DMA_TODEVICE);
982 frag += 1;
983 }
984
985 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
98f45da2
JM
986_exit0:
987 netif_tx_stop_queue(fifo->txq);
703da5a1 988 dev_kfree_skb(skb);
703da5a1 989
6ed10654 990 return NETDEV_TX_OK;
703da5a1
RV
991}
992
993/*
994 * vxge_rx_term
995 *
996 * Function will be called by the HW layer to abort all outstanding receive
997 * descriptors.
998 */
999static void
1000vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
1001{
1002 struct vxge_ring *ring = (struct vxge_ring *)userdata;
1003 struct vxge_rx_priv *rx_priv =
1004 vxge_hw_ring_rxd_private_get(dtrh);
1005
1006 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
1007 ring->ndev->name, __func__, __LINE__);
1008 if (state != VXGE_HW_RXD_STATE_POSTED)
1009 return;
1010
1011 pci_unmap_single(ring->pdev, rx_priv->data_dma,
1012 rx_priv->data_size, PCI_DMA_FROMDEVICE);
1013
1014 dev_kfree_skb(rx_priv->skb);
ea11bbe0 1015 rx_priv->skb_data = NULL;
703da5a1
RV
1016
1017 vxge_debug_entryexit(VXGE_TRACE,
1018 "%s: %s:%d Exiting...",
1019 ring->ndev->name, __func__, __LINE__);
1020}
1021
1022/*
1023 * vxge_tx_term
1024 *
1025 * Function will be called to abort all outstanding tx descriptors
1026 */
1027static void
1028vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1029{
1030 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
1031 skb_frag_t *frag;
1032 int i = 0, j, frg_cnt;
1033 struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
1034 struct sk_buff *skb = txd_priv->skb;
1035
1036 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1037
1038 if (state != VXGE_HW_TXDL_STATE_POSTED)
1039 return;
1040
1041 /* check skb validity */
1042 vxge_assert(skb);
1043 frg_cnt = skb_shinfo(skb)->nr_frags;
1044 frag = &skb_shinfo(skb)->frags[0];
1045
1046 /* for unfragmented skb */
1047 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
1048 skb_headlen(skb), PCI_DMA_TODEVICE);
1049
1050 for (j = 0; j < frg_cnt; j++) {
1051 pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
1052 frag->size, PCI_DMA_TODEVICE);
1053 frag += 1;
1054 }
1055
1056 dev_kfree_skb(skb);
1057
1058 vxge_debug_entryexit(VXGE_TRACE,
1059 "%s:%d Exiting...", __func__, __LINE__);
1060}
1061
528f7272
JM
1062static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1063{
1064 struct list_head *entry, *next;
1065 u64 del_mac = 0;
1066 u8 *mac_address = (u8 *) (&del_mac);
1067
1068 /* Copy the mac address to delete from the list */
1069 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1070
1071 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1072 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1073 list_del(entry);
1074 kfree((struct vxge_mac_addrs *)entry);
1075 vpath->mac_addr_cnt--;
1076
1077 /* Is this a multicast address */
1078 if (0x01 & mac->macaddr[0])
1079 vpath->mcast_addr_cnt--;
1080 return TRUE;
1081 }
1082 }
1083
1084 return FALSE;
1085}
1086
1087/* delete a mac address from DA table */
1088static enum vxge_hw_status
1089vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1090{
1091 enum vxge_hw_status status = VXGE_HW_OK;
1092 struct vxge_vpath *vpath;
1093
1094 vpath = &vdev->vpaths[mac->vpath_no];
1095 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1096 mac->macmask);
1097 if (status != VXGE_HW_OK) {
1098 vxge_debug_init(VXGE_ERR,
1099 "DA config delete entry failed for vpath:%d",
1100 vpath->device_id);
1101 } else
1102 vxge_mac_list_del(vpath, mac);
1103 return status;
1104}
1105
703da5a1
RV
1106/**
1107 * vxge_set_multicast
1108 * @dev: pointer to the device structure
1109 *
1110 * Entry point for multicast address enable/disable
1111 * This function is a driver entry point which gets called by the kernel
1112 * whenever multicast addresses must be enabled/disabled. This also gets
1113 * called to set/reset promiscuous mode. Depending on the device flags, we
1114 * determine whether multicast addresses must be enabled or promiscuous mode
1115 * is to be disabled, etc.
1116 */
1117static void vxge_set_multicast(struct net_device *dev)
1118{
22bedad3 1119 struct netdev_hw_addr *ha;
703da5a1
RV
1120 struct vxgedev *vdev;
1121 int i, mcast_cnt = 0;
7adf7d1b
JM
1122 struct __vxge_hw_device *hldev;
1123 struct vxge_vpath *vpath;
703da5a1
RV
1124 enum vxge_hw_status status = VXGE_HW_OK;
1125 struct macInfo mac_info;
1126 int vpath_idx = 0;
1127 struct vxge_mac_addrs *mac_entry;
1128 struct list_head *list_head;
1129 struct list_head *entry, *next;
1130 u8 *mac_address = NULL;
1131
1132 vxge_debug_entryexit(VXGE_TRACE,
1133 "%s:%d", __func__, __LINE__);
1134
5f54cebb 1135 vdev = netdev_priv(dev);
703da5a1
RV
1136 hldev = (struct __vxge_hw_device *)vdev->devh;
1137
1138 if (unlikely(!is_vxge_card_up(vdev)))
1139 return;
1140
1141 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1142 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
1143 vpath = &vdev->vpaths[i];
1144 vxge_assert(vpath->is_open);
1145 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1146 if (status != VXGE_HW_OK)
1147 vxge_debug_init(VXGE_ERR, "failed to enable "
1148 "multicast, status %d", status);
703da5a1
RV
1149 vdev->all_multi_flg = 1;
1150 }
7adf7d1b 1151 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
703da5a1 1152 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
1153 vpath = &vdev->vpaths[i];
1154 vxge_assert(vpath->is_open);
1155 status = vxge_hw_vpath_mcast_disable(vpath->handle);
1156 if (status != VXGE_HW_OK)
1157 vxge_debug_init(VXGE_ERR, "failed to disable "
1158 "multicast, status %d", status);
1159 vdev->all_multi_flg = 0;
703da5a1
RV
1160 }
1161 }
1162
703da5a1
RV
1163
1164 if (!vdev->config.addr_learn_en) {
7adf7d1b
JM
1165 for (i = 0; i < vdev->no_of_vpath; i++) {
1166 vpath = &vdev->vpaths[i];
1167 vxge_assert(vpath->is_open);
1168
1169 if (dev->flags & IFF_PROMISC)
703da5a1 1170 status = vxge_hw_vpath_promisc_enable(
7adf7d1b
JM
1171 vpath->handle);
1172 else
703da5a1 1173 status = vxge_hw_vpath_promisc_disable(
7adf7d1b
JM
1174 vpath->handle);
1175 if (status != VXGE_HW_OK)
1176 vxge_debug_init(VXGE_ERR, "failed to %s promisc"
1177 ", status %d", dev->flags&IFF_PROMISC ?
1178 "enable" : "disable", status);
703da5a1
RV
1179 }
1180 }
1181
1182 memset(&mac_info, 0, sizeof(struct macInfo));
1183 /* Update individual M_CAST address list */
4cd24eaf 1184 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
703da5a1
RV
1185 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1186 list_head = &vdev->vpaths[0].mac_addr_list;
4cd24eaf 1187 if ((netdev_mc_count(dev) +
703da5a1
RV
1188 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1189 vdev->vpaths[0].max_mac_addr_cnt)
1190 goto _set_all_mcast;
1191
1192 /* Delete previous MC's */
1193 for (i = 0; i < mcast_cnt; i++) {
703da5a1 1194 list_for_each_safe(entry, next, list_head) {
2c91308f 1195 mac_entry = (struct vxge_mac_addrs *)entry;
703da5a1
RV
1196 /* Copy the mac address to delete */
1197 mac_address = (u8 *)&mac_entry->macaddr;
1198 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1199
1200 /* Is this a multicast address */
1201 if (0x01 & mac_info.macaddr[0]) {
1202 for (vpath_idx = 0; vpath_idx <
1203 vdev->no_of_vpath;
1204 vpath_idx++) {
1205 mac_info.vpath_no = vpath_idx;
1206 status = vxge_del_mac_addr(
1207 vdev,
1208 &mac_info);
1209 }
1210 }
1211 }
1212 }
1213
1214 /* Add new ones */
22bedad3
JP
1215 netdev_for_each_mc_addr(ha, dev) {
1216 memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
703da5a1
RV
1217 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1218 vpath_idx++) {
1219 mac_info.vpath_no = vpath_idx;
1220 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1221 status = vxge_add_mac_addr(vdev, &mac_info);
1222 if (status != VXGE_HW_OK) {
1223 vxge_debug_init(VXGE_ERR,
1224 "%s:%d Setting individual"
1225 "multicast address failed",
1226 __func__, __LINE__);
1227 goto _set_all_mcast;
1228 }
1229 }
1230 }
1231
1232 return;
1233_set_all_mcast:
1234 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1235 /* Delete previous MC's */
1236 for (i = 0; i < mcast_cnt; i++) {
703da5a1 1237 list_for_each_safe(entry, next, list_head) {
2c91308f 1238 mac_entry = (struct vxge_mac_addrs *)entry;
703da5a1
RV
1239 /* Copy the mac address to delete */
1240 mac_address = (u8 *)&mac_entry->macaddr;
1241 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1242
1243 /* Is this a multicast address */
1244 if (0x01 & mac_info.macaddr[0])
1245 break;
1246 }
1247
1248 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1249 vpath_idx++) {
1250 mac_info.vpath_no = vpath_idx;
1251 status = vxge_del_mac_addr(vdev, &mac_info);
1252 }
1253 }
1254
1255 /* Enable all multicast */
1256 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
1257 vpath = &vdev->vpaths[i];
1258 vxge_assert(vpath->is_open);
1259
1260 status = vxge_hw_vpath_mcast_enable(vpath->handle);
703da5a1
RV
1261 if (status != VXGE_HW_OK) {
1262 vxge_debug_init(VXGE_ERR,
1263 "%s:%d Enabling all multicasts failed",
1264 __func__, __LINE__);
1265 }
1266 vdev->all_multi_flg = 1;
1267 }
1268 dev->flags |= IFF_ALLMULTI;
1269 }
1270
1271 vxge_debug_entryexit(VXGE_TRACE,
1272 "%s:%d Exiting...", __func__, __LINE__);
1273}
1274
1275/**
1276 * vxge_set_mac_addr
1277 * @dev: pointer to the device structure
1278 *
1279 * Update entry "0" (default MAC addr)
1280 */
1281static int vxge_set_mac_addr(struct net_device *dev, void *p)
1282{
1283 struct sockaddr *addr = p;
1284 struct vxgedev *vdev;
2c91308f 1285 struct __vxge_hw_device *hldev;
703da5a1
RV
1286 enum vxge_hw_status status = VXGE_HW_OK;
1287 struct macInfo mac_info_new, mac_info_old;
1288 int vpath_idx = 0;
1289
1290 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1291
5f54cebb 1292 vdev = netdev_priv(dev);
703da5a1
RV
1293 hldev = vdev->devh;
1294
1295 if (!is_valid_ether_addr(addr->sa_data))
1296 return -EINVAL;
1297
1298 memset(&mac_info_new, 0, sizeof(struct macInfo));
1299 memset(&mac_info_old, 0, sizeof(struct macInfo));
1300
1301 vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
1302 __func__, __LINE__);
1303
1304 /* Get the old address */
1305 memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
1306
1307 /* Copy the new address */
1308 memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
1309
1310 /* First delete the old mac address from all the vpaths
1311 as we can't specify the index while adding new mac address */
1312 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1313 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
1314 if (!vpath->is_open) {
1315 /* This can happen when this interface is added to or removed
1316 from a bonding interface. Delete this station address
1317 from the linked list */
1318 vxge_mac_list_del(vpath, &mac_info_old);
1319
1320 /* Add this new address to the linked list
1321 for later restoring */
1322 vxge_mac_list_add(vpath, &mac_info_new);
1323
1324 continue;
1325 }
1326 /* Delete the station address */
1327 mac_info_old.vpath_no = vpath_idx;
1328 status = vxge_del_mac_addr(vdev, &mac_info_old);
1329 }
1330
1331 if (unlikely(!is_vxge_card_up(vdev))) {
1332 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1333 return VXGE_HW_OK;
1334 }
1335
1336 /* Set this mac address to all the vpaths */
1337 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1338 mac_info_new.vpath_no = vpath_idx;
1339 mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1340 status = vxge_add_mac_addr(vdev, &mac_info_new);
1341 if (status != VXGE_HW_OK)
1342 return -EINVAL;
1343 }
1344
1345 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1346
1347 return status;
1348}
1349
1350/*
1351 * vxge_vpath_intr_enable
1352 * @vdev: pointer to vdev
1353 * @vp_id: vpath for which to enable the interrupts
1354 *
1355 * Enables the interrupts for the vpath
1356*/
42821a5b 1357static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
703da5a1
RV
1358{
1359 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
b59c9457
SH
1360 int msix_id = 0;
1361 int tim_msix_id[4] = {0, 1, 0, 0};
1362 int alarm_msix_id = VXGE_ALARM_MSIX_ID;
703da5a1
RV
1363
1364 vxge_hw_vpath_intr_enable(vpath->handle);
1365
1366 if (vdev->config.intr_type == INTA)
1367 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1368 else {
703da5a1
RV
1369 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1370 alarm_msix_id);
1371
b59c9457 1372 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
703da5a1
RV
1373 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1374 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1375
1376 /* enable the alarm vector */
b59c9457
SH
1377 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1378 VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
1379 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
703da5a1
RV
1380 }
1381}
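/*
 * Note (assumption, for illustration): each vpath is taken to own a contiguous
 * block of VXGE_HW_VPATH_MSIX_ACTIVE MSI-X vectors starting at
 * device_id * VXGE_HW_VPATH_MSIX_ACTIVE; the first two vectors in the block
 * carry the TIM (tx/rx) interrupts unmasked above, while the shared alarm
 * vector sits at offset VXGE_ALARM_MSIX_ID within the first vpath's block.
 */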
1382
1383/*
1384 * vxge_vpath_intr_disable
1385 * @vdev: pointer to vdev
1386 * @vp_id: vpath for which to disable the interrupts
1387 *
1388 * Disables the interrupts for the vpath
1389*/
42821a5b 1390static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
703da5a1
RV
1391{
1392 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
4d2a5b40 1393 struct __vxge_hw_device *hldev;
703da5a1
RV
1394 int msix_id;
1395
d8ee7071 1396 hldev = pci_get_drvdata(vdev->pdev);
4d2a5b40
JM
1397
1398 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1399
703da5a1
RV
1400 vxge_hw_vpath_intr_disable(vpath->handle);
1401
1402 if (vdev->config.intr_type == INTA)
1403 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1404 else {
b59c9457 1405 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
703da5a1
RV
1406 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1407 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1408
1409 /* disable the alarm vector */
b59c9457
SH
1410 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1411 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
703da5a1
RV
1412 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1413 }
1414}
1415
528f7272
JM
1416/* Search for a mac address in the DA table */
1417static enum vxge_hw_status
1418vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
1419{
1420 enum vxge_hw_status status = VXGE_HW_OK;
1421 unsigned char macmask[ETH_ALEN];
1422 unsigned char macaddr[ETH_ALEN];
1423
1424 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1425 macaddr, macmask);
1426 if (status != VXGE_HW_OK) {
1427 vxge_debug_init(VXGE_ERR,
1428 "DA config list entry failed for vpath:%d",
1429 vpath->device_id);
1430 return status;
1431 }
1432
1433 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1434 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1435 macaddr, macmask);
1436 if (status != VXGE_HW_OK)
1437 break;
1438 }
1439
1440 return status;
1441}
1442
1443/* Store all mac addresses from the list to the DA table */
1444static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1445{
1446 enum vxge_hw_status status = VXGE_HW_OK;
1447 struct macInfo mac_info;
1448 u8 *mac_address = NULL;
1449 struct list_head *entry, *next;
1450
1451 memset(&mac_info, 0, sizeof(struct macInfo));
1452
1453 if (vpath->is_open) {
1454 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1455 mac_address =
1456 (u8 *)&
1457 ((struct vxge_mac_addrs *)entry)->macaddr;
1458 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1459 ((struct vxge_mac_addrs *)entry)->state =
1460 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1461 /* does this mac address already exist in da table? */
1462 status = vxge_search_mac_addr_in_da_table(vpath,
1463 &mac_info);
1464 if (status != VXGE_HW_OK) {
1465 /* Add this mac address to the DA table */
1466 status = vxge_hw_vpath_mac_addr_add(
1467 vpath->handle, mac_info.macaddr,
1468 mac_info.macmask,
1469 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1470 if (status != VXGE_HW_OK) {
1471 vxge_debug_init(VXGE_ERR,
1472 "DA add entry failed for vpath:%d",
1473 vpath->device_id);
1474 ((struct vxge_mac_addrs *)entry)->state
1475 = VXGE_LL_MAC_ADDR_IN_LIST;
1476 }
1477 }
1478 }
1479 }
1480
1481 return status;
1482}
1483
1484/* Store all vlan ids from the list to the vid table */
1485static enum vxge_hw_status
1486vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1487{
1488 enum vxge_hw_status status = VXGE_HW_OK;
1489 struct vxgedev *vdev = vpath->vdev;
1490 u16 vid;
1491
1492 if (vdev->vlgrp && vpath->is_open) {
1493
1494 for (vid = 0; vid < VLAN_N_VID; vid++) {
1495 if (!vlan_group_get_device(vdev->vlgrp, vid))
1496 continue;
1497 /* Add these vlan to the vid table */
1498 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1499 }
1500 }
1501
1502 return status;
1503}
1504
703da5a1
RV
1505/*
1506 * vxge_reset_vpath
1507 * @vdev: pointer to vdev
1508 * @vp_id: vpath to reset
1509 *
1510 * Resets the vpath
1511*/
1512static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1513{
1514 enum vxge_hw_status status = VXGE_HW_OK;
7adf7d1b 1515 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
703da5a1
RV
1516 int ret = 0;
1517
1518 /* check if device is down already */
1519 if (unlikely(!is_vxge_card_up(vdev)))
1520 return 0;
1521
1522 /* is device reset already scheduled */
1523 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1524 return 0;
1525
7adf7d1b
JM
1526 if (vpath->handle) {
1527 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
703da5a1 1528 if (is_vxge_card_up(vdev) &&
7adf7d1b 1529 vxge_hw_vpath_recover_from_reset(vpath->handle)
703da5a1
RV
1530 != VXGE_HW_OK) {
1531 vxge_debug_init(VXGE_ERR,
1532 "vxge_hw_vpath_recover_from_reset"
1533 "failed for vpath:%d", vp_id);
1534 return status;
1535 }
1536 } else {
1537 vxge_debug_init(VXGE_ERR,
1538 "vxge_hw_vpath_reset failed for"
1539 "vpath:%d", vp_id);
1540 return status;
1541 }
1542 } else
1543 return VXGE_HW_FAIL;
1544
7adf7d1b
JM
1545 vxge_restore_vpath_mac_addr(vpath);
1546 vxge_restore_vpath_vid_table(vpath);
703da5a1
RV
1547
1548 /* Enable all broadcast */
7adf7d1b
JM
1549 vxge_hw_vpath_bcast_enable(vpath->handle);
1550
1551 /* Enable all multicast */
1552 if (vdev->all_multi_flg) {
1553 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1554 if (status != VXGE_HW_OK)
1555 vxge_debug_init(VXGE_ERR,
1556 "%s:%d Enabling multicast failed",
1557 __func__, __LINE__);
1558 }
703da5a1
RV
1559
1560 /* Enable the interrupts */
1561 vxge_vpath_intr_enable(vdev, vp_id);
1562
1563 smp_wmb();
1564
1565 /* Enable the flow of traffic through the vpath */
7adf7d1b 1566 vxge_hw_vpath_enable(vpath->handle);
703da5a1
RV
1567
1568 smp_wmb();
7adf7d1b
JM
1569 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
1570 vpath->ring.last_status = VXGE_HW_OK;
703da5a1
RV
1571
1572 /* Vpath reset done */
1573 clear_bit(vp_id, &vdev->vp_reset);
1574
1575 /* Start the vpath queue */
98f45da2
JM
1576 if (netif_tx_queue_stopped(vpath->fifo.txq))
1577 netif_tx_wake_queue(vpath->fifo.txq);
703da5a1
RV
1578
1579 return ret;
1580}
1581
16fded7d
JM
1582/* Configure CI */
1583static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1584{
1585 int i = 0;
1586
1587 /* Enable CI for RTI */
1588 if (vdev->config.intr_type == MSI_X) {
1589 for (i = 0; i < vdev->no_of_vpath; i++) {
1590 struct __vxge_hw_ring *hw_ring;
1591
1592 hw_ring = vdev->vpaths[i].ring.handle;
1593 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1594 }
1595 }
1596
1597 /* Enable CI for TTI */
1598 for (i = 0; i < vdev->no_of_vpath; i++) {
1599 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1600 vxge_hw_vpath_tti_ci_set(hw_fifo);
1601 /*
1602 * For INTA (with or without NAPI), set CI on for only one
1603 * vpath (we have only one free-running timer).
1604 */
1605 if ((vdev->config.intr_type == INTA) && (i == 0))
1606 break;
1607 }
1608
1609 return;
1610}
1611
703da5a1
RV
1612static int do_vxge_reset(struct vxgedev *vdev, int event)
1613{
1614 enum vxge_hw_status status;
1615 int ret = 0, vp_id, i;
1616
1617 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1618
1619 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
1620 /* check if device is down already */
1621 if (unlikely(!is_vxge_card_up(vdev)))
1622 return 0;
1623
1624 /* is reset already scheduled */
1625 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1626 return 0;
1627 }
1628
1629 if (event == VXGE_LL_FULL_RESET) {
2e41f644
JM
1630 netif_carrier_off(vdev->ndev);
1631
703da5a1
RV
1632 /* wait for all the vpath reset to complete */
1633 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1634 while (test_bit(vp_id, &vdev->vp_reset))
1635 msleep(50);
1636 }
1637
2e41f644
JM
1638 netif_carrier_on(vdev->ndev);
1639
703da5a1
RV
1640 /* if execution mode is set to debug, don't reset the adapter */
1641 if (unlikely(vdev->exec_mode)) {
1642 vxge_debug_init(VXGE_ERR,
1643 "%s: execution mode is debug, returning..",
1644 vdev->ndev->name);
7adf7d1b
JM
1645 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1646 netif_tx_stop_all_queues(vdev->ndev);
1647 return 0;
703da5a1
RV
1648 }
1649 }
1650
1651 if (event == VXGE_LL_FULL_RESET) {
4d2a5b40 1652 vxge_hw_device_wait_receive_idle(vdev->devh);
703da5a1
RV
1653 vxge_hw_device_intr_disable(vdev->devh);
1654
1655 switch (vdev->cric_err_event) {
1656 case VXGE_HW_EVENT_UNKNOWN:
d03848e0 1657 netif_tx_stop_all_queues(vdev->ndev);
703da5a1
RV
1658 vxge_debug_init(VXGE_ERR,
1659 "fatal: %s: Disabling device due to"
1660 "unknown error",
1661 vdev->ndev->name);
1662 ret = -EPERM;
1663 goto out;
1664 case VXGE_HW_EVENT_RESET_START:
1665 break;
1666 case VXGE_HW_EVENT_RESET_COMPLETE:
1667 case VXGE_HW_EVENT_LINK_DOWN:
1668 case VXGE_HW_EVENT_LINK_UP:
1669 case VXGE_HW_EVENT_ALARM_CLEARED:
1670 case VXGE_HW_EVENT_ECCERR:
1671 case VXGE_HW_EVENT_MRPCIM_ECCERR:
1672 ret = -EPERM;
1673 goto out;
1674 case VXGE_HW_EVENT_FIFO_ERR:
1675 case VXGE_HW_EVENT_VPATH_ERR:
1676 break;
1677 case VXGE_HW_EVENT_CRITICAL_ERR:
d03848e0 1678 netif_tx_stop_all_queues(vdev->ndev);
703da5a1
RV
1679 vxge_debug_init(VXGE_ERR,
1680 "fatal: %s: Disabling device due to"
1681 "serious error",
1682 vdev->ndev->name);
1683 /* SOP or device reset required */
1684 /* This event is not currently used */
1685 ret = -EPERM;
1686 goto out;
1687 case VXGE_HW_EVENT_SERR:
d03848e0 1688 netif_tx_stop_all_queues(vdev->ndev);
703da5a1
RV
1689 vxge_debug_init(VXGE_ERR,
1690 "fatal: %s: Disabling device due to"
1691 "serious error",
1692 vdev->ndev->name);
1693 ret = -EPERM;
1694 goto out;
1695 case VXGE_HW_EVENT_SRPCIM_SERR:
1696 case VXGE_HW_EVENT_MRPCIM_SERR:
1697 ret = -EPERM;
1698 goto out;
1699 case VXGE_HW_EVENT_SLOT_FREEZE:
d03848e0 1700 netif_tx_stop_all_queues(vdev->ndev);
703da5a1
RV
1701 vxge_debug_init(VXGE_ERR,
1702 "fatal: %s: Disabling device due to"
1703 "slot freeze",
1704 vdev->ndev->name);
1705 ret = -EPERM;
1706 goto out;
1707 default:
1708 break;
1709
1710 }
1711 }
1712
1713 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
d03848e0 1714 netif_tx_stop_all_queues(vdev->ndev);
703da5a1
RV
1715
1716 if (event == VXGE_LL_FULL_RESET) {
1717 status = vxge_reset_all_vpaths(vdev);
1718 if (status != VXGE_HW_OK) {
1719 vxge_debug_init(VXGE_ERR,
1720 "fatal: %s: can not reset vpaths",
1721 vdev->ndev->name);
1722 ret = -EPERM;
1723 goto out;
1724 }
1725 }
1726
1727 if (event == VXGE_LL_COMPL_RESET) {
1728 for (i = 0; i < vdev->no_of_vpath; i++)
1729 if (vdev->vpaths[i].handle) {
1730 if (vxge_hw_vpath_recover_from_reset(
1731 vdev->vpaths[i].handle)
1732 != VXGE_HW_OK) {
1733 vxge_debug_init(VXGE_ERR,
1734 "vxge_hw_vpath_recover_"
1735 "from_reset failed for vpath: "
1736 "%d", i);
1737 ret = -EPERM;
1738 goto out;
1739 }
1740 } else {
1741 vxge_debug_init(VXGE_ERR,
1742 "vxge_hw_vpath_reset failed for "
1743 "vpath:%d", i);
1744 ret = -EPERM;
1745 goto out;
1746 }
1747 }
1748
1749 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
1750 /* Reprogram the DA table with populated mac addresses */
1751 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1752 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1753 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1754 }
1755
1756 /* enable vpath interrupts */
1757 for (i = 0; i < vdev->no_of_vpath; i++)
1758 vxge_vpath_intr_enable(vdev, i);
1759
1760 vxge_hw_device_intr_enable(vdev->devh);
1761
1762 smp_wmb();
1763
1764 /* Indicate card up */
1765 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1766
1767 /* Get the traffic to flow through the vpaths */
1768 for (i = 0; i < vdev->no_of_vpath; i++) {
1769 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1770 smp_wmb();
1771 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1772 }
1773
d03848e0 1774 netif_tx_wake_all_queues(vdev->ndev);
703da5a1
RV
1775 }
1776
16fded7d
JM
1777 /* configure CI */
1778 vxge_config_ci_for_tti_rti(vdev);
1779
703da5a1
RV
1780out:
1781 vxge_debug_entryexit(VXGE_TRACE,
1782 "%s:%d Exiting...", __func__, __LINE__);
1783
1784 /* Indicate reset done */
1785 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
1786 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
1787 return ret;
1788}
1789
1790/*
1791 * vxge_reset
1792 * @vdev: pointer to ll device
1793 *
1794 * driver may reset the chip on events of serr, eccerr, etc
1795 */
2e41f644 1796static void vxge_reset(struct work_struct *work)
703da5a1 1797{
2e41f644
JM
1798 struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1799
1800 if (!netif_running(vdev->ndev))
1801 return;
1802
1803 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
703da5a1
RV
1804}
1805
1806/**
1807 * vxge_poll - Receive handler when Receive Polling is used.
1808 * @dev: pointer to the device structure.
1809 * @budget: Number of packets budgeted to be processed in this iteration.
1810 *
1811 * This function comes into play only when the receive side is being handled
1812 * through polling (called NAPI in Linux). It mostly does what the normal
1813 * Rx interrupt handler does in terms of descriptor and packet processing
1814 * but not in an interrupt context. Also it will process a specified number
1815 * of packets at most in one iteration. This value is passed down by the
1816 * kernel as the function argument 'budget'.
1817 */
1818static int vxge_poll_msix(struct napi_struct *napi, int budget)
1819{
16fded7d
JM
1820 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1821 int pkts_processed;
703da5a1 1822 int budget_org = budget;
703da5a1 1823
16fded7d
JM
1824 ring->budget = budget;
1825 ring->pkts_processed = 0;
703da5a1 1826 vxge_hw_vpath_poll_rx(ring->handle);
16fded7d 1827 pkts_processed = ring->pkts_processed;
703da5a1
RV
1828
1829 if (ring->pkts_processed < budget_org) {
1830 napi_complete(napi);
16fded7d 1831
703da5a1
RV
1832 /* Re enable the Rx interrupts for the vpath */
1833 vxge_hw_channel_msix_unmask(
1834 (struct __vxge_hw_channel *)ring->handle,
1835 ring->rx_vector_no);
16fded7d 1836 mmiowb();
703da5a1
RV
1837 }
1838
16fded7d
JM
1839 /* Return the local copy of the packet count: once the MSI-X vector is
1840 * unmasked above, the interrupt may fire immediately and preempt this
1841 * NAPI thread, updating ring->pkts_processed under us */
1842 return pkts_processed;
703da5a1
RV
1843}
1844
1845static int vxge_poll_inta(struct napi_struct *napi, int budget)
1846{
1847 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1848 int pkts_processed = 0;
1849 int i;
1850 int budget_org = budget;
1851 struct vxge_ring *ring;
1852
d8ee7071 1853 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
703da5a1
RV
1854
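	/* With INTA there is a single NAPI context for the whole device, so the
	 * budget is shared: each vpath ring is polled in turn with whatever
	 * budget remains, and the loop stops as soon as it is exhausted.
	 */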
1855 for (i = 0; i < vdev->no_of_vpath; i++) {
1856 ring = &vdev->vpaths[i].ring;
1857 ring->budget = budget;
16fded7d 1858 ring->pkts_processed = 0;
703da5a1
RV
1859 vxge_hw_vpath_poll_rx(ring->handle);
1860 pkts_processed += ring->pkts_processed;
1861 budget -= ring->pkts_processed;
1862 if (budget <= 0)
1863 break;
1864 }
1865
1866 VXGE_COMPLETE_ALL_TX(vdev);
1867
1868 if (pkts_processed < budget_org) {
1869 napi_complete(napi);
1870 /* Re enable the Rx interrupts for the ring */
1871 vxge_hw_device_unmask_all(hldev);
1872 vxge_hw_device_flush_io(hldev);
1873 }
1874
1875 return pkts_processed;
1876}
1877
1878#ifdef CONFIG_NET_POLL_CONTROLLER
1879/**
1880 * vxge_netpoll - netpoll event handler entry point
1881 * @dev : pointer to the device structure.
1882 * Description:
1883 * This function will be called by upper layer to check for events on the
1884 * interface in situations where interrupts are disabled. It is used for
1885 * specific in-kernel networking tasks, such as remote consoles and kernel
1886 * debugging over the network (for example, netdump in Red Hat).
1887 */
1888static void vxge_netpoll(struct net_device *dev)
1889{
2c91308f 1890 struct __vxge_hw_device *hldev;
703da5a1
RV
1891 struct vxgedev *vdev;
1892
5f54cebb 1893 vdev = netdev_priv(dev);
d8ee7071 1894 hldev = pci_get_drvdata(vdev->pdev);
703da5a1
RV
1895
1896 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1897
1898 if (pci_channel_offline(vdev->pdev))
1899 return;
1900
1901 disable_irq(dev->irq);
1902 vxge_hw_device_clear_tx_rx(hldev);
1903
1905 VXGE_COMPLETE_ALL_RX(vdev);
1906 VXGE_COMPLETE_ALL_TX(vdev);
1907
1908 enable_irq(dev->irq);
1909
1910 vxge_debug_entryexit(VXGE_TRACE,
1911 "%s:%d Exiting...", __func__, __LINE__);
703da5a1
RV
1912}
1913#endif
1914
1915/* RTH configuration */
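/*
 * Receive Traffic Hashing (RTH) spreads incoming flows across the open
 * vpaths: the adapter hashes the enabled header fields of each frame, uses
 * the hash to index the indirection table (itable) and steers the frame to
 * the vpath named by the matching mtable entry.
 */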
1916static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1917{
1918 enum vxge_hw_status status = VXGE_HW_OK;
1919 struct vxge_hw_rth_hash_types hash_types;
1920 u8 itable[256] = {0}; /* indirection table */
1921 u8 mtable[256] = {0}; /* CPU to vpath mapping */
1922 int index;
1923
1924 /*
1925 * Filling
1926 * - itable with bucket numbers
1927 * - mtable with bucket-to-vpath mapping
1928 */
1929 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1930 itable[index] = index;
1931 mtable[index] = index % vdev->no_of_vpath;
1932 }
1933
703da5a1
RV
1934 /* set indirection table, bucket-to-vpath mapping */
1935 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1936 vdev->no_of_vpath,
1937 mtable, itable,
1938 vdev->config.rth_bkt_sz);
1939 if (status != VXGE_HW_OK) {
1940 vxge_debug_init(VXGE_ERR,
1941 "RTH indirection table configuration failed "
1942 "for vpath:%d", vdev->vpaths[0].device_id);
1943 return status;
1944 }
1945
47f01db4
JM
1946 /* Fill RTH hash types */
1947 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1948 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1949 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1950 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1951 hash_types.hash_type_tcpipv6ex_en =
1952 vdev->config.rth_hash_type_tcpipv6ex;
1953 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1954
703da5a1 1955 /*
47f01db4
JM
1956 * Because the itable_set() method uses the active_table field
1957 * for the target virtual path, the RTH config should be updated
1958 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1959 * when steering frames.
1960 */
703da5a1
RV
1961 for (index = 0; index < vdev->no_of_vpath; index++) {
1962 status = vxge_hw_vpath_rts_rth_set(
1963 vdev->vpaths[index].handle,
1964 vdev->config.rth_algorithm,
1965 &hash_types,
1966 vdev->config.rth_bkt_sz);
703da5a1
RV
1967 if (status != VXGE_HW_OK) {
1968 vxge_debug_init(VXGE_ERR,
1969 "RTH configuration failed for vpath:%d",
1970 vdev->vpaths[index].device_id);
1971 return status;
1972 }
1973 }
1974
1975 return status;
1976}
1977
703da5a1 1978/* reset vpaths */
4d2a5b40 1979enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
703da5a1 1980{
703da5a1 1981 enum vxge_hw_status status = VXGE_HW_OK;
7adf7d1b
JM
1982 struct vxge_vpath *vpath;
1983 int i;
703da5a1 1984
7adf7d1b
JM
1985 for (i = 0; i < vdev->no_of_vpath; i++) {
1986 vpath = &vdev->vpaths[i];
1987 if (vpath->handle) {
1988 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
703da5a1
RV
1989 if (is_vxge_card_up(vdev) &&
1990 vxge_hw_vpath_recover_from_reset(
7adf7d1b 1991 vpath->handle) != VXGE_HW_OK) {
703da5a1
RV
1992 vxge_debug_init(VXGE_ERR,
1993 "vxge_hw_vpath_recover_"
1994 "from_reset failed for vpath: "
1995 "%d", i);
1996 return status;
1997 }
1998 } else {
1999 vxge_debug_init(VXGE_ERR,
2000 "vxge_hw_vpath_reset failed for "
2001 "vpath:%d", i);
2002 return status;
2003 }
2004 }
7adf7d1b
JM
2005 }
2006
703da5a1
RV
2007 return status;
2008}
2009
2010/* close vpaths */
42821a5b 2011static void vxge_close_vpaths(struct vxgedev *vdev, int index)
703da5a1 2012{
7adf7d1b 2013 struct vxge_vpath *vpath;
703da5a1 2014 int i;
7adf7d1b 2015
703da5a1 2016 for (i = index; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2017 vpath = &vdev->vpaths[i];
2018
2019 if (vpath->handle && vpath->is_open) {
2020 vxge_hw_vpath_close(vpath->handle);
703da5a1
RV
2021 vdev->stats.vpaths_open--;
2022 }
7adf7d1b
JM
2023 vpath->is_open = 0;
2024 vpath->handle = NULL;
703da5a1
RV
2025 }
2026}
2027
2028/* open vpaths */
42821a5b 2029static int vxge_open_vpaths(struct vxgedev *vdev)
703da5a1 2030{
7adf7d1b 2031 struct vxge_hw_vpath_attr attr;
703da5a1 2032 enum vxge_hw_status status;
7adf7d1b 2033 struct vxge_vpath *vpath;
703da5a1 2034 u32 vp_id = 0;
7adf7d1b 2035 int i;
703da5a1
RV
2036
2037 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b 2038 vpath = &vdev->vpaths[i];
7adf7d1b 2039 vxge_assert(vpath->is_configured);
e7935c96
JM
2040
2041 if (!vdev->titan1) {
2042 struct vxge_hw_vp_config *vcfg;
2043 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2044
2045 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2046 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2047 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2048 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2049 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2050 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2051 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2052 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2053 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2054 }
2055
7adf7d1b 2056 attr.vp_id = vpath->device_id;
703da5a1
RV
2057 attr.fifo_attr.callback = vxge_xmit_compl;
2058 attr.fifo_attr.txdl_term = vxge_tx_term;
2059 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
7adf7d1b 2060 attr.fifo_attr.userdata = &vpath->fifo;
703da5a1
RV
2061
2062 attr.ring_attr.callback = vxge_rx_1b_compl;
2063 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2064 attr.ring_attr.rxd_term = vxge_rx_term;
2065 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
7adf7d1b 2066 attr.ring_attr.userdata = &vpath->ring;
703da5a1 2067
7adf7d1b
JM
2068 vpath->ring.ndev = vdev->ndev;
2069 vpath->ring.pdev = vdev->pdev;
528f7272 2070
7adf7d1b 2071 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
703da5a1 2072 if (status == VXGE_HW_OK) {
7adf7d1b 2073 vpath->fifo.handle =
703da5a1 2074 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
7adf7d1b 2075 vpath->ring.handle =
703da5a1 2076 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
7adf7d1b 2077 vpath->fifo.tx_steering_type =
703da5a1 2078 vdev->config.tx_steering_type;
7adf7d1b
JM
2079 vpath->fifo.ndev = vdev->ndev;
2080 vpath->fifo.pdev = vdev->pdev;
98f45da2
JM
2081 if (vdev->config.tx_steering_type)
2082 vpath->fifo.txq =
2083 netdev_get_tx_queue(vdev->ndev, i);
2084 else
2085 vpath->fifo.txq =
2086 netdev_get_tx_queue(vdev->ndev, 0);
7adf7d1b 2087 vpath->fifo.indicate_max_pkts =
703da5a1 2088 vdev->config.fifo_indicate_max_pkts;
16fded7d 2089 vpath->fifo.tx_vector_no = 0;
7adf7d1b 2090 vpath->ring.rx_vector_no = 0;
b81b3733 2091 vpath->ring.rx_hwts = vdev->rx_hwts;
7adf7d1b
JM
2092 vpath->is_open = 1;
2093 vdev->vp_handles[i] = vpath->handle;
7adf7d1b 2094 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
703da5a1
RV
2095 vdev->stats.vpaths_open++;
2096 } else {
2097 vdev->stats.vpath_open_fail++;
528f7272
JM
2098 vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
2099 "open with status: %d",
2100 vdev->ndev->name, vpath->device_id,
2101 status);
703da5a1
RV
2102 vxge_close_vpaths(vdev, 0);
2103 return -EPERM;
2104 }
2105
7adf7d1b 2106 vp_id = vpath->handle->vpath->vp_id;
703da5a1
RV
2107 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2108 }
528f7272 2109
703da5a1
RV
2110 return VXGE_HW_OK;
2111}
2112
16fded7d
JM
2113/**
2114 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2115 * if the interrupts are not within a range
2116 * @fifo: pointer to transmit fifo structure
2117 * Description: The function changes the boundary timer and restriction timer
2118 * values depending on the traffic load
2119 * Return Value: None
2120 */
2121static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2122{
2123 fifo->interrupt_count++;
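	/* Sample the Tx interrupt rate over a ~10ms window (HZ / 100 jiffies):
	 * above VXGE_T1A_MAX_TX_INTERRUPT_COUNT interrupts per window, stretch
	 * the restriction timer to coalesce; otherwise drop it back to zero
	 * for lowest latency.
	 */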
2124 if (jiffies > fifo->jiffies + HZ / 100) {
2125 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2126
2127 fifo->jiffies = jiffies;
2128 if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2129 hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2130 hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2131 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2132 } else if (hw_fifo->rtimer != 0) {
2133 hw_fifo->rtimer = 0;
2134 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2135 }
2136 fifo->interrupt_count = 0;
2137 }
2138}
2139
2140/**
2141 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2142 * if the interrupts are not within a range
2143 * @ring: pointer to receive ring structure
2144 * Description: The function increases or decreases the packet counts within
2145 * the ranges of traffic utilization, if the interrupts due to this ring are
2146 * not within a fixed range.
2147 * Return Value: Nothing
2148 */
2149static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2150{
2151 ring->interrupt_count++;
2152 if (jiffies > ring->jiffies + HZ / 100) {
2153 struct __vxge_hw_ring *hw_ring = ring->handle;
2154
2155 ring->jiffies = jiffies;
2156 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2157 hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2158 hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2159 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2160 } else if (hw_ring->rtimer != 0) {
2161 hw_ring->rtimer = 0;
2162 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2163 }
2164 ring->interrupt_count = 0;
2165 }
2166}
2167
703da5a1
RV
2168/*
2169 * vxge_isr_napi
2170 * @irq: the irq of the device.
2171 * @dev_id: a void pointer to the hldev structure of the Titan device
2172 * @ptregs: pointer to the registers pushed on the stack.
2173 *
2174 * This function is the ISR handler of the device when napi is enabled. It
2175 * identifies the reason for the interrupt and calls the relevant service
2176 * routines.
2177 */
2178static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2179{
703da5a1 2180 struct net_device *dev;
a5d165b5 2181 struct __vxge_hw_device *hldev;
703da5a1
RV
2182 u64 reason;
2183 enum vxge_hw_status status;
2c91308f 2184 struct vxgedev *vdev = (struct vxgedev *)dev_id;
703da5a1
RV
2185
2186 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2187
a5d165b5 2188 dev = vdev->ndev;
d8ee7071 2189 hldev = pci_get_drvdata(vdev->pdev);
703da5a1
RV
2190
2191 if (pci_channel_offline(vdev->pdev))
2192 return IRQ_NONE;
2193
2194 if (unlikely(!is_vxge_card_up(vdev)))
4d2a5b40 2195 return IRQ_HANDLED;
703da5a1 2196
528f7272 2197 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
703da5a1
RV
2198 if (status == VXGE_HW_OK) {
2199 vxge_hw_device_mask_all(hldev);
2200
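		/* Traffic interrupt on one of our deployed vpaths: keep the
		 * device masked, clear the Tx/Rx condition and let the NAPI
		 * poll loop do the work; any other reason is unmasked again
		 * below.
		 */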
2201 if (reason &
2202 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2203 vdev->vpaths_deployed >>
2204 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2205
2206 vxge_hw_device_clear_tx_rx(hldev);
2207 napi_schedule(&vdev->napi);
2208 vxge_debug_intr(VXGE_TRACE,
2209 "%s:%d Exiting...", __func__, __LINE__);
2210 return IRQ_HANDLED;
2211 } else
2212 vxge_hw_device_unmask_all(hldev);
2213 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2214 (status == VXGE_HW_ERR_CRITICAL) ||
2215 (status == VXGE_HW_ERR_FIFO))) {
2216 vxge_hw_device_mask_all(hldev);
2217 vxge_hw_device_flush_io(hldev);
2218 return IRQ_HANDLED;
2219 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2220 return IRQ_HANDLED;
2221
2222 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2223 return IRQ_NONE;
2224}
2225
2226#ifdef CONFIG_PCI_MSI
2227
16fded7d 2228static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
703da5a1
RV
2229{
2230 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2231
16fded7d
JM
2232 adaptive_coalesce_tx_interrupts(fifo);
2233
2234 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2235 fifo->tx_vector_no);
2236
2237 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2238 fifo->tx_vector_no);
2239
703da5a1
RV
2240 VXGE_COMPLETE_VPATH_TX(fifo);
2241
16fded7d
JM
2242 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2243 fifo->tx_vector_no);
2244
2245 mmiowb();
2246
703da5a1
RV
2247 return IRQ_HANDLED;
2248}
2249
16fded7d 2250static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
703da5a1
RV
2251{
2252 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2253
16fded7d
JM
2254 adaptive_coalesce_rx_interrupts(ring);
2255
703da5a1 2256 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
16fded7d
JM
2257 ring->rx_vector_no);
2258
2259 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2260 ring->rx_vector_no);
703da5a1
RV
2261
2262 napi_schedule(&ring->napi);
2263 return IRQ_HANDLED;
2264}
2265
2266static irqreturn_t
2267vxge_alarm_msix_handle(int irq, void *dev_id)
2268{
2269 int i;
2270 enum vxge_hw_status status;
2271 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2272 struct vxgedev *vdev = vpath->vdev;
b59c9457
SH
2273 int msix_id = (vpath->handle->vpath->vp_id *
2274 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
703da5a1
RV
2275
2276 for (i = 0; i < vdev->no_of_vpath; i++) {
25985edc 2277 /* Reduce the chance of losing alarm interrupts by masking
16fded7d
JM
2278 * the vector. A pending bit will be set if an alarm is
2279 * generated, and the interrupt will fire once the vector is unmasked.
2280 */
b59c9457 2281 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
16fded7d
JM
2282 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2283 mmiowb();
703da5a1
RV
2284
2285 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2286 vdev->exec_mode);
2287 if (status == VXGE_HW_OK) {
703da5a1 2288 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
16fded7d
JM
2289 msix_id);
2290 mmiowb();
703da5a1
RV
2291 continue;
2292 }
2293 vxge_debug_intr(VXGE_ERR,
2294 "%s: vxge_hw_vpath_alarm_process failed %x ",
2295 VXGE_DRIVER_NAME, status);
2296 }
2297 return IRQ_HANDLED;
2298}
2299
2300static int vxge_alloc_msix(struct vxgedev *vdev)
2301{
2302 int j, i, ret = 0;
b59c9457 2303 int msix_intr_vect = 0, temp;
703da5a1
RV
2304 vdev->intr_cnt = 0;
2305
b59c9457 2306start:
703da5a1
RV
2307 /* Tx/Rx MSIX Vectors count */
2308 vdev->intr_cnt = vdev->no_of_vpath * 2;
2309
2310 /* Alarm MSIX Vectors count */
2311 vdev->intr_cnt++;
2312
baeb2ffa
JP
2313 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2314 GFP_KERNEL);
703da5a1
RV
2315 if (!vdev->entries) {
2316 vxge_debug_init(VXGE_ERR,
2317 "%s: memory allocation failed",
2318 VXGE_DRIVER_NAME);
cc413d90
MS
2319 ret = -ENOMEM;
2320 goto alloc_entries_failed;
703da5a1
RV
2321 }
2322
baeb2ffa
JP
2323 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2324 sizeof(struct vxge_msix_entry),
2325 GFP_KERNEL);
703da5a1
RV
2326 if (!vdev->vxge_entries) {
2327 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2328 VXGE_DRIVER_NAME);
cc413d90
MS
2329 ret = -ENOMEM;
2330 goto alloc_vxge_entries_failed;
703da5a1
RV
2331 }
2332
b59c9457 2333 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
703da5a1
RV
2334
2335 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2336
2337 /* Initialize the fifo vector */
2338 vdev->entries[j].entry = msix_intr_vect;
2339 vdev->vxge_entries[j].entry = msix_intr_vect;
2340 vdev->vxge_entries[j].in_use = 0;
2341 j++;
2342
2343 /* Initialize the ring vector */
2344 vdev->entries[j].entry = msix_intr_vect + 1;
2345 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2346 vdev->vxge_entries[j].in_use = 0;
2347 j++;
2348 }
2349
2350 /* Initialize the alarm vector */
b59c9457
SH
2351 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2352 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
703da5a1
RV
2353 vdev->vxge_entries[j].in_use = 0;
2354
b59c9457 2355 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
b59c9457 2356 if (ret > 0) {
703da5a1
RV
2357 vxge_debug_init(VXGE_ERR,
2358 "%s: MSI-X enable failed for %d vectors, ret: %d",
b59c9457 2359 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
cc413d90
MS
2360 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
2361 ret = -ENODEV;
2362 goto enable_msix_failed;
2363 }
2364
703da5a1
RV
2365 kfree(vdev->entries);
2366 kfree(vdev->vxge_entries);
2367 vdev->entries = NULL;
2368 vdev->vxge_entries = NULL;
b59c9457
SH
2369 /* Retry with fewer vectors: each vpath needs a Tx and an Rx vector plus one alarm vector, so only (ret - 1) / 2 vpaths fit */
2370 temp = (ret - 1)/2;
2371 vxge_close_vpaths(vdev, temp);
2372 vdev->no_of_vpath = temp;
2373 goto start;
cc413d90
MS
2374 } else if (ret < 0) {
2375 ret = -ENODEV;
2376 goto enable_msix_failed;
2377 }
703da5a1 2378 return 0;
cc413d90
MS
2379
2380enable_msix_failed:
2381 kfree(vdev->vxge_entries);
2382alloc_vxge_entries_failed:
2383 kfree(vdev->entries);
2384alloc_entries_failed:
2385 return ret;
703da5a1
RV
2386}
2387
2388static int vxge_enable_msix(struct vxgedev *vdev)
2389{
2390
2391 int i, ret = 0;
703da5a1 2392 /* 0 - Tx, 1 - Rx */
b59c9457
SH
2393 int tim_msix_id[4] = {0, 1, 0, 0};
2394
703da5a1
RV
2395 vdev->intr_cnt = 0;
2396
2397 /* allocate msix vectors */
2398 ret = vxge_alloc_msix(vdev);
2399 if (!ret) {
703da5a1 2400 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b 2401 struct vxge_vpath *vpath = &vdev->vpaths[i];
703da5a1 2402
7adf7d1b
JM
2403 /* If the fifo or ring is not enabled, the MSIX vector for
2404 * it should be set to 0.
2405 */
2406 vpath->ring.rx_vector_no = (vpath->device_id *
2407 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
703da5a1 2408
16fded7d
JM
2409 vpath->fifo.tx_vector_no = (vpath->device_id *
2410 VXGE_HW_VPATH_MSIX_ACTIVE);
2411
7adf7d1b
JM
2412 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2413 VXGE_ALARM_MSIX_ID);
703da5a1
RV
2414 }
2415 }
2416
2417 return ret;
2418}
2419
2420static void vxge_rem_msix_isr(struct vxgedev *vdev)
2421{
2422 int intr_cnt;
2423
b59c9457 2424 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
703da5a1
RV
2425 intr_cnt++) {
2426 if (vdev->vxge_entries[intr_cnt].in_use) {
2427 synchronize_irq(vdev->entries[intr_cnt].vector);
2428 free_irq(vdev->entries[intr_cnt].vector,
2429 vdev->vxge_entries[intr_cnt].arg);
2430 vdev->vxge_entries[intr_cnt].in_use = 0;
2431 }
2432 }
2433
2434 kfree(vdev->entries);
2435 kfree(vdev->vxge_entries);
2436 vdev->entries = NULL;
2437 vdev->vxge_entries = NULL;
2438
2439 if (vdev->config.intr_type == MSI_X)
2440 pci_disable_msix(vdev->pdev);
2441}
2442#endif
2443
2444static void vxge_rem_isr(struct vxgedev *vdev)
2445{
2c91308f 2446 struct __vxge_hw_device *hldev;
d8ee7071 2447 hldev = pci_get_drvdata(vdev->pdev);
703da5a1
RV
2448
2449#ifdef CONFIG_PCI_MSI
2450 if (vdev->config.intr_type == MSI_X) {
2451 vxge_rem_msix_isr(vdev);
2452 } else
2453#endif
2454 if (vdev->config.intr_type == INTA) {
2455 synchronize_irq(vdev->pdev->irq);
a5d165b5 2456 free_irq(vdev->pdev->irq, vdev);
703da5a1
RV
2457 }
2458}
2459
2460static int vxge_add_isr(struct vxgedev *vdev)
2461{
2462 int ret = 0;
703da5a1
RV
2463#ifdef CONFIG_PCI_MSI
2464 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
703da5a1
RV
2465 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2466
2467 if (vdev->config.intr_type == MSI_X)
2468 ret = vxge_enable_msix(vdev);
2469
2470 if (ret) {
2471 vxge_debug_init(VXGE_ERR,
2472 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
eb5f10c2
SH
2473 vxge_debug_init(VXGE_ERR,
2474 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2475 vdev->config.intr_type = INTA;
703da5a1
RV
2476 }
2477
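	/* MSI-X layout: each vpath owns a block of VXGE_HW_VPATH_MSIX_ACTIVE
	 * vector slots - slot 0 carries its Tx completions and slot 1 its Rx
	 * traffic - while a single alarm vector (VXGE_ALARM_MSIX_ID) is shared
	 * by all vpaths and is registered after the per-vpath loop below.
	 */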
2478 if (vdev->config.intr_type == MSI_X) {
2479 for (intr_idx = 0;
2480 intr_idx < (vdev->no_of_vpath *
2481 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2482
2483 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2484 irq_req = 0;
2485
2486 switch (msix_idx) {
2487 case 0:
2488 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
b59c9457
SH
2489 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2490 vdev->ndev->name,
2491 vdev->entries[intr_cnt].entry,
2492 pci_fun, vp_idx);
703da5a1
RV
2493 ret = request_irq(
2494 vdev->entries[intr_cnt].vector,
2495 vxge_tx_msix_handle, 0,
2496 vdev->desc[intr_cnt],
2497 &vdev->vpaths[vp_idx].fifo);
2498 vdev->vxge_entries[intr_cnt].arg =
2499 &vdev->vpaths[vp_idx].fifo;
2500 irq_req = 1;
2501 break;
2502 case 1:
2503 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
b59c9457
SH
2504 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2505 vdev->ndev->name,
2506 vdev->entries[intr_cnt].entry,
2507 pci_fun, vp_idx);
703da5a1
RV
2508 ret = request_irq(
2509 vdev->entries[intr_cnt].vector,
2510 vxge_rx_msix_napi_handle,
2511 0,
2512 vdev->desc[intr_cnt],
2513 &vdev->vpaths[vp_idx].ring);
2514 vdev->vxge_entries[intr_cnt].arg =
2515 &vdev->vpaths[vp_idx].ring;
2516 irq_req = 1;
2517 break;
2518 }
2519
2520 if (ret) {
2521 vxge_debug_init(VXGE_ERR,
2522 "%s: MSIX - %d Registration failed",
2523 vdev->ndev->name, intr_cnt);
2524 vxge_rem_msix_isr(vdev);
eb5f10c2
SH
2525 vdev->config.intr_type = INTA;
2526 vxge_debug_init(VXGE_ERR,
2527 "%s: Defaulting to INTA"
2528 , vdev->ndev->name);
703da5a1 2529 goto INTA_MODE;
703da5a1
RV
2530 }
2531
2532 if (irq_req) {
2533 /* We requested this MSI-X interrupt */
2534 vdev->vxge_entries[intr_cnt].in_use = 1;
b59c9457
SH
2535 msix_idx += vdev->vpaths[vp_idx].device_id *
2536 VXGE_HW_VPATH_MSIX_ACTIVE;
703da5a1
RV
2537 vxge_hw_vpath_msix_unmask(
2538 vdev->vpaths[vp_idx].handle,
b59c9457 2539 msix_idx);
703da5a1
RV
2540 intr_cnt++;
2541 }
2542
2543 /* Point to next vpath handler */
8e95a202
JP
2544 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2545 (vp_idx < (vdev->no_of_vpath - 1)))
2546 vp_idx++;
703da5a1
RV
2547 }
2548
b59c9457 2549 intr_cnt = vdev->no_of_vpath * 2;
703da5a1 2550 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
b59c9457
SH
2551 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2552 vdev->ndev->name,
2553 vdev->entries[intr_cnt].entry,
2554 pci_fun);
703da5a1
RV
2555 /* For Alarm interrupts */
2556 ret = request_irq(vdev->entries[intr_cnt].vector,
2557 vxge_alarm_msix_handle, 0,
2558 vdev->desc[intr_cnt],
b59c9457 2559 &vdev->vpaths[0]);
703da5a1
RV
2560 if (ret) {
2561 vxge_debug_init(VXGE_ERR,
2562 "%s: MSIX - %d Registration failed",
2563 vdev->ndev->name, intr_cnt);
2564 vxge_rem_msix_isr(vdev);
eb5f10c2
SH
2565 vdev->config.intr_type = INTA;
2566 vxge_debug_init(VXGE_ERR,
2567 "%s: Defaulting to INTA",
2568 vdev->ndev->name);
703da5a1 2569 goto INTA_MODE;
703da5a1
RV
2570 }
2571
b59c9457
SH
2572 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2573 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
703da5a1 2574 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
b59c9457 2575 msix_idx);
703da5a1 2576 vdev->vxge_entries[intr_cnt].in_use = 1;
b59c9457 2577 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
703da5a1
RV
2578 }
2579INTA_MODE:
2580#endif
703da5a1
RV
2581
2582 if (vdev->config.intr_type == INTA) {
b59c9457
SH
2583 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2584 "%s:vxge:INTA", vdev->ndev->name);
eb5f10c2
SH
2585 vxge_hw_device_set_intr_type(vdev->devh,
2586 VXGE_HW_INTR_MODE_IRQLINE);
16fded7d
JM
2587
2588 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2589
703da5a1
RV
2590 ret = request_irq((int) vdev->pdev->irq,
2591 vxge_isr_napi,
a5d165b5 2592 IRQF_SHARED, vdev->desc[0], vdev);
703da5a1
RV
2593 if (ret) {
2594 vxge_debug_init(VXGE_ERR,
2595 "%s %s-%d: ISR registration failed",
2596 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2597 return -ENODEV;
2598 }
2599 vxge_debug_init(VXGE_TRACE,
2600 "new %s-%d line allocated",
2601 "IRQ", vdev->pdev->irq);
2602 }
2603
2604 return VXGE_HW_OK;
2605}
2606
2607static void vxge_poll_vp_reset(unsigned long data)
2608{
2609 struct vxgedev *vdev = (struct vxgedev *)data;
2610 int i, j = 0;
2611
2612 for (i = 0; i < vdev->no_of_vpath; i++) {
2613 if (test_bit(i, &vdev->vp_reset)) {
2614 vxge_reset_vpath(vdev, i);
2615 j++;
2616 }
2617 }
2618 if (j && (vdev->config.intr_type != MSI_X)) {
2619 vxge_hw_device_unmask_all(vdev->devh);
2620 vxge_hw_device_flush_io(vdev->devh);
2621 }
2622
2623 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2624}
2625
2626static void vxge_poll_vp_lockup(unsigned long data)
2627{
2628 struct vxgedev *vdev = (struct vxgedev *)data;
703da5a1 2629 enum vxge_hw_status status = VXGE_HW_OK;
7adf7d1b
JM
2630 struct vxge_vpath *vpath;
2631 struct vxge_ring *ring;
2632 int i;
703da5a1
RV
2633
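	/* Lockup heuristic: a ring that saw no new frames since the last run
	 * and failed the hardware leak check twice in a row is treated as
	 * stuck - mark it for reset, mask its interrupts and stop its Tx queue
	 * until vxge_poll_vp_reset() resets the vpath.
	 */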
2634 for (i = 0; i < vdev->no_of_vpath; i++) {
2635 ring = &vdev->vpaths[i].ring;
2636 /* Did this vpath receive any packets? */
2637 if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
2638 status = vxge_hw_vpath_check_leak(ring->handle);
2639
2640 /* Did it receive any packets last time? */
2641 if ((VXGE_HW_FAIL == status) &&
2642 (VXGE_HW_FAIL == ring->last_status)) {
2643
2644 /* schedule vpath reset */
2645 if (!test_and_set_bit(i, &vdev->vp_reset)) {
7adf7d1b 2646 vpath = &vdev->vpaths[i];
703da5a1
RV
2647
2648 /* disable interrupts for this vpath */
2649 vxge_vpath_intr_disable(vdev, i);
2650
2651 /* stop the queue for this vpath */
98f45da2 2652 netif_tx_stop_queue(vpath->fifo.txq);
703da5a1
RV
2653 continue;
2654 }
2655 }
2656 }
2657 ring->stats.prev_rx_frms = ring->stats.rx_frms;
2658 ring->last_status = status;
2659 }
2660
2661 /* Check every millisecond */
2662 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2663}
2664
feb990d4
MM
2665static u32 vxge_fix_features(struct net_device *dev, u32 features)
2666{
2667 u32 changed = dev->features ^ features;
2668
2669 /* Enabling RTH requires some of the logic in vxge_device_register and a
2670 * vpath reset. Due to these restrictions, only allow modification
2671 * while the interface is down.
2672 */
2673 if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2674 features ^= NETIF_F_RXHASH;
2675
2676 return features;
2677}
2678
2679static int vxge_set_features(struct net_device *dev, u32 features)
2680{
2681 struct vxgedev *vdev = netdev_priv(dev);
2682 u32 changed = dev->features ^ features;
2683
2684 if (!(changed & NETIF_F_RXHASH))
2685 return 0;
2686
2687 /* !netif_running() ensured by vxge_fix_features() */
2688
2689 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2690 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2691 dev->features = features ^ NETIF_F_RXHASH;
2692 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
2693 return -EIO;
2694 }
2695
2696 return 0;
2697}
2698
703da5a1
RV
2699/**
2700 * vxge_open
2701 * @dev: pointer to the device structure.
2702 *
2703 * This function is the open entry point of the driver. It mainly calls a
2704 * function to allocate Rx buffers and inserts them into the buffer
2705 * descriptors and then enables the Rx part of the NIC.
2706 * Return value: '0' on success and an appropriate (-)ve integer as
2707 * defined in errno.h file on failure.
2708 */
528f7272 2709static int vxge_open(struct net_device *dev)
703da5a1
RV
2710{
2711 enum vxge_hw_status status;
2712 struct vxgedev *vdev;
2713 struct __vxge_hw_device *hldev;
7adf7d1b 2714 struct vxge_vpath *vpath;
703da5a1
RV
2715 int ret = 0;
2716 int i;
2717 u64 val64, function_mode;
528f7272 2718
703da5a1
RV
2719 vxge_debug_entryexit(VXGE_TRACE,
2720 "%s: %s:%d", dev->name, __func__, __LINE__);
2721
5f54cebb 2722 vdev = netdev_priv(dev);
d8ee7071 2723 hldev = pci_get_drvdata(vdev->pdev);
703da5a1
RV
2724 function_mode = vdev->config.device_hw_info.function_mode;
2725
2726 /* make sure the link is off by default every time the NIC is
2727 * initialized */
2728 netif_carrier_off(dev);
2729
703da5a1
RV
2730 /* Open VPATHs */
2731 status = vxge_open_vpaths(vdev);
2732 if (status != VXGE_HW_OK) {
2733 vxge_debug_init(VXGE_ERR,
2734 "%s: fatal: Vpath open failed", vdev->ndev->name);
2735 ret = -EPERM;
2736 goto out0;
2737 }
2738
2739 vdev->mtu = dev->mtu;
2740
2741 status = vxge_add_isr(vdev);
2742 if (status != VXGE_HW_OK) {
2743 vxge_debug_init(VXGE_ERR,
2744 "%s: fatal: ISR add failed", dev->name);
2745 ret = -EPERM;
2746 goto out1;
2747 }
2748
703da5a1
RV
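	/* INTA uses one NAPI context shared by every vpath (vxge_poll_inta);
	 * with MSI-X each ring gets its own NAPI instance so the Rx vectors
	 * can be polled independently.
	 */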
2749 if (vdev->config.intr_type != MSI_X) {
2750 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2751 vdev->config.napi_weight);
2752 napi_enable(&vdev->napi);
7adf7d1b
JM
2753 for (i = 0; i < vdev->no_of_vpath; i++) {
2754 vpath = &vdev->vpaths[i];
2755 vpath->ring.napi_p = &vdev->napi;
2756 }
703da5a1
RV
2757 } else {
2758 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2759 vpath = &vdev->vpaths[i];
2760 netif_napi_add(dev, &vpath->ring.napi,
703da5a1 2761 vxge_poll_msix, vdev->config.napi_weight);
7adf7d1b
JM
2762 napi_enable(&vpath->ring.napi);
2763 vpath->ring.napi_p = &vpath->ring.napi;
703da5a1
RV
2764 }
2765 }
2766
2767 /* configure RTH */
2768 if (vdev->config.rth_steering) {
2769 status = vxge_rth_configure(vdev);
2770 if (status != VXGE_HW_OK) {
2771 vxge_debug_init(VXGE_ERR,
2772 "%s: fatal: RTH configuration failed",
2773 dev->name);
2774 ret = -EPERM;
2775 goto out2;
2776 }
2777 }
47f01db4
JM
2778 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2779 hldev->config.rth_en ? "enabled" : "disabled");
703da5a1
RV
2780
2781 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2782 vpath = &vdev->vpaths[i];
2783
703da5a1 2784 /* set initial mtu before enabling the device */
7adf7d1b 2785 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
703da5a1
RV
2786 if (status != VXGE_HW_OK) {
2787 vxge_debug_init(VXGE_ERR,
2788 "%s: fatal: can not set new MTU", dev->name);
2789 ret = -EPERM;
2790 goto out2;
2791 }
2792 }
2793
2794 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2795 vxge_debug_init(vdev->level_trace,
2796 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2797 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2798
7adf7d1b
JM
2799 /* Restore the DA, VID table and also multicast and promiscuous mode
2800 * states
2801 */
2802 if (vdev->all_multi_flg) {
2803 for (i = 0; i < vdev->no_of_vpath; i++) {
2804 vpath = &vdev->vpaths[i];
2805 vxge_restore_vpath_mac_addr(vpath);
2806 vxge_restore_vpath_vid_table(vpath);
2807
2808 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2809 if (status != VXGE_HW_OK)
2810 vxge_debug_init(VXGE_ERR,
2811 "%s:%d Enabling multicast failed",
2812 __func__, __LINE__);
2813 }
703da5a1
RV
2814 }
2815
2816 /* Enable vpaths to sniff all unicast/multicast traffic that is not
25985edc 2817 * addressed to them. We allow promiscuous mode for PF only
703da5a1
RV
2818 */
2819
2820 val64 = 0;
2821 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2822 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2823
2824 vxge_hw_mgmt_reg_write(vdev->devh,
2825 vxge_hw_mgmt_reg_type_mrpcim,
2826 0,
2827 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2828 rxmac_authorize_all_addr),
2829 val64);
2830
2831 vxge_hw_mgmt_reg_write(vdev->devh,
2832 vxge_hw_mgmt_reg_type_mrpcim,
2833 0,
2834 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2835 rxmac_authorize_all_vid),
2836 val64);
2837
2838 vxge_set_multicast(dev);
2839
2840 /* Enabling Bcast and mcast for all vpath */
2841 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2842 vpath = &vdev->vpaths[i];
2843 status = vxge_hw_vpath_bcast_enable(vpath->handle);
703da5a1
RV
2844 if (status != VXGE_HW_OK)
2845 vxge_debug_init(VXGE_ERR,
2846 "%s : Can not enable bcast for vpath "
2847 "id %d", dev->name, i);
2848 if (vdev->config.addr_learn_en) {
7adf7d1b 2849 status = vxge_hw_vpath_mcast_enable(vpath->handle);
703da5a1
RV
2850 if (status != VXGE_HW_OK)
2851 vxge_debug_init(VXGE_ERR,
2852 "%s : Can not enable mcast for vpath "
2853 "id %d", dev->name, i);
2854 }
2855 }
2856
2857 vxge_hw_device_setpause_data(vdev->devh, 0,
2858 vdev->config.tx_pause_enable,
2859 vdev->config.rx_pause_enable);
2860
2861 if (vdev->vp_reset_timer.function == NULL)
2862 vxge_os_timer(vdev->vp_reset_timer,
2863 vxge_poll_vp_reset, vdev, (HZ/2));
2864
e7935c96
JM
2865 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2866 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2867 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2868 HZ / 2);
703da5a1
RV
2869
2870 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2871
2872 smp_wmb();
2873
2874 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2875 netif_carrier_on(vdev->ndev);
75f5e1c6 2876 netdev_notice(vdev->ndev, "Link Up\n");
703da5a1
RV
2877 vdev->stats.link_up++;
2878 }
2879
2880 vxge_hw_device_intr_enable(vdev->devh);
2881
2882 smp_wmb();
2883
2884 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2885 vpath = &vdev->vpaths[i];
2886
2887 vxge_hw_vpath_enable(vpath->handle);
703da5a1 2888 smp_wmb();
7adf7d1b 2889 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
703da5a1
RV
2890 }
2891
d03848e0 2892 netif_tx_start_all_queues(vdev->ndev);
16fded7d
JM
2893
2894 /* configure CI */
2895 vxge_config_ci_for_tti_rti(vdev);
2896
703da5a1
RV
2897 goto out0;
2898
2899out2:
2900 vxge_rem_isr(vdev);
2901
2902 /* Disable napi */
2903 if (vdev->config.intr_type != MSI_X)
2904 napi_disable(&vdev->napi);
2905 else {
2906 for (i = 0; i < vdev->no_of_vpath; i++)
2907 napi_disable(&vdev->vpaths[i].ring.napi);
2908 }
2909
2910out1:
2911 vxge_close_vpaths(vdev, 0);
2912out0:
2913 vxge_debug_entryexit(VXGE_TRACE,
2914 "%s: %s:%d Exiting...",
2915 dev->name, __func__, __LINE__);
2916 return ret;
2917}
2918
25985edc 2919/* Loop through the mac address list and delete all the entries */
42821a5b 2920static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
703da5a1
RV
2921{
2922
2923 struct list_head *entry, *next;
2924 if (list_empty(&vpath->mac_addr_list))
2925 return;
2926
2927 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2928 list_del(entry);
2929 kfree((struct vxge_mac_addrs *)entry);
2930 }
2931}
2932
2933static void vxge_napi_del_all(struct vxgedev *vdev)
2934{
2935 int i;
2936 if (vdev->config.intr_type != MSI_X)
2937 netif_napi_del(&vdev->napi);
2938 else {
2939 for (i = 0; i < vdev->no_of_vpath; i++)
2940 netif_napi_del(&vdev->vpaths[i].ring.napi);
2941 }
703da5a1
RV
2942}
2943
42821a5b 2944static int do_vxge_close(struct net_device *dev, int do_io)
703da5a1
RV
2945{
2946 enum vxge_hw_status status;
2947 struct vxgedev *vdev;
2948 struct __vxge_hw_device *hldev;
2949 int i;
2950 u64 val64, vpath_vector;
2951 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2952 dev->name, __func__, __LINE__);
2953
5f54cebb 2954 vdev = netdev_priv(dev);
d8ee7071 2955 hldev = pci_get_drvdata(vdev->pdev);
703da5a1 2956
bd9ee680
SH
2957 if (unlikely(!is_vxge_card_up(vdev)))
2958 return 0;
2959
703da5a1
RV
2960 /* If vxge_handle_crit_err task is executing,
2961 * wait till it completes. */
2962 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2963 msleep(50);
2964
703da5a1
RV
2965 if (do_io) {
2966 /* Put the vpath back in normal mode */
2967 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2968 status = vxge_hw_mgmt_reg_read(vdev->devh,
2969 vxge_hw_mgmt_reg_type_mrpcim,
2970 0,
2971 (ulong)offsetof(
2972 struct vxge_hw_mrpcim_reg,
2973 rts_mgr_cbasin_cfg),
2974 &val64);
703da5a1
RV
2975 if (status == VXGE_HW_OK) {
2976 val64 &= ~vpath_vector;
2977 status = vxge_hw_mgmt_reg_write(vdev->devh,
2978 vxge_hw_mgmt_reg_type_mrpcim,
2979 0,
2980 (ulong)offsetof(
2981 struct vxge_hw_mrpcim_reg,
2982 rts_mgr_cbasin_cfg),
2983 val64);
2984 }
2985
25985edc 2986 /* Remove the function 0 from promiscuous mode */
703da5a1
RV
2987 vxge_hw_mgmt_reg_write(vdev->devh,
2988 vxge_hw_mgmt_reg_type_mrpcim,
2989 0,
2990 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2991 rxmac_authorize_all_addr),
2992 0);
2993
2994 vxge_hw_mgmt_reg_write(vdev->devh,
2995 vxge_hw_mgmt_reg_type_mrpcim,
2996 0,
2997 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2998 rxmac_authorize_all_vid),
2999 0);
3000
3001 smp_wmb();
3002 }
e7935c96
JM
3003
3004 if (vdev->titan1)
3005 del_timer_sync(&vdev->vp_lockup_timer);
703da5a1
RV
3006
3007 del_timer_sync(&vdev->vp_reset_timer);
3008
4d2a5b40
JM
3009 if (do_io)
3010 vxge_hw_device_wait_receive_idle(hldev);
3011
3012 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3013
703da5a1
RV
3014 /* Disable napi */
3015 if (vdev->config.intr_type != MSI_X)
3016 napi_disable(&vdev->napi);
3017 else {
3018 for (i = 0; i < vdev->no_of_vpath; i++)
3019 napi_disable(&vdev->vpaths[i].ring.napi);
3020 }
3021
3022 netif_carrier_off(vdev->ndev);
75f5e1c6 3023 netdev_notice(vdev->ndev, "Link Down\n");
d03848e0 3024 netif_tx_stop_all_queues(vdev->ndev);
703da5a1
RV
3025
3026 /* Note that at this point xmit() is stopped by upper layer */
3027 if (do_io)
3028 vxge_hw_device_intr_disable(vdev->devh);
3029
703da5a1
RV
3030 vxge_rem_isr(vdev);
3031
3032 vxge_napi_del_all(vdev);
3033
3034 if (do_io)
3035 vxge_reset_all_vpaths(vdev);
3036
3037 vxge_close_vpaths(vdev, 0);
3038
3039 vxge_debug_entryexit(VXGE_TRACE,
3040 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
3041
703da5a1
RV
3042 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
3043
3044 return 0;
3045}
3046
3047/**
3048 * vxge_close
3049 * @dev: device pointer.
3050 *
3051 * This is the stop entry point of the driver. It needs to undo exactly
3052 * whatever was done by the open entry point, thus it's usually referred to
3053 * as the close function. Among other things this function mainly stops the
3054 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3055 * Return value: '0' on success and an appropriate (-)ve integer as
3056 * defined in errno.h file on failure.
3057 */
528f7272 3058static int vxge_close(struct net_device *dev)
703da5a1
RV
3059{
3060 do_vxge_close(dev, 1);
3061 return 0;
3062}
3063
3064/**
3065 * vxge_change_mtu
3066 * @dev: net device pointer.
3067 * @new_mtu :the new MTU size for the device.
3068 *
3069 * A driver entry point to change MTU size for the device. Before changing
3070 * the MTU the device must be stopped.
3071 */
3072static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3073{
3074 struct vxgedev *vdev = netdev_priv(dev);
3075
3076 vxge_debug_entryexit(vdev->level_trace,
3077 "%s:%d", __func__, __LINE__);
3078 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
3079 vxge_debug_init(vdev->level_err,
3080 "%s: mtu size is invalid", dev->name);
3081 return -EPERM;
3082 }
3083
3084 /* check if device is down already */
3085 if (unlikely(!is_vxge_card_up(vdev))) {
3086 /* just store new value, will use later on open() */
3087 dev->mtu = new_mtu;
3088 vxge_debug_init(vdev->level_err,
3089 "%s", "device is down on MTU change");
3090 return 0;
3091 }
3092
3093 vxge_debug_init(vdev->level_trace,
3094 "trying to apply new MTU %d", new_mtu);
3095
3096 if (vxge_close(dev))
3097 return -EIO;
3098
3099 dev->mtu = new_mtu;
3100 vdev->mtu = new_mtu;
3101
3102 if (vxge_open(dev))
3103 return -EIO;
3104
3105 vxge_debug_init(vdev->level_trace,
3106 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3107
3108 vxge_debug_entryexit(vdev->level_trace,
3109 "%s:%d Exiting...", __func__, __LINE__);
3110
3111 return 0;
3112}
3113
3114/**
dd57f970 3115 * vxge_get_stats64
703da5a1 3116 * @dev: pointer to the device structure
dd57f970 3117 * @stats: pointer to struct rtnl_link_stats64
703da5a1 3118 *
703da5a1 3119 */
dd57f970
ED
3120static struct rtnl_link_stats64 *
3121vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
703da5a1 3122{
dd57f970 3123 struct vxgedev *vdev = netdev_priv(dev);
703da5a1
RV
3124 int k;
3125
dd57f970 3126 /* net_stats already zeroed by caller */
703da5a1
RV
3127 for (k = 0; k < vdev->no_of_vpath; k++) {
3128 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
3129 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
3130 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
3131 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
528f7272 3132 net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
703da5a1
RV
3133 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
3134 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
3135 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
3136 }
3137
3138 return net_stats;
3139}
3140
cd883a79 3141static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
b81b3733
JM
3142{
3143 enum vxge_hw_status status;
3144 u64 val64;
3145
3146 /* Timestamp is passed to the driver via the FCS, therefore we
3147 * must disable the FCS stripping by the adapter. Since this is
3148 * required for the driver to load (due to a hardware bug),
3149 * there is no need to do anything special here.
3150 */
cd883a79
JM
3151 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3152 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3153 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
b81b3733 3154
cd883a79 3155 status = vxge_hw_mgmt_reg_write(devh,
b81b3733
JM
3156 vxge_hw_mgmt_reg_type_mrpcim,
3157 0,
3158 offsetof(struct vxge_hw_mrpcim_reg,
3159 xmac_timestamp),
3160 val64);
cd883a79
JM
3161 vxge_hw_device_flush_io(devh);
3162 devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
b81b3733
JM
3163 return status;
3164}
3165
3166static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3167{
3168 struct hwtstamp_config config;
b81b3733
JM
3169 int i;
3170
3171 if (copy_from_user(&config, data, sizeof(config)))
3172 return -EFAULT;
3173
3174 /* reserved for future extensions */
3175 if (config.flags)
3176 return -EINVAL;
3177
3178 /* Transmit HW Timestamp not supported */
3179 switch (config.tx_type) {
3180 case HWTSTAMP_TX_OFF:
3181 break;
3182 case HWTSTAMP_TX_ON:
3183 default:
3184 return -ERANGE;
3185 }
3186
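	/* Rx timestamping on this adapter is all-or-nothing: any PTP filter
	 * request falls back to HWTSTAMP_FILTER_ALL, and only when hardware
	 * timestamping (devh->config.hwts_en) was enabled beforehand.
	 */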
3187 switch (config.rx_filter) {
3188 case HWTSTAMP_FILTER_NONE:
b81b3733
JM
3189 vdev->rx_hwts = 0;
3190 config.rx_filter = HWTSTAMP_FILTER_NONE;
3191 break;
3192
3193 case HWTSTAMP_FILTER_ALL:
3194 case HWTSTAMP_FILTER_SOME:
3195 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3196 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3197 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3198 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3199 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3200 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3201 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3202 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3203 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3204 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3205 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3206 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
cd883a79 3207 if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
b81b3733
JM
3208 return -EFAULT;
3209
3210 vdev->rx_hwts = 1;
3211 config.rx_filter = HWTSTAMP_FILTER_ALL;
3212 break;
3213
3214 default:
3215 return -ERANGE;
3216 }
3217
3218 for (i = 0; i < vdev->no_of_vpath; i++)
3219 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3220
3221 if (copy_to_user(data, &config, sizeof(config)))
3222 return -EFAULT;
3223
3224 return 0;
3225}
3226
703da5a1
RV
3227/**
3228 * vxge_ioctl
3229 * @dev: Device pointer.
3230 * @ifr: An IOCTL specific structure, that can contain a pointer to
3231 * a proprietary structure used to pass information to the driver.
3232 * @cmd: This is used to distinguish between the different commands that
3233 * can be passed to the IOCTL functions.
3234 *
3235 * Entry point for the Ioctl.
3236 */
3237static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3238{
b81b3733
JM
3239 struct vxgedev *vdev = netdev_priv(dev);
3240 int ret;
3241
3242 switch (cmd) {
3243 case SIOCSHWTSTAMP:
3244 ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
3245 if (ret)
3246 return ret;
3247 break;
3248 default:
3249 return -EOPNOTSUPP;
3250 }
3251
3252 return 0;
703da5a1
RV
3253}
3254
3255/**
3256 * vxge_tx_watchdog
3257 * @dev: pointer to net device structure
3258 *
3259 * Watchdog for transmit side.
3260 * This function is triggered if the Tx Queue is stopped
3261 * for a pre-defined amount of time when the Interface is still up.
3262 */
2e41f644 3263static void vxge_tx_watchdog(struct net_device *dev)
703da5a1
RV
3264{
3265 struct vxgedev *vdev;
3266
3267 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3268
5f54cebb 3269 vdev = netdev_priv(dev);
703da5a1
RV
3270
3271 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3272
2e41f644 3273 schedule_work(&vdev->reset_task);
703da5a1
RV
3274 vxge_debug_entryexit(VXGE_TRACE,
3275 "%s:%d Exiting...", __func__, __LINE__);
3276}
3277
3278/**
3279 * vxge_vlan_rx_register
3280 * @dev: net device pointer.
3281 * @grp: vlan group
3282 *
3283 * Vlan group registration
3284 */
3285static void
3286vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3287{
3288 struct vxgedev *vdev;
3289 struct vxge_vpath *vpath;
3290 int vp;
3291 u64 vid;
3292 enum vxge_hw_status status;
3293 int i;
3294
3295 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3296
5f54cebb 3297 vdev = netdev_priv(dev);
703da5a1
RV
3298
3299 vpath = &vdev->vpaths[0];
3300 if ((NULL == grp) && (vpath->is_open)) {
3301 /* Get the first vlan */
3302 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3303
3304 while (status == VXGE_HW_OK) {
3305
3306 /* Delete this vlan from the vid table */
3307 for (vp = 0; vp < vdev->no_of_vpath; vp++) {
3308 vpath = &vdev->vpaths[vp];
3309 if (!vpath->is_open)
3310 continue;
3311
3312 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3313 }
3314
3315 /* Get the next vlan to be deleted */
3316 vpath = &vdev->vpaths[0];
3317 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3318 }
3319 }
3320
3321 vdev->vlgrp = grp;
3322
3323 for (i = 0; i < vdev->no_of_vpath; i++) {
3324 if (vdev->vpaths[i].is_configured)
3325 vdev->vpaths[i].ring.vlgrp = grp;
3326 }
3327
3328 vxge_debug_entryexit(VXGE_TRACE,
3329 "%s:%d Exiting...", __func__, __LINE__);
3330}
3331
3332/**
3333 * vxge_vlan_rx_add_vid
3334 * @dev: net device pointer.
3335 * @vid: vid
3336 *
3337 * Add the vlan id to the devices vlan id table
3338 */
3339static void
3340vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3341{
3342 struct vxgedev *vdev;
3343 struct vxge_vpath *vpath;
3344 int vp_id;
3345
5f54cebb 3346 vdev = netdev_priv(dev);
703da5a1
RV
3347
3348 /* Add these vlan to the vid table */
3349 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3350 vpath = &vdev->vpaths[vp_id];
3351 if (!vpath->is_open)
3352 continue;
3353 vxge_hw_vpath_vid_add(vpath->handle, vid);
3354 }
3355}
3356
3357/**
3358 * vxge_vlan_rx_kill_vid
3359 * @dev: net device pointer.
3360 * @vid: vid
3361 *
3362 * Remove the vlan id from the device's vlan id table
3363 */
3364static void
3365vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3366{
3367 struct vxgedev *vdev;
3368 struct vxge_vpath *vpath;
3369 int vp_id;
3370
3371 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3372
5f54cebb 3373 vdev = netdev_priv(dev);
703da5a1
RV
3374
3375 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3376
3377 /* Delete this vlan from the vid table */
3378 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3379 vpath = &vdev->vpaths[vp_id];
3380 if (!vpath->is_open)
3381 continue;
3382 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3383 }
3384 vxge_debug_entryexit(VXGE_TRACE,
3385 "%s:%d Exiting...", __func__, __LINE__);
3386}
3387
3388static const struct net_device_ops vxge_netdev_ops = {
3389 .ndo_open = vxge_open,
3390 .ndo_stop = vxge_close,
dd57f970 3391 .ndo_get_stats64 = vxge_get_stats64,
703da5a1
RV
3392 .ndo_start_xmit = vxge_xmit,
3393 .ndo_validate_addr = eth_validate_addr,
3394 .ndo_set_multicast_list = vxge_set_multicast,
703da5a1 3395 .ndo_do_ioctl = vxge_ioctl,
703da5a1
RV
3396 .ndo_set_mac_address = vxge_set_mac_addr,
3397 .ndo_change_mtu = vxge_change_mtu,
feb990d4
MM
3398 .ndo_fix_features = vxge_fix_features,
3399 .ndo_set_features = vxge_set_features,
703da5a1
RV
3400 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3401 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3402 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
703da5a1
RV
3403 .ndo_tx_timeout = vxge_tx_watchdog,
3404#ifdef CONFIG_NET_POLL_CONTROLLER
3405 .ndo_poll_controller = vxge_netpoll,
3406#endif
3407};
3408
42821a5b 3409static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3410 struct vxge_config *config,
3411 int high_dma, int no_of_vpath,
3412 struct vxgedev **vdev_out)
703da5a1
RV
3413{
3414 struct net_device *ndev;
3415 enum vxge_hw_status status = VXGE_HW_OK;
3416 struct vxgedev *vdev;
98f45da2 3417 int ret = 0, no_of_queue = 1;
703da5a1
RV
3418 u64 stat;
3419
3420 *vdev_out = NULL;
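	/* With Tx steering enabled every vpath gets its own netdev Tx queue;
	 * otherwise a single queue is shared by all vpaths.
	 */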
d03848e0 3421 if (config->tx_steering_type)
703da5a1
RV
3422 no_of_queue = no_of_vpath;
3423
3424 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3425 no_of_queue);
3426 if (ndev == NULL) {
3427 vxge_debug_init(
3428 vxge_hw_device_trace_level_get(hldev),
3429 "%s : device allocation failed", __func__);
3430 ret = -ENODEV;
3431 goto _out0;
3432 }
3433
3434 vxge_debug_entryexit(
3435 vxge_hw_device_trace_level_get(hldev),
3436 "%s: %s:%d Entering...",
3437 ndev->name, __func__, __LINE__);
3438
3439 vdev = netdev_priv(ndev);
3440 memset(vdev, 0, sizeof(struct vxgedev));
3441
3442 vdev->ndev = ndev;
3443 vdev->devh = hldev;
3444 vdev->pdev = hldev->pdev;
3445 memcpy(&vdev->config, config, sizeof(struct vxge_config));
b81b3733 3446 vdev->rx_hwts = 0;
ff938e43 3447 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
e7935c96 3448
703da5a1
RV
3449 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3450
feb990d4
MM
3451 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3452 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3453 NETIF_F_TSO | NETIF_F_TSO6 |
3454 NETIF_F_HW_VLAN_TX;
3455 if (vdev->config.rth_steering != NO_STEERING)
3456 ndev->hw_features |= NETIF_F_RXHASH;
3457
3458 ndev->features |= ndev->hw_features |
3459 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3460
703da5a1
RV
3461 /* Driver entry points */
3462 ndev->irq = vdev->pdev->irq;
3463 ndev->base_addr = (unsigned long) hldev->bar0;
3464
3465 ndev->netdev_ops = &vxge_netdev_ops;
3466
3467 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
2e41f644 3468 INIT_WORK(&vdev->reset_task, vxge_reset);
703da5a1 3469
42821a5b 3470 vxge_initialize_ethtool_ops(ndev);
703da5a1
RV
3471
3472 /* Allocate memory for vpath */
3473 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3474 no_of_vpath, GFP_KERNEL);
3475 if (!vdev->vpaths) {
3476 vxge_debug_init(VXGE_ERR,
3477 "%s: vpath memory allocation failed",
3478 vdev->ndev->name);
6cca2003 3479 ret = -ENOMEM;
703da5a1
RV
3480 goto _out1;
3481 }
3482
703da5a1
RV
3483 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3484 "%s : checksuming enabled", __func__);
3485
3486 if (high_dma) {
3487 ndev->features |= NETIF_F_HIGHDMA;
3488 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3489 "%s : using High DMA", __func__);
3490 }
3491
6cca2003
JM
3492 ret = register_netdev(ndev);
3493 if (ret) {
703da5a1
RV
3494 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3495 "%s: %s : device registration failed!",
3496 ndev->name, __func__);
703da5a1
RV
3497 goto _out2;
3498 }
3499
3500 /* Set the factory defined MAC address initially */
3501 ndev->addr_len = ETH_ALEN;
3502
3503 /* Set the link state to off at this point; when the link change
3504 * interrupt arrives, the state will be automatically changed to
3505 * the right state.
3506 */
3507 netif_carrier_off(ndev);
3508
3509 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3510 "%s: Ethernet device registered",
3511 ndev->name);
3512
e8ac1756 3513 hldev->ndev = ndev;
703da5a1
RV
3514 *vdev_out = vdev;
3515
3516 /* Resetting the Device stats */
3517 status = vxge_hw_mrpcim_stats_access(
3518 hldev,
3519 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3520 0,
3521 0,
3522 &stat);
3523
3524 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3525 vxge_debug_init(
3526 vxge_hw_device_trace_level_get(hldev),
3527 "%s: device stats clear returns"
3528 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3529
3530 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3531 "%s: %s:%d Exiting...",
3532 ndev->name, __func__, __LINE__);
3533
3534 return ret;
3535_out2:
3536 kfree(vdev->vpaths);
3537_out1:
3538 free_netdev(ndev);
3539_out0:
3540 return ret;
3541}
3542
3543/*
3544 * vxge_device_unregister
3545 *
3546 * This function will unregister and free network device
3547 */
2c91308f 3548static void vxge_device_unregister(struct __vxge_hw_device *hldev)
703da5a1
RV
3549{
3550 struct vxgedev *vdev;
3551 struct net_device *dev;
3552 char buf[IFNAMSIZ];
703da5a1
RV
3553
3554 dev = hldev->ndev;
3555 vdev = netdev_priv(dev);
703da5a1 3556
2c91308f
JM
3557 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3558 __func__, __LINE__);
3559
ead5d238 3560 strncpy(buf, dev->name, IFNAMSIZ);
703da5a1 3561
ba27d85c
TH
3562 flush_work_sync(&vdev->reset_task);
3563
703da5a1
RV
3564 /* in 2.6 will call stop() if device is up */
3565 unregister_netdev(dev);
3566
6cca2003
JM
3567 kfree(vdev->vpaths);
3568
3569 /* we are safe to free it now */
3570 free_netdev(dev);
3571
2c91308f
JM
3572 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3573 buf);
3574 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3575 __func__, __LINE__);
703da5a1
RV
3576}
3577
3578/*
3579 * vxge_callback_crit_err
3580 *
3581 * This function is called by the alarm handler in interrupt context.
3582 * Driver must analyze it based on the event type.
3583 */
3584static void
3585vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3586 enum vxge_hw_event type, u64 vp_id)
3587{
3588 struct net_device *dev = hldev->ndev;
5f54cebb 3589 struct vxgedev *vdev = netdev_priv(dev);
98f45da2 3590 struct vxge_vpath *vpath = NULL;
703da5a1
RV
3591 int vpath_idx;
3592
3593 vxge_debug_entryexit(vdev->level_trace,
3594 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3595
3596 /* Note: This event type should be used for device wide
3597 * indications only - Serious errors, Slot freeze and critical errors
3598 */
3599 vdev->cric_err_event = type;
3600
98f45da2
JM
3601 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3602 vpath = &vdev->vpaths[vpath_idx];
3603 if (vpath->device_id == vp_id)
703da5a1 3604 break;
98f45da2 3605 }
703da5a1
RV
3606
3607 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3608 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3609 vxge_debug_init(VXGE_ERR,
3610 "%s: Slot is frozen", vdev->ndev->name);
3611 } else if (type == VXGE_HW_EVENT_SERR) {
3612 vxge_debug_init(VXGE_ERR,
3613 "%s: Encountered Serious Error",
3614 vdev->ndev->name);
3615 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3616 vxge_debug_init(VXGE_ERR,
3617 "%s: Encountered Critical Error",
3618 vdev->ndev->name);
3619 }
3620
3621 if ((type == VXGE_HW_EVENT_SERR) ||
3622 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3623 if (unlikely(vdev->exec_mode))
3624 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3625 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3626 vxge_hw_device_mask_all(hldev);
3627 if (unlikely(vdev->exec_mode))
3628 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3629 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3630 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3631
3632 if (unlikely(vdev->exec_mode))
3633 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3634 else {
3635 /* check if this vpath is already set for reset */
3636 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3637
3638 /* disable interrupts for this vpath */
3639 vxge_vpath_intr_disable(vdev, vpath_idx);
3640
3641 /* stop the queue for this vpath */
98f45da2 3642 netif_tx_stop_queue(vpath->fifo.txq);
3643 }
3644 }
3645 }
3646
3647 vxge_debug_entryexit(vdev->level_trace,
3648 "%s: %s:%d Exiting...",
3649 vdev->ndev->name, __func__, __LINE__);
3650}
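/*
 * Summary of the policy above: device-wide events (serious error, slot
 * freeze, critical error) are logged unless a card reset is already in
 * progress, and the card is marked down only when vdev->exec_mode is set;
 * a critical error additionally masks all device interrupts.  Per-vpath
 * events (fifo or vpath errors) instead mark just the affected vpath in
 * the vp_reset bitmask, disable that vpath's interrupts and stop its
 * transmit queue, so the single vpath can be reset later without bringing
 * down the whole device.
 */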
3651
3652static void verify_bandwidth(void)
3653{
3654 int i, band_width, total = 0, equal_priority = 0;
3655
3656 /* 1. If user enters 0 for some fifo, give equal priority to all */
3657 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3658 if (bw_percentage[i] == 0) {
3659 equal_priority = 1;
3660 break;
3661 }
3662 }
3663
3664 if (!equal_priority) {
3665 /* 2. If sum exceeds 100, give equal priority to all */
3666 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3667 if (bw_percentage[i] == 0xFF)
3668 break;
3669
3670 total += bw_percentage[i];
3671 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3672 equal_priority = 1;
3673 break;
3674 }
3675 }
3676 }
3677
3678 if (!equal_priority) {
3679 /* Is all the bandwidth consumed? */
3680 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3681 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3682 /* Split rest of bw equally among next VPs*/
3683 band_width =
3684 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3685 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3686 if (band_width < 2) /* min of 2% */
3687 equal_priority = 1;
3688 else {
3689 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3690 i++)
3691 bw_percentage[i] =
3692 band_width;
3693 }
3694 }
3695 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3696 equal_priority = 1;
3697 }
3698
3699 if (equal_priority) {
3700 vxge_debug_init(VXGE_ERR,
3701 "%s: Assigning equal bandwidth to all the vpaths",
3702 VXGE_DRIVER_NAME);
3703 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3704 VXGE_HW_MAX_VIRTUAL_PATHS;
3705 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3706 bw_percentage[i] = bw_percentage[0];
3707 }
3708}
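/*
 * A worked example of the split above, using illustrative values only
 * (assuming VXGE_HW_VPATH_BANDWIDTH_MAX is 100 and
 * VXGE_HW_MAX_VIRTUAL_PATHS is 17, as the percentage arithmetic implies):
 * with bw_percentage=40,30 and the remaining entries left at the 0xFF
 * default, the loop stops at i == 2 with total == 70.  The unclaimed 30%
 * is split over the remaining 17 - 2 = 15 vpaths, i.e. 2% each, which
 * meets the 2% floor.  Had 95% been claimed, the per-vpath remainder
 * would round down to 0%, so every vpath falls back to the equal share
 * of 100 / 17 = 5%.
 */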
3709
3710/*
3711 * Vpath configuration
3712 */
3713static int __devinit vxge_config_vpaths(
3714 struct vxge_hw_device_config *device_config,
3715 u64 vpath_mask, struct vxge_config *config_param)
3716{
3717 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3718 u32 txdl_size, txdl_per_memblock;
3719
3720 temp = driver_config->vpath_per_dev;
3721 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3722 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3723 /* No more CPUs available. Return the vpath count as zero. */
3724 if (driver_config->g_no_cpus == -1)
3725 return 0;
3726
3727 if (!driver_config->g_no_cpus)
3728 driver_config->g_no_cpus = num_online_cpus();
3729
3730 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3731 if (!driver_config->vpath_per_dev)
3732 driver_config->vpath_per_dev = 1;
3733
3734 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3735 if (!vxge_bVALn(vpath_mask, i, 1))
3736 continue;
3737 else
3738 default_no_vpath++;
3739 if (default_no_vpath < driver_config->vpath_per_dev)
3740 driver_config->vpath_per_dev = default_no_vpath;
3741
3742 driver_config->g_no_cpus = driver_config->g_no_cpus -
3743 (driver_config->vpath_per_dev * 2);
3744 if (driver_config->g_no_cpus <= 0)
3745 driver_config->g_no_cpus = -1;
3746 }
3747
3748 if (driver_config->vpath_per_dev == 1) {
3749 vxge_debug_ll_config(VXGE_TRACE,
3750 "%s: Disable tx and rx steering, "
3751 "as single vpath is configured", VXGE_DRIVER_NAME);
3752 config_param->rth_steering = NO_STEERING;
3753 config_param->tx_steering_type = NO_STEERING;
3754 device_config->rth_en = 0;
3755 }
3756
3757 /* configure bandwidth */
3758 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3759 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3760
3761 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3762 device_config->vp_config[i].vp_id = i;
3763 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3764 if (no_of_vpaths < driver_config->vpath_per_dev) {
3765 if (!vxge_bVALn(vpath_mask, i, 1)) {
3766 vxge_debug_ll_config(VXGE_TRACE,
3767 "%s: vpath: %d is not available",
3768 VXGE_DRIVER_NAME, i);
3769 continue;
3770 } else {
3771 vxge_debug_ll_config(VXGE_TRACE,
3772 "%s: vpath: %d available",
3773 VXGE_DRIVER_NAME, i);
3774 no_of_vpaths++;
3775 }
3776 } else {
3777 vxge_debug_ll_config(VXGE_TRACE,
3778 "%s: vpath: %d is not configured, "
3779 "max_config_vpath exceeded",
3780 VXGE_DRIVER_NAME, i);
3781 break;
3782 }
3783
3784 /* Configure Tx fifo's */
3785 device_config->vp_config[i].fifo.enable =
3786 VXGE_HW_FIFO_ENABLE;
3787 device_config->vp_config[i].fifo.max_frags =
5beefb4f 3788 MAX_SKB_FRAGS + 1;
3789 device_config->vp_config[i].fifo.memblock_size =
3790 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3791
3792 txdl_size = device_config->vp_config[i].fifo.max_frags *
3793 sizeof(struct vxge_hw_fifo_txd);
3794 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3795
3796 device_config->vp_config[i].fifo.fifo_blocks =
3797 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3798
3799 device_config->vp_config[i].fifo.intr =
3800 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3801
3802 /* Configure tti properties */
3803 device_config->vp_config[i].tti.intr_enable =
3804 VXGE_HW_TIM_INTR_ENABLE;
3805
3806 device_config->vp_config[i].tti.btimer_val =
3807 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3808
3809 device_config->vp_config[i].tti.timer_ac_en =
3810 VXGE_HW_TIM_TIMER_AC_ENABLE;
3811
3812 /* For msi-x with napi (each vector has a handler of its own) -
3813 * Set CI to OFF for all vpaths
3814 */
3815 device_config->vp_config[i].tti.timer_ci_en =
3816 VXGE_HW_TIM_TIMER_CI_DISABLE;
3817
3818 device_config->vp_config[i].tti.timer_ri_en =
3819 VXGE_HW_TIM_TIMER_RI_DISABLE;
3820
3821 device_config->vp_config[i].tti.util_sel =
3822 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3823
3824 device_config->vp_config[i].tti.ltimer_val =
3825 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3826
3827 device_config->vp_config[i].tti.rtimer_val =
3828 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3829
3830 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3831 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3832 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3833 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3834 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3835 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3836 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3837
3838 /* Configure Rx rings */
3839 device_config->vp_config[i].ring.enable =
3840 VXGE_HW_RING_ENABLE;
3841
3842 device_config->vp_config[i].ring.ring_blocks =
3843 VXGE_HW_DEF_RING_BLOCKS;
3845 device_config->vp_config[i].ring.buffer_mode =
3846 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3848 device_config->vp_config[i].ring.rxds_limit =
3849 VXGE_HW_DEF_RING_RXDS_LIMIT;
3851 device_config->vp_config[i].ring.scatter_mode =
3852 VXGE_HW_RING_SCATTER_MODE_A;
3853
3854 /* Configure rti properties */
3855 device_config->vp_config[i].rti.intr_enable =
3856 VXGE_HW_TIM_INTR_ENABLE;
3857
3858 device_config->vp_config[i].rti.btimer_val =
3859 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3860
3861 device_config->vp_config[i].rti.timer_ac_en =
3862 VXGE_HW_TIM_TIMER_AC_ENABLE;
3863
3864 device_config->vp_config[i].rti.timer_ci_en =
3865 VXGE_HW_TIM_TIMER_CI_DISABLE;
3866
3867 device_config->vp_config[i].rti.timer_ri_en =
3868 VXGE_HW_TIM_TIMER_RI_DISABLE;
3869
3870 device_config->vp_config[i].rti.util_sel =
3871 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3872
3873 device_config->vp_config[i].rti.urange_a =
3874 RTI_RX_URANGE_A;
3875 device_config->vp_config[i].rti.urange_b =
3876 RTI_RX_URANGE_B;
3877 device_config->vp_config[i].rti.urange_c =
3878 RTI_RX_URANGE_C;
3879 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3880 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3881 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3882 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3883
3884 device_config->vp_config[i].rti.rtimer_val =
3885 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3886
3887 device_config->vp_config[i].rti.ltimer_val =
3888 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3889
3890 device_config->vp_config[i].rpa_strip_vlan_tag =
3891 vlan_tag_strip;
3892 }
3893
3894 driver_config->vpath_per_dev = temp;
3895 return no_of_vpaths;
3896}
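/*
 * The fifo sizing above is plain ceiling arithmetic.  With purely
 * illustrative numbers (not the driver's actual constants): if max_frags
 * is 19, a TxD is 32 bytes and a memory block is 4096 bytes, then
 * txdl_size = 19 * 32 = 608, txdl_per_memblock = 4096 / 608 = 6, and a
 * default fifo length of 128 descriptors needs ((128 - 1) / 6) + 1 = 22
 * memory blocks.  The ((n - 1) / d) + 1 form is the usual integer ceiling
 * of n / d.
 */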
3897
3898/* initialize device configurations */
3899static void __devinit vxge_device_config_init(
3900 struct vxge_hw_device_config *device_config,
3901 int *intr_type)
3902{
3903 /* Used for CQRQ/SRQ. */
3904 device_config->dma_blockpool_initial =
3905 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3906
3907 device_config->dma_blockpool_max =
3908 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3909
3910 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3911 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3912
3913#ifndef CONFIG_PCI_MSI
3914 vxge_debug_init(VXGE_ERR,
3915 "%s: This Kernel does not support "
3916 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3917 *intr_type = INTA;
3918#endif
3919
3920	/* Configure the interrupt mode: MSI-X or INTA (IRQ line). */
3921 switch (*intr_type) {
3922 case INTA:
3923 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3924 break;
3925
3926 case MSI_X:
16fded7d 3927 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3928 break;
3929 }
3931 /* Timer period between device poll */
3932 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3933
3934 /* Configure mac based steering. */
3935 device_config->rts_mac_en = addr_learn_en;
3936
3937	/* Configure the RTH indirection table type */
3938 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3939
3940 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3941 __func__);
3942 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3943 device_config->intr_mode);
3944 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3945 device_config->device_poll_millis);
3946 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3947 device_config->rth_en);
3948 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3949 device_config->rth_it_type);
3950}
3951
3952static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3953{
3954 int i;
3955
3956 vxge_debug_init(VXGE_TRACE,
3957 "%s: %d Vpath(s) opened",
3958 vdev->ndev->name, vdev->no_of_vpath);
3959
3960 switch (vdev->config.intr_type) {
3961 case INTA:
3962 vxge_debug_init(VXGE_TRACE,
3963 "%s: Interrupt type INTA", vdev->ndev->name);
3964 break;
3965
3966 case MSI_X:
3967 vxge_debug_init(VXGE_TRACE,
3968 "%s: Interrupt type MSI-X", vdev->ndev->name);
3969 break;
3970 }
3971
3972 if (vdev->config.rth_steering) {
3973 vxge_debug_init(VXGE_TRACE,
3974 "%s: RTH steering enabled for TCP_IPV4",
3975 vdev->ndev->name);
3976 } else {
3977 vxge_debug_init(VXGE_TRACE,
3978 "%s: RTH steering disabled", vdev->ndev->name);
3979 }
3980
3981 switch (vdev->config.tx_steering_type) {
3982 case NO_STEERING:
3983 vxge_debug_init(VXGE_TRACE,
3984 "%s: Tx steering disabled", vdev->ndev->name);
3985 break;
3986 case TX_PRIORITY_STEERING:
3987 vxge_debug_init(VXGE_TRACE,
3988 "%s: Unsupported tx steering option",
3989 vdev->ndev->name);
3990 vxge_debug_init(VXGE_TRACE,
3991 "%s: Tx steering disabled", vdev->ndev->name);
3992 vdev->config.tx_steering_type = 0;
3993 break;
3994 case TX_VLAN_STEERING:
3995 vxge_debug_init(VXGE_TRACE,
3996 "%s: Unsupported tx steering option",
3997 vdev->ndev->name);
3998 vxge_debug_init(VXGE_TRACE,
3999 "%s: Tx steering disabled", vdev->ndev->name);
4000 vdev->config.tx_steering_type = 0;
4001 break;
4002 case TX_MULTIQ_STEERING:
4003 vxge_debug_init(VXGE_TRACE,
4004 "%s: Tx multiqueue steering enabled",
4005 vdev->ndev->name);
4006 break;
4007 case TX_PORT_STEERING:
4008 vxge_debug_init(VXGE_TRACE,
4009 "%s: Tx port steering enabled",
4010 vdev->ndev->name);
4011 break;
4012 default:
4013 vxge_debug_init(VXGE_ERR,
4014 "%s: Unsupported tx steering type",
4015 vdev->ndev->name);
4016 vxge_debug_init(VXGE_TRACE,
4017 "%s: Tx steering disabled", vdev->ndev->name);
4018 vdev->config.tx_steering_type = 0;
4019 }
4020
4021 if (vdev->config.addr_learn_en)
4022 vxge_debug_init(VXGE_TRACE,
4023 "%s: MAC Address learning enabled", vdev->ndev->name);
4024
4025 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4026 if (!vxge_bVALn(vpath_mask, i, 1))
4027 continue;
4028 vxge_debug_ll_config(VXGE_TRACE,
4029 "%s: MTU size - %d", vdev->ndev->name,
4030 ((struct __vxge_hw_device *)(vdev->devh))->
4031 config.vp_config[i].mtu);
4032 vxge_debug_init(VXGE_TRACE,
4033 "%s: VLAN tag stripping %s", vdev->ndev->name,
4034 ((struct __vxge_hw_device *)(vdev->devh))->
4035 config.vp_config[i].rpa_strip_vlan_tag
4036 ? "Enabled" : "Disabled");
4037 vxge_debug_ll_config(VXGE_TRACE,
4038 "%s: Max frags : %d", vdev->ndev->name,
4039 ((struct __vxge_hw_device *)(vdev->devh))->
4040 config.vp_config[i].fifo.max_frags);
4041 break;
4042 }
4043}
4044
4045#ifdef CONFIG_PM
4046/**
4047 * vxge_pm_suspend - vxge power management suspend entry point
4048 *
4049 */
4050static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
4051{
4052 return -ENOSYS;
4053}
4054/**
4055 * vxge_pm_resume - vxge power management resume entry point
4056 *
4057 */
4058static int vxge_pm_resume(struct pci_dev *pdev)
4059{
4060 return -ENOSYS;
4061}
4062
4063#endif
4064
4065/**
4066 * vxge_io_error_detected - called when PCI error is detected
4067 * @pdev: Pointer to PCI device
4068 * @state: The current pci connection state
4069 *
4070 * This function is called after a PCI bus error affecting
4071 * this device has been detected.
4072 */
4073static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
4074 pci_channel_state_t state)
4075{
d8ee7071 4076 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4077 struct net_device *netdev = hldev->ndev;
4078
4079 netif_device_detach(netdev);
4080
4081 if (state == pci_channel_io_perm_failure)
4082 return PCI_ERS_RESULT_DISCONNECT;
4083
4084 if (netif_running(netdev)) {
4085 /* Bring down the card, while avoiding PCI I/O */
4086 do_vxge_close(netdev, 0);
4087 }
4088
4089 pci_disable_device(pdev);
4090
4091 return PCI_ERS_RESULT_NEED_RESET;
4092}
4093
4094/**
4095 * vxge_io_slot_reset - called after the pci bus has been reset.
4096 * @pdev: Pointer to PCI device
4097 *
4098 * Restart the card from scratch, as if from a cold-boot.
4100 * At this point, the card has experienced a hard reset,
4100 * followed by fixups by BIOS, and has its config space
4101 * set up identically to what it was at cold boot.
4102 */
4103static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
4104{
d8ee7071 4105 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4106 struct net_device *netdev = hldev->ndev;
4107
4108 struct vxgedev *vdev = netdev_priv(netdev);
4109
4110 if (pci_enable_device(pdev)) {
75f5e1c6 4111 netdev_err(netdev, "Cannot re-enable device after reset\n");
4112 return PCI_ERS_RESULT_DISCONNECT;
4113 }
4114
4115 pci_set_master(pdev);
528f7272 4116 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
4117
4118 return PCI_ERS_RESULT_RECOVERED;
4119}
4120
4121/**
4122 * vxge_io_resume - called when traffic can start flowing again.
4123 * @pdev: Pointer to PCI device
4124 *
4125 * This callback is called when the error recovery driver tells
4126 * us that it's OK to resume normal operation.
4127 */
4128static void vxge_io_resume(struct pci_dev *pdev)
4129{
d8ee7071 4130 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4131 struct net_device *netdev = hldev->ndev;
4132
4133 if (netif_running(netdev)) {
4134 if (vxge_open(netdev)) {
4135 netdev_err(netdev,
4136 "Can't bring device back up after reset\n");
4137 return;
4138 }
4139 }
4140
4141 netif_device_attach(netdev);
4142}
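/*
 * Together, the three callbacks above implement the standard PCI error
 * recovery sequence: vxge_io_error_detected() detaches the netdev, closes
 * it if it was running and disables the PCI device, requesting a slot
 * reset; vxge_io_slot_reset() re-enables the device, restores bus
 * mastering and performs a full driver reset; vxge_io_resume() reopens
 * the interface if it was up and reattaches it.  They are hooked into the
 * PCI core via the vxge_err_handler structure defined near the end of
 * this file.
 */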
4143
4144static inline u32 vxge_get_num_vfs(u64 function_mode)
4145{
4146 u32 num_functions = 0;
4147
4148 switch (function_mode) {
4149 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4150 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
4151 num_functions = 8;
4152 break;
4153 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4154 num_functions = 1;
4155 break;
4156 case VXGE_HW_FUNCTION_MODE_SRIOV:
4157 case VXGE_HW_FUNCTION_MODE_MRIOV:
4158 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
4159 num_functions = 17;
4160 break;
4161 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
4162 num_functions = 4;
4163 break;
4164 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
4165 num_functions = 2;
4166 break;
4167 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
4168 num_functions = 8; /* TODO */
4169 break;
4170 }
4171 return num_functions;
4172}
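/*
 * vxge_get_num_vfs() returns the total number of functions implied by the
 * adapter's function mode.  The probe path below subtracts the PF itself
 * before enabling SR-IOV: in SRIOV_8 mode, for example, the lookup
 * returns 8 and pci_enable_sriov() is asked for 8 - 1 = 7 virtual
 * functions.
 */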
4173
4174int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4175{
4176 struct __vxge_hw_device *hldev = vdev->devh;
4177 u32 maj, min, bld, cmaj, cmin, cbld;
4178 enum vxge_hw_status status;
4179 const struct firmware *fw;
4180 int ret;
4181
4182 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4183 if (ret) {
4184 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4185 VXGE_DRIVER_NAME, fw_name);
4186 goto out;
4187 }
4188
4189 /* Load the new firmware onto the adapter */
4190 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4191 if (status != VXGE_HW_OK) {
4192 vxge_debug_init(VXGE_ERR,
4193 "%s: FW image download to adapter failed '%s'.",
4194 VXGE_DRIVER_NAME, fw_name);
4195 ret = -EIO;
4196 goto out;
4197 }
4198
4199 /* Read the version of the new firmware */
4200 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4201 if (status != VXGE_HW_OK) {
4202 vxge_debug_init(VXGE_ERR,
4203 "%s: Upgrade read version failed '%s'.",
4204 VXGE_DRIVER_NAME, fw_name);
4205 ret = -EIO;
4206 goto out;
4207 }
4208
4209 cmaj = vdev->config.device_hw_info.fw_version.major;
4210 cmin = vdev->config.device_hw_info.fw_version.minor;
4211 cbld = vdev->config.device_hw_info.fw_version.build;
4212 /* It's possible the version in /lib/firmware is not the latest version.
4213 * If so, we could get into a loop of trying to upgrade to the latest
4214 * and flashing the older version.
4215 */
4216 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4217 !override) {
4218 ret = -EINVAL;
4219 goto out;
4220 }
4221
4222 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4223 maj, min, bld);
4224
4225 /* Flash the adapter with the new firmware */
4226 status = vxge_hw_flash_fw(hldev);
4227 if (status != VXGE_HW_OK) {
4228 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4229 VXGE_DRIVER_NAME, fw_name);
4230 ret = -EIO;
4231 goto out;
4232 }
4233
4234 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4235 "hard reset before using, thus requiring a system reboot or a "
4236 "hotplug event.\n");
4237
4238out:
e84f885e 4239 release_firmware(fw);
4240 return ret;
4241}
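/*
 * vxge_fw_upgrade() is not static.  The probe-time caller below passes
 * override == 0, so an adapter already running the version found in
 * /lib/firmware is skipped with -EINVAL; a caller passing a non-zero
 * override (presumably the ethtool flash path, outside this file) can
 * force a reflash of the same version.
 */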
4242
4243static int vxge_probe_fw_update(struct vxgedev *vdev)
4244{
4245 u32 maj, min, bld;
4246 int ret, gpxe = 0;
4247 char *fw_name;
4248
4249 maj = vdev->config.device_hw_info.fw_version.major;
4250 min = vdev->config.device_hw_info.fw_version.minor;
4251 bld = vdev->config.device_hw_info.fw_version.build;
4252
4253 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4254 return 0;
4255
4256 /* Ignore the build number when determining if the current firmware is
4257 * "too new" to load the driver
4258 */
4259 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4260 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4261 "version, unable to load driver\n",
4262 VXGE_DRIVER_NAME);
4263 return -EINVAL;
4264 }
4265
4266 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4267 * work with this driver.
4268 */
4269 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4270 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4271 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4272 return -EINVAL;
4273 }
4274
4275 /* If file not specified, determine gPXE or not */
4276 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4277 int i;
4278 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4279 if (vdev->devh->eprom_versions[i]) {
4280 gpxe = 1;
4281 break;
4282 }
4283 }
4284 if (gpxe)
4285 fw_name = "vxge/X3fw-pxe.ncf";
4286 else
4287 fw_name = "vxge/X3fw.ncf";
4288
4289 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4290 /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4291 * probe, so ignore them
4292 */
4293 if (ret != -EINVAL && ret != -ENOENT)
4294 return -EIO;
4295 else
4296 ret = 0;
4297
4298 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4299 VXGE_FW_VER(maj, min, 0)) {
4300 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4301 " be used with this driver.\n"
4302 "Please get the latest version from "
4303 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4304 VXGE_DRIVER_NAME, maj, min, bld);
4305 return -EINVAL;
4306 }
4307
4308 return ret;
4309}
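/*
 * Version gating above, using the 1.4.4 cut-off named in the comment and
 * otherwise illustrative numbers: firmware at or below 1.4.4 fails the
 * probe with -EINVAL; firmware already matching the certified version
 * returns 0 without touching the flash; firmware whose major.minor (build
 * ignored) compares greater than the certified version is rejected as too
 * new.  Note that vxge_probe_fw_update() returns -EIO for any upgrade
 * result other than -EINVAL or -ENOENT, including a successful flash,
 * which matches the message printed above: the adapter must be hard reset
 * before the new image can be used.
 */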
4310
4311static int __devinit is_sriov_initialized(struct pci_dev *pdev)
4312{
4313 int pos;
4314 u16 ctrl;
4315
4316 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4317 if (pos) {
4318 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4319 if (ctrl & PCI_SRIOV_CTRL_VFE)
4320 return 1;
4321 }
4322 return 0;
4323}
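/*
 * is_sriov_initialized() reports whether the SR-IOV capability's VF Enable
 * bit is already set (for example by a previous load of the driver).  The
 * probe path below uses it to avoid calling pci_enable_sriov() again on an
 * adapter whose virtual functions are already active.
 */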
4324
4325/**
4326 * vxge_probe
4327 * @pdev : structure containing the PCI related information of the device.
4328 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4329 * Description:
4330 * This function is called when a new PCI device gets detected and initializes
4331 * it.
4332 * Return value:
4333 * returns 0 on success and negative on failure.
4334 *
4335 */
4336static int __devinit
4337vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4338{
2c91308f 4339 struct __vxge_hw_device *hldev;
4340 enum vxge_hw_status status;
4341 int ret;
4342 int high_dma = 0;
4343 u64 vpath_mask = 0;
4344 struct vxgedev *vdev;
7dad171c 4345 struct vxge_config *ll_config = NULL;
4346 struct vxge_hw_device_config *device_config = NULL;
4347 struct vxge_hw_device_attr attr;
4348 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4349 u8 *macaddr;
4350 struct vxge_mac_addrs *entry;
4351 static int bus = -1, device = -1;
cb27ec60 4352 u32 host_type;
703da5a1 4353 u8 new_device = 0;
4354 enum vxge_hw_status is_privileged;
4355 u32 function_mode;
4356 u32 num_vfs = 0;
4357
4358 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4359 attr.pdev = pdev;
4360
cb27ec60 4361 /* In SRIOV-17 mode, functions of the same adapter
4362 * can be deployed on different buses
4363 */
4364 if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4365 !pdev->is_virtfn)
4366 new_device = 1;
4367
4368 bus = pdev->bus->number;
4369 device = PCI_SLOT(pdev->devfn);
4370
4371 if (new_device) {
4372 if (driver_config->config_dev_cnt &&
4373 (driver_config->config_dev_cnt !=
4374 driver_config->total_dev_cnt))
4375 vxge_debug_init(VXGE_ERR,
4376 "%s: Configured %d of %d devices",
4377 VXGE_DRIVER_NAME,
4378 driver_config->config_dev_cnt,
4379 driver_config->total_dev_cnt);
4380 driver_config->config_dev_cnt = 0;
4381 driver_config->total_dev_cnt = 0;
703da5a1 4382 }
4384 /* Now making the CPU based no of vpath calculation
4385 * applicable for individual functions as well.
4386 */
4387 driver_config->g_no_cpus = 0;
4388 driver_config->vpath_per_dev = max_config_vpath;
4389
4390 driver_config->total_dev_cnt++;
4391 if (++driver_config->config_dev_cnt > max_config_dev) {
4392 ret = 0;
4393 goto _exit0;
4394 }
4395
4396 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4397 GFP_KERNEL);
4398 if (!device_config) {
4399 ret = -ENOMEM;
4400 vxge_debug_init(VXGE_ERR,
4401 "device_config : malloc failed %s %d",
4402 __FILE__, __LINE__);
4403 goto _exit0;
4404 }
4405
528f7272 4406 ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
4407 if (!ll_config) {
4408 ret = -ENOMEM;
4409 vxge_debug_init(VXGE_ERR,
528f7272 4410		"ll_config : malloc failed %s %d",
4411 __FILE__, __LINE__);
4412 goto _exit0;
4413 }
4414 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4415 ll_config->intr_type = MSI_X;
4416 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4417 ll_config->rth_steering = RTH_STEERING;
4418
4419 /* get the default configuration parameters */
4420 vxge_hw_device_config_default_get(device_config);
4421
4422 /* initialize configuration parameters */
7dad171c 4423 vxge_device_config_init(device_config, &ll_config->intr_type);
4424
4425 ret = pci_enable_device(pdev);
4426 if (ret) {
4427 vxge_debug_init(VXGE_ERR,
4428 "%s : can not enable PCI device", __func__);
4429 goto _exit0;
4430 }
4431
b3837cec 4432 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4433 vxge_debug_ll_config(VXGE_TRACE,
4434 "%s : using 64bit DMA", __func__);
4435
4436 high_dma = 1;
4437
4438 if (pci_set_consistent_dma_mask(pdev,
b3837cec 4439 DMA_BIT_MASK(64))) {
4440 vxge_debug_init(VXGE_ERR,
4441 "%s : unable to obtain 64bit DMA for "
4442 "consistent allocations", __func__);
4443 ret = -ENOMEM;
4444 goto _exit1;
4445 }
b3837cec 4446 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4447 vxge_debug_ll_config(VXGE_TRACE,
4448 "%s : using 32bit DMA", __func__);
4449 } else {
4450 ret = -ENOMEM;
4451 goto _exit1;
4452 }
4453
4454 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4455 if (ret) {
4456 vxge_debug_init(VXGE_ERR,
4457 "%s : request regions failed", __func__);
4458 goto _exit1;
4459 }
4460
4461 pci_set_master(pdev);
4462
4463 attr.bar0 = pci_ioremap_bar(pdev, 0);
4464 if (!attr.bar0) {
4465 vxge_debug_init(VXGE_ERR,
4466 "%s : cannot remap io memory bar0", __func__);
4467 ret = -ENODEV;
4468 goto _exit2;
4469 }
4470 vxge_debug_ll_config(VXGE_TRACE,
4471 "pci ioremap bar0: %p:0x%llx",
4472 attr.bar0,
4473 (unsigned long long)pci_resource_start(pdev, 0));
4474
703da5a1 4475 status = vxge_hw_device_hw_info_get(attr.bar0,
7dad171c 4476 &ll_config->device_hw_info);
4477 if (status != VXGE_HW_OK) {
4478 vxge_debug_init(VXGE_ERR,
4479			"%s: Reading of hardware info failed. "
4480 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4481 ret = -EINVAL;
7975d1ee 4482 goto _exit3;
4483 }
4484
7dad171c 4485 vpath_mask = ll_config->device_hw_info.vpath_mask;
4486 if (vpath_mask == 0) {
4487 vxge_debug_ll_config(VXGE_TRACE,
4488 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4489 ret = -EINVAL;
7975d1ee 4490 goto _exit3;
4491 }
4492
4493 vxge_debug_ll_config(VXGE_TRACE,
4494 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4495 (unsigned long long)vpath_mask);
4496
4497 function_mode = ll_config->device_hw_info.function_mode;
4498 host_type = ll_config->device_hw_info.host_type;
cb27ec60 4499 is_privileged = __vxge_hw_device_is_privilaged(host_type,
7dad171c 4500 ll_config->device_hw_info.func_id);
4502 /* Check how many vpaths are available */
4503 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4504 if (!((vpath_mask) & vxge_mBIT(i)))
4505 continue;
4506 max_vpath_supported++;
4507 }
4508
4509 if (new_device)
4510 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4511
5dbc9011 4512 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4513 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4514 (ll_config->intr_type != INTA)) {
4515 ret = pci_enable_sriov(pdev, num_vfs);
4516 if (ret)
4517 vxge_debug_ll_config(VXGE_ERR,
4518 "Failed in enabling SRIOV mode: %d\n", ret);
c92bf70d 4519 /* No need to fail out, as an error here is non-fatal */
4520 }
4521
4522 /*
4523 * Configure vpaths and get driver configured number of vpaths
4524 * which is less than or equal to the maximum vpaths per function.
4525 */
7dad171c 4526 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4527 if (!no_of_vpath) {
4528 vxge_debug_ll_config(VXGE_ERR,
4529 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4530 ret = 0;
7975d1ee 4531 goto _exit3;
4532 }
4533
4534 /* Setting driver callbacks */
4535 attr.uld_callbacks.link_up = vxge_callback_link_up;
4536 attr.uld_callbacks.link_down = vxge_callback_link_down;
4537 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4538
4539 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4540 if (status != VXGE_HW_OK) {
4541 vxge_debug_init(VXGE_ERR,
4542 "Failed to initialize device (%d)", status);
4543 ret = -EINVAL;
7975d1ee 4544 goto _exit3;
4545 }
4546
4547 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4548 ll_config->device_hw_info.fw_version.minor,
4549 ll_config->device_hw_info.fw_version.build) >=
4550 VXGE_EPROM_FW_VER) {
4551 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4552
4553 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4554 if (status != VXGE_HW_OK) {
4555 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4556 VXGE_DRIVER_NAME);
4557 /* This is a non-fatal error, continue */
4558 }
4559
4560 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4561 hldev->eprom_versions[i] = img[i].version;
4562 if (!img[i].is_valid)
4563 break;
4564 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
1d15f81c 4565 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4566 VXGE_EPROM_IMG_MAJOR(img[i].version),
4567 VXGE_EPROM_IMG_MINOR(img[i].version),
4568 VXGE_EPROM_IMG_FIX(img[i].version),
4569 VXGE_EPROM_IMG_BUILD(img[i].version));
4570 }
4571 }
4572
fa41fd10 4573	/* If FCS stripping is not disabled in the MAC, fail the driver load */
4574 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4575 if (status != VXGE_HW_OK) {
4576 vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
4577			", failing driver load", VXGE_DRIVER_NAME);
4578 ret = -EINVAL;
4579 goto _exit4;
4580 }
4581
4582 /* Always enable HWTS. This will always cause the FCS to be invalid,
4583 * due to the fact that HWTS is using the FCS as the location of the
4584 * timestamp. The HW FCS checking will still correctly determine if
4585 * there is a valid checksum, and the FCS is being removed by the driver
4586	 * anyway. So no functionality is being lost. Since it is always
4587 * enabled, we now simply use the ioctl call to set whether or not the
4588 * driver should be paying attention to the HWTS.
4589 */
4590 if (is_privileged == VXGE_HW_OK) {
4591 status = vxge_timestamp_config(hldev);
4592 if (status != VXGE_HW_OK) {
4593 vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
4594 VXGE_DRIVER_NAME);
4595 ret = -EFAULT;
4596 goto _exit4;
4597 }
4598 }
4599
703da5a1
RV
4600 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4601
4602 /* set private device info */
4603 pci_set_drvdata(pdev, hldev);
4604
4605 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4606 ll_config->addr_learn_en = addr_learn_en;
4607 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4608 ll_config->rth_hash_type_tcpipv4 = 1;
4609 ll_config->rth_hash_type_ipv4 = 0;
4610 ll_config->rth_hash_type_tcpipv6 = 0;
4611 ll_config->rth_hash_type_ipv6 = 0;
4612 ll_config->rth_hash_type_tcpipv6ex = 0;
4613 ll_config->rth_hash_type_ipv6ex = 0;
4614 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4615 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4616 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4617
4618 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4619 &vdev);
4620 if (ret) {
703da5a1 4621 ret = -EINVAL;
7975d1ee 4622 goto _exit4;
4623 }
4624
4625 ret = vxge_probe_fw_update(vdev);
4626 if (ret)
4627 goto _exit5;
4628
4629 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4630 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4631 vxge_hw_device_trace_level_get(hldev));
4632
4633 /* set private HW device info */
4634 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4635 vdev->bar0 = attr.bar0;
4636 vdev->max_vpath_supported = max_vpath_supported;
4637 vdev->no_of_vpath = no_of_vpath;
4638
4639 /* Virtual Path count */
4640 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4641 if (!vxge_bVALn(vpath_mask, i, 1))
4642 continue;
4643 if (j >= vdev->no_of_vpath)
4644 break;
4645
4646 vdev->vpaths[j].is_configured = 1;
4647 vdev->vpaths[j].device_id = i;
4648 vdev->vpaths[j].ring.driver_id = j;
4649 vdev->vpaths[j].vdev = vdev;
4650 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4651 memcpy((u8 *)vdev->vpaths[j].macaddr,
7dad171c 4652 ll_config->device_hw_info.mac_addrs[i],
4653 ETH_ALEN);
4654
4655 /* Initialize the mac address list header */
4656 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4657
4658 vdev->vpaths[j].mac_addr_cnt = 0;
4659 vdev->vpaths[j].mcast_addr_cnt = 0;
4660 j++;
4661 }
4662 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4663 vdev->max_config_port = max_config_port;
4664
4665 vdev->vlan_tag_strip = vlan_tag_strip;
4666
4667 /* map the hashing selector table to the configured vpaths */
4668 for (i = 0; i < vdev->no_of_vpath; i++)
4669 vdev->vpath_selector[i] = vpath_selector[i];
4670
4671 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4672
4673 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4674 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4675 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4676
4677 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
7dad171c 4678 vdev->ndev->name, ll_config->device_hw_info.serial_number);
4679
4680 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
7dad171c 4681 vdev->ndev->name, ll_config->device_hw_info.part_number);
4682
4683 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
7dad171c 4684 vdev->ndev->name, ll_config->device_hw_info.product_desc);
bf54e736 4686 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4687 vdev->ndev->name, macaddr);
4688
4689 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4690 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4691
4692 vxge_debug_init(VXGE_TRACE,
4693 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4694 ll_config->device_hw_info.fw_version.version,
4695 ll_config->device_hw_info.fw_date.date);
0a25bdc6 4697 if (new_device) {
7dad171c 4698 switch (ll_config->device_hw_info.function_mode) {
4699 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4700 vxge_debug_init(VXGE_TRACE,
4701 "%s: Single Function Mode Enabled", vdev->ndev->name);
4702 break;
4703 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4704 vxge_debug_init(VXGE_TRACE,
4705 "%s: Multi Function Mode Enabled", vdev->ndev->name);
4706 break;
4707 case VXGE_HW_FUNCTION_MODE_SRIOV:
4708 vxge_debug_init(VXGE_TRACE,
4709 "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4710 break;
4711 case VXGE_HW_FUNCTION_MODE_MRIOV:
4712 vxge_debug_init(VXGE_TRACE,
4713 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4714 break;
4715 }
4716 }
4717
4718 vxge_print_parm(vdev, vpath_mask);
4719
4720	/* Store the fw version for the ethtool option */
7dad171c 4721 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
4722 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4723 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4724
4725 /* Copy the station mac address to the list */
4726 for (i = 0; i < vdev->no_of_vpath; i++) {
e80be0b0 4727 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4728 if (NULL == entry) {
4729 vxge_debug_init(VXGE_ERR,
4730 "%s: mac_addr_list : memory allocation failed",
4731 vdev->ndev->name);
4732 ret = -EPERM;
e8ac1756 4733 goto _exit6;
4734 }
4735 macaddr = (u8 *)&entry->macaddr;
4736 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4737 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4738 vdev->vpaths[i].mac_addr_cnt = 1;
4739 }
4740
914d0d71 4741 kfree(device_config);
4742
4743 /*
4744 * INTA is shared in multi-function mode. This is unlike the INTA
4745 * implementation in MR mode, where each VH has its own INTA message.
4746 * - INTA is masked (disabled) as long as at least one function sets
4747 * its TITAN_MASK_ALL_INT.ALARM bit.
4748 * - INTA is unmasked (enabled) when all enabled functions have cleared
4749 * their own TITAN_MASK_ALL_INT.ALARM bit.
4750 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4751 * Though this driver leaves the top level interrupts unmasked while
4752 * leaving the required module interrupt bits masked on exit, there
4753	 * could be a rogue driver around that does not follow this procedure
4754 * resulting in a failure to generate interrupts. The following code is
4755 * present to prevent such a failure.
4756 */
4757
7dad171c 4758 if (ll_config->device_hw_info.function_mode ==
4759 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4760 if (vdev->config.intr_type == INTA)
4761 vxge_hw_device_unmask_all(hldev);
4762
4763 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4764 vdev->ndev->name, __func__, __LINE__);
4765
4766 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4767 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4768 vxge_hw_device_trace_level_get(hldev));
4769
7dad171c 4770 kfree(ll_config);
4771 return 0;
4772
e8ac1756 4773_exit6:
4774 for (i = 0; i < vdev->no_of_vpath; i++)
4775 vxge_free_mac_add_list(&vdev->vpaths[i]);
e8ac1756 4776_exit5:
703da5a1 4777 vxge_device_unregister(hldev);
7975d1ee 4778_exit4:
6cca2003 4779 pci_set_drvdata(pdev, NULL);
703da5a1 4780 vxge_hw_device_terminate(hldev);
6cca2003 4781 pci_disable_sriov(pdev);
4782_exit3:
4783 iounmap(attr.bar0);
4784_exit2:
dc66daa9 4785 pci_release_region(pdev, 0);
4786_exit1:
4787 pci_disable_device(pdev);
4788_exit0:
7dad171c 4789 kfree(ll_config);
4790 kfree(device_config);
4791 driver_config->config_dev_cnt--;
6cca2003 4792 driver_config->total_dev_cnt--;
4793 return ret;
4794}
4795
4796/**
4797 * vxge_remove - Free the PCI device
4798 * @pdev: structure containing the PCI related information of the device.
4799 * Description: This function is called by the PCI subsystem to release a
4800 * PCI device and free up all resources held by the device.
4801 */
2c91308f 4802static void __devexit vxge_remove(struct pci_dev *pdev)
703da5a1 4803{
2c91308f 4804 struct __vxge_hw_device *hldev;
4805 struct vxgedev *vdev;
4806 int i;
d8ee7071 4808 hldev = pci_get_drvdata(pdev);
4809 if (hldev == NULL)
4810 return;
6cca2003 4812 vdev = netdev_priv(hldev->ndev);
2c91308f 4814 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4815 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4816 __func__);
6cca2003 4818 for (i = 0; i < vdev->no_of_vpath; i++)
703da5a1 4819 vxge_free_mac_add_list(&vdev->vpaths[i]);
4821 vxge_device_unregister(hldev);
4822 pci_set_drvdata(pdev, NULL);
4823 /* Do not call pci_disable_sriov here, as it will break child devices */
4824 vxge_hw_device_terminate(hldev);
703da5a1 4825 iounmap(vdev->bar0);
4826 pci_release_region(pdev, 0);
4827 pci_disable_device(pdev);
4828 driver_config->config_dev_cnt--;
4829 driver_config->total_dev_cnt--;
4831 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4832 __func__, __LINE__);
4833 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4834 __LINE__);
4835}
4836
4837static struct pci_error_handlers vxge_err_handler = {
4838 .error_detected = vxge_io_error_detected,
4839 .slot_reset = vxge_io_slot_reset,
4840 .resume = vxge_io_resume,
4841};
4842
4843static struct pci_driver vxge_driver = {
4844 .name = VXGE_DRIVER_NAME,
4845 .id_table = vxge_id_table,
4846 .probe = vxge_probe,
4847 .remove = __devexit_p(vxge_remove),
4848#ifdef CONFIG_PM
4849 .suspend = vxge_pm_suspend,
4850 .resume = vxge_pm_resume,
4851#endif
4852 .err_handler = &vxge_err_handler,
4853};
4854
4855static int __init
4856vxge_starter(void)
4857{
4858 int ret = 0;
4860 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4861 pr_info("Driver version: %s\n", DRV_VERSION);
4862
4863 verify_bandwidth();
4864
4865 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4866 if (!driver_config)
4867 return -ENOMEM;
4868
4869 ret = pci_register_driver(&vxge_driver);
4870 if (ret) {
4871 kfree(driver_config);
4872 goto err;
4873 }
4874
4875 if (driver_config->config_dev_cnt &&
4876 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4877 vxge_debug_init(VXGE_ERR,
4878 "%s: Configured %d of %d devices",
4879 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4880 driver_config->total_dev_cnt);
528f7272 4881err:
4882 return ret;
4883}
4884
4885static void __exit
4886vxge_closer(void)
4887{
4888 pci_unregister_driver(&vxge_driver);
4889 kfree(driver_config);
4890}
4891module_init(vxge_starter);
4892module_exit(vxge_closer);