qlcnic: fix cdrp race condition
[linux-2.6-block.git] drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>

#include "qlcnic.h"

#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>

MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);

char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;

static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "MAC filter (0=disabled, 1=enabled)");

static int use_msi = 1;
module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");

static int use_msi_x = 1;
module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");

static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int load_fw_file;
module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");

static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");

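/*
 * Example module load using the parameters declared above:
 *   modprobe qlcnic use_msi_x=1 use_msi=1 auto_fw_reset=1 qlcnic_mac_learn=0
 */
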
static int __devinit qlcnic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif

static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);

static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);

static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);

static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);

static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
		struct qlcnic_esw_func_cfg *);
static void qlcnic_vlan_rx_add(struct net_device *, u16);
static void qlcnic_vlan_rx_del(struct net_device *, u16);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X	0x8020

static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);

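/*
 * Ring the Tx doorbell: write the ring's current producer index to its
 * command-producer CRB register so the firmware sees new descriptors.
 */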
inline void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
}

125static const u32 msi_tgt_status[8] = {
126 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
127 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
128 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
129 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
130};
131
132static const
133struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
134
135static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
136{
137 writel(0, sds_ring->crb_intr_mask);
138}
139
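/*
 * Unmask this status ring's interrupt; for legacy INTx (non-MSI family)
 * the global target mask register is also cleared.
 */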
140static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
141{
142 struct qlcnic_adapter *adapter = sds_ring->adapter;
143
144 writel(0x1, sds_ring->crb_intr_mask);
145
146 if (!QLCNIC_IS_MSI_FAMILY(adapter))
147 writel(0xfbff, adapter->tgt_mask_reg);
148}
149
150static int
151qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
152{
153 int size = sizeof(struct qlcnic_host_sds_ring) * count;
154
155 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
156
807540ba 157 return recv_ctx->sds_rings == NULL;
af19b491
AKS
158}
159
160static void
161qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
162{
163 if (recv_ctx->sds_rings != NULL)
164 kfree(recv_ctx->sds_rings);
165
166 recv_ctx->sds_rings = NULL;
167}
168
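/*
 * Allocate the SDS (status descriptor) rings and register one NAPI
 * context per ring: the last ring gets the qlcnic_poll handler, the
 * others the Rx-only qlcnic_rx_poll handler, with the NAPI weight
 * split across the rings.
 */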
169static int
170qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
171{
172 int ring;
173 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 174 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
175
176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
177 return -ENOMEM;
178
179 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
180 sds_ring = &recv_ctx->sds_rings[ring];
8f891387 181
182 if (ring == adapter->max_sds_rings - 1)
183 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
184 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
185 else
186 netif_napi_add(netdev, &sds_ring->napi,
187 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
af19b491
AKS
188 }
189
190 return 0;
191}
192
193static void
194qlcnic_napi_del(struct qlcnic_adapter *adapter)
195{
196 int ring;
197 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 198 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
199
200 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
201 sds_ring = &recv_ctx->sds_rings[ring];
202 netif_napi_del(&sds_ring->napi);
203 }
204
b1fc6d3c 205 qlcnic_free_sds_rings(adapter->recv_ctx);
af19b491
AKS
206}
207
208static void
209qlcnic_napi_enable(struct qlcnic_adapter *adapter)
210{
211 int ring;
212 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 213 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 214
780ab790
AKS
215 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
216 return;
217
af19b491
AKS
218 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
219 sds_ring = &recv_ctx->sds_rings[ring];
220 napi_enable(&sds_ring->napi);
221 qlcnic_enable_int(sds_ring);
222 }
223}
224
225static void
226qlcnic_napi_disable(struct qlcnic_adapter *adapter)
227{
228 int ring;
229 struct qlcnic_host_sds_ring *sds_ring;
b1fc6d3c 230 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 231
780ab790
AKS
232 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
233 return;
234
af19b491
AKS
235 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
236 sds_ring = &recv_ctx->sds_rings[ring];
237 qlcnic_disable_int(sds_ring);
238 napi_synchronize(&sds_ring->napi);
239 napi_disable(&sds_ring->napi);
240 }
241}
242
243static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
244{
245 memset(&adapter->stats, 0, sizeof(adapter->stats));
af19b491
AKS
246}
247
af19b491
AKS
248static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
249{
250 u32 control;
251 int pos;
252
253 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
254 if (pos) {
255 pci_read_config_dword(pdev, pos, &control);
256 if (enable)
257 control |= PCI_MSIX_FLAGS_ENABLE;
258 else
259 control = 0;
260 pci_write_config_dword(pdev, pos, control);
261 }
262}
263
264static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
265{
266 int i;
267
268 for (i = 0; i < count; i++)
269 adapter->msix_entries[i].entry = i;
270}
271
272static int
273qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
274{
2e9d722d 275 u8 mac_addr[ETH_ALEN];
af19b491
AKS
276 struct net_device *netdev = adapter->netdev;
277 struct pci_dev *pdev = adapter->pdev;
278
da48e6c3 279 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
af19b491
AKS
280 return -EIO;
281
2e9d722d 282 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
af19b491
AKS
283 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
284 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
285
286 /* set station address */
287
288 if (!is_valid_ether_addr(netdev->perm_addr))
289 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
290 netdev->dev_addr);
291
292 return 0;
293}
294
295static int qlcnic_set_mac(struct net_device *netdev, void *p)
296{
297 struct qlcnic_adapter *adapter = netdev_priv(netdev);
298 struct sockaddr *addr = p;
299
7373373d
RB
300 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
301 return -EOPNOTSUPP;
302
af19b491
AKS
303 if (!is_valid_ether_addr(addr->sa_data))
304 return -EINVAL;
305
8a15ad1f 306 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
307 netif_device_detach(netdev);
308 qlcnic_napi_disable(adapter);
309 }
310
311 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
312 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
313 qlcnic_set_multi(adapter->netdev);
314
8a15ad1f 315 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
316 netif_device_attach(netdev);
317 qlcnic_napi_enable(adapter);
318 }
319 return 0;
320}
321
322static const struct net_device_ops qlcnic_netdev_ops = {
323 .ndo_open = qlcnic_open,
324 .ndo_stop = qlcnic_close,
325 .ndo_start_xmit = qlcnic_xmit_frame,
326 .ndo_get_stats = qlcnic_get_stats,
327 .ndo_validate_addr = eth_validate_addr,
afc4b13d 328 .ndo_set_rx_mode = qlcnic_set_multi,
af19b491
AKS
329 .ndo_set_mac_address = qlcnic_set_mac,
330 .ndo_change_mtu = qlcnic_change_mtu,
135d84a9
MM
331 .ndo_fix_features = qlcnic_fix_features,
332 .ndo_set_features = qlcnic_set_features,
af19b491 333 .ndo_tx_timeout = qlcnic_tx_timeout,
b9796a14
AC
334 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
335 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
af19b491
AKS
336#ifdef CONFIG_NET_POLL_CONTROLLER
337 .ndo_poll_controller = qlcnic_poll_controller,
338#endif
339};
340
2e9d722d 341static struct qlcnic_nic_template qlcnic_ops = {
2e9d722d
AC
342 .config_bridged_mode = qlcnic_config_bridged_mode,
343 .config_led = qlcnic_config_led,
9f26f547
AC
344 .start_firmware = qlcnic_start_firmware
345};
346
347static struct qlcnic_nic_template qlcnic_vf_ops = {
9f26f547
AC
348 .config_bridged_mode = qlcnicvf_config_bridged_mode,
349 .config_led = qlcnicvf_config_led,
9f26f547 350 .start_firmware = qlcnicvf_start_firmware
2e9d722d
AC
351};
352
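/*
 * Try to enable MSI-X with num_msix vectors.  If pci_enable_msix()
 * reports that fewer vectors are available (a positive return value),
 * retry with that count rounded down to a power of two.
 */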
static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err = -1;

	adapter->max_sds_rings = 1;
	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
	qlcnic_set_msix_bit(pdev, 0);

	if (adapter->msix_supported) {
 enable_msix:
		qlcnic_init_msix_entries(adapter, num_msix);
		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
		if (err == 0) {
			adapter->flags |= QLCNIC_MSIX_ENABLED;
			qlcnic_set_msix_bit(pdev, 1);

			adapter->max_sds_rings = num_msix;

			dev_info(&pdev->dev, "using msi-x interrupts\n");
			return err;
		}
		if (err > 0) {
			num_msix = rounddown_pow_of_two(err);
			if (num_msix)
				goto enable_msix;
		}
	}
	return err;
}
af19b491 383
af19b491 384
f94bc1e7
SC
385static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
386{
387 const struct qlcnic_legacy_intr_set *legacy_intrp;
388 struct pci_dev *pdev = adapter->pdev;
af19b491
AKS
389
390 if (use_msi && !pci_enable_msi(pdev)) {
391 adapter->flags |= QLCNIC_MSI_ENABLED;
392 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
b1fc6d3c 393 msi_tgt_status[adapter->ahw->pci_func]);
af19b491
AKS
394 dev_info(&pdev->dev, "using msi interrupts\n");
395 adapter->msix_entries[0].vector = pdev->irq;
396 return;
397 }
398
f94bc1e7
SC
399 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
400
401 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
402 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
403 legacy_intrp->tgt_status_reg);
404 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
405 legacy_intrp->tgt_mask_reg);
406 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
407
408 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
409 ISR_INT_STATE_REG);
af19b491
AKS
410 dev_info(&pdev->dev, "using legacy interrupts\n");
411 adapter->msix_entries[0].vector = pdev->irq;
412}
413
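/*
 * Pick the interrupt scheme: MSI-X sized to the number of online CPUs
 * (capped at QLCNIC_DEF_NUM_STS_DESC_RINGS and rounded down to a power
 * of two), otherwise fall back to MSI or legacy INTx.
 */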
static void
qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
	int num_msix;

	if (adapter->msix_supported) {
		num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
				QLCNIC_DEF_NUM_STS_DESC_RINGS));
	} else
		num_msix = 1;

	if (!qlcnic_enable_msix(adapter, num_msix))
		return;

	qlcnic_enable_msi_legacy(adapter);
}
430
af19b491
AKS
431static void
432qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
433{
434 if (adapter->flags & QLCNIC_MSIX_ENABLED)
435 pci_disable_msix(adapter->pdev);
436 if (adapter->flags & QLCNIC_MSI_ENABLED)
437 pci_disable_msi(adapter->pdev);
438}
439
440static void
441qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
442{
b1fc6d3c
AC
443 if (adapter->ahw->pci_base0 != NULL)
444 iounmap(adapter->ahw->pci_base0);
af19b491
AKS
445}
446
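/*
 * Cache per-function NPAR information (type, port, bandwidth limits)
 * and allocate the eswitch table; used by the management function to
 * configure the other PCI functions.
 */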
447static int
448qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
449{
e88db3bd 450 struct qlcnic_pci_info *pci_info;
900853a4 451 int i, ret = 0;
346fe763
RB
452 u8 pfn;
453
e88db3bd
DC
454 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
455 if (!pci_info)
456 return -ENOMEM;
457
ca315ac2 458 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
346fe763 459 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
e88db3bd 460 if (!adapter->npars) {
900853a4 461 ret = -ENOMEM;
e88db3bd
DC
462 goto err_pci_info;
463 }
346fe763 464
ca315ac2 465 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
346fe763
RB
466 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
467 if (!adapter->eswitch) {
900853a4 468 ret = -ENOMEM;
ca315ac2 469 goto err_npars;
346fe763
RB
470 }
471
472 ret = qlcnic_get_pci_info(adapter, pci_info);
ca315ac2
DC
473 if (ret)
474 goto err_eswitch;
346fe763 475
ca315ac2
DC
476 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
477 pfn = pci_info[i].id;
f848d6dd
SC
478 if (pfn > QLCNIC_MAX_PCI_FUNC) {
479 ret = QL_STATUS_INVALID_PARAM;
480 goto err_eswitch;
481 }
a1c0c459
SC
482 adapter->npars[pfn].active = (u8)pci_info[i].active;
483 adapter->npars[pfn].type = (u8)pci_info[i].type;
484 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
ca315ac2
DC
485 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
486 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
346fe763
RB
487 }
488
ca315ac2
DC
489 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
490 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
491
e88db3bd 492 kfree(pci_info);
ca315ac2
DC
493 return 0;
494
495err_eswitch:
346fe763
RB
496 kfree(adapter->eswitch);
497 adapter->eswitch = NULL;
ca315ac2 498err_npars:
346fe763 499 kfree(adapter->npars);
ca315ac2 500 adapter->npars = NULL;
e88db3bd
DC
501err_pci_info:
502 kfree(pci_info);
346fe763
RB
503
504 return ret;
505}
506
2e9d722d
AC
507static int
508qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
509{
510 u8 id;
511 u32 ref_count;
512 int i, ret = 1;
513 u32 data = QLCNIC_MGMT_FUNC;
b1fc6d3c 514 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
2e9d722d
AC
515
516 /* If other drivers are not in use set their privilege level */
31018e06 517 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2e9d722d
AC
518 ret = qlcnic_api_lock(adapter);
519 if (ret)
520 goto err_lock;
2e9d722d 521
0e33c664
AC
522 if (qlcnic_config_npars) {
523 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
346fe763 524 id = i;
0e33c664 525 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
b1fc6d3c 526 id == adapter->ahw->pci_func)
0e33c664
AC
527 continue;
528 data |= (qlcnic_config_npars &
529 QLC_DEV_SET_DRV(0xf, id));
530 }
531 } else {
532 data = readl(priv_op);
b1fc6d3c 533 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
0e33c664 534 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
b1fc6d3c 535 adapter->ahw->pci_func));
2e9d722d
AC
536 }
537 writel(data, priv_op);
2e9d722d
AC
538 qlcnic_api_unlock(adapter);
539err_lock:
540 return ret;
541}
542
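/*
 * Determine which PCI function this device instance is (derived from
 * the MSI-X table offset) and its privilege level from the DRV_OP_MODE
 * register; non-privileged functions get the qlcnic_vf_ops template.
 */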
543static void
544qlcnic_check_vf(struct qlcnic_adapter *adapter)
2e9d722d
AC
545{
546 void __iomem *msix_base_addr;
547 void __iomem *priv_op;
548 u32 func;
549 u32 msix_base;
550 u32 op_mode, priv_level;
551
552 /* Determine FW API version */
b1fc6d3c
AC
553 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
554 QLCNIC_FW_API);
2e9d722d
AC
555
556 /* Find PCI function number */
557 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
b1fc6d3c 558 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
2e9d722d
AC
559 msix_base = readl(msix_base_addr);
560 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
b1fc6d3c 561 adapter->ahw->pci_func = func;
2e9d722d
AC
562
563 /* Determine function privilege level */
b1fc6d3c 564 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
2e9d722d 565 op_mode = readl(priv_op);
0e33c664 566 if (op_mode == QLC_DEV_DRV_DEFAULT)
2e9d722d 567 priv_level = QLCNIC_MGMT_FUNC;
0e33c664 568 else
b1fc6d3c 569 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
2e9d722d 570
0866d96d 571 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
9f26f547
AC
572 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
573 dev_info(&adapter->pdev->dev,
574 "HAL Version: %d Non Privileged function\n",
575 adapter->fw_hal_version);
576 adapter->nic_ops = &qlcnic_vf_ops;
0866d96d
AC
577 } else
578 adapter->nic_ops = &qlcnic_ops;
2e9d722d
AC
579}
580
af19b491
AKS
581static int
582qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
583{
584 void __iomem *mem_ptr0 = NULL;
585 resource_size_t mem_base;
586 unsigned long mem_len, pci_len0 = 0;
587
588 struct pci_dev *pdev = adapter->pdev;
af19b491 589
af19b491
AKS
590 /* remap phys address */
591 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
592 mem_len = pci_resource_len(pdev, 0);
593
594 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
595
596 mem_ptr0 = pci_ioremap_bar(pdev, 0);
597 if (mem_ptr0 == NULL) {
598 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
599 return -EIO;
600 }
601 pci_len0 = mem_len;
602 } else {
603 return -EIO;
604 }
605
606 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
607
b1fc6d3c
AC
608 adapter->ahw->pci_base0 = mem_ptr0;
609 adapter->ahw->pci_len0 = pci_len0;
af19b491 610
0866d96d 611 qlcnic_check_vf(adapter);
2e9d722d 612
b1fc6d3c
AC
613 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
614 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
615 adapter->ahw->pci_func)));
af19b491
AKS
616
617 return 0;
618}
619
620static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
621{
622 struct pci_dev *pdev = adapter->pdev;
623 int i, found = 0;
624
625 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
626 if (qlcnic_boards[i].vendor == pdev->vendor &&
627 qlcnic_boards[i].device == pdev->device &&
628 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
629 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
02f6e46f
SC
630 sprintf(name, "%pM: %s" ,
631 adapter->mac_addr,
632 qlcnic_boards[i].short_name);
af19b491
AKS
633 found = 1;
634 break;
635 }
636
637 }
638
639 if (!found)
7f9a0c34 640 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
af19b491
AKS
641}
642
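/*
 * Read the running firmware version, refresh the minidump template when
 * the firmware has changed, and size the Rx/jumbo rings for the port
 * type (1G vs 10G, smaller rings when the eswitch is enabled).
 */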
643static void
644qlcnic_check_options(struct qlcnic_adapter *adapter)
645{
031a4a26 646 u32 fw_major, fw_minor, fw_build, prev_fw_version;
af19b491 647 struct pci_dev *pdev = adapter->pdev;
031a4a26
SV
648 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
649
650 prev_fw_version = adapter->fw_version;
af19b491
AKS
651
652 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
653 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
654 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
655
656 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
657
031a4a26
SV
658 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) {
659 if (fw_dump->tmpl_hdr == NULL ||
660 adapter->fw_version > prev_fw_version) {
661 if (fw_dump->tmpl_hdr)
662 vfree(fw_dump->tmpl_hdr);
663 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
664 dev_info(&pdev->dev,
665 "Supports FW dump capability\n");
666 }
667 }
668
251a84c9
AKS
669 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
670 fw_major, fw_minor, fw_build);
b1fc6d3c 671 if (adapter->ahw->port_type == QLCNIC_XGBE) {
90d19005
SC
672 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
673 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
674 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
675 } else {
676 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
677 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
678 }
679
af19b491 680 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
90d19005
SC
681 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
682
b1fc6d3c 683 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
af19b491
AKS
684 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
685 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
90d19005
SC
686 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
687 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
af19b491
AKS
688 }
689
690 adapter->msix_supported = !!use_msi_x;
af19b491
AKS
691
692 adapter->num_txd = MAX_CMD_DESCRIPTORS;
693
251b036a 694 adapter->max_rds_rings = MAX_RDS_RINGS;
af19b491
AKS
695}
696
174240a8
RB
697static int
698qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
699{
700 int err;
701 struct qlcnic_info nic_info;
702
b1fc6d3c 703 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
174240a8
RB
704 if (err)
705 return err;
706
a1c0c459 707 adapter->physical_port = (u8)nic_info.phys_port;
174240a8
RB
708 adapter->switch_mode = nic_info.switch_mode;
709 adapter->max_tx_ques = nic_info.max_tx_ques;
710 adapter->max_rx_ques = nic_info.max_rx_ques;
711 adapter->capabilities = nic_info.capabilities;
712 adapter->max_mac_filters = nic_info.max_mac_filters;
713 adapter->max_mtu = nic_info.max_mtu;
714
715 if (adapter->capabilities & BIT_6)
716 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
717 else
718 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
719
720 return err;
721}
722
8cf61f89
AKS
723static void
724qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
725 struct qlcnic_esw_func_cfg *esw_cfg)
726{
727 if (esw_cfg->discard_tagged)
728 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
729 else
730 adapter->flags |= QLCNIC_TAGGING_ENABLED;
731
732 if (esw_cfg->vlan_id)
733 adapter->pvid = esw_cfg->vlan_id;
734 else
735 adapter->pvid = 0;
736}
737
b9796a14
AC
738static void
739qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
740{
741 struct qlcnic_adapter *adapter = netdev_priv(netdev);
742 set_bit(vid, adapter->vlans);
743}
744
745static void
746qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
747{
748 struct qlcnic_adapter *adapter = netdev_priv(netdev);
749
750 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
751 clear_bit(vid, adapter->vlans);
752}
753
0325d69b
RB
754static void
755qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
756 struct qlcnic_esw_func_cfg *esw_cfg)
757{
ee07c1a7
RB
758 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
759 QLCNIC_PROMISC_DISABLED);
7613c87b
RB
760
761 if (esw_cfg->mac_anti_spoof)
762 adapter->flags |= QLCNIC_MACSPOOF;
fe4d434d 763
7373373d
RB
764 if (!esw_cfg->mac_override)
765 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
766
ee07c1a7
RB
767 if (!esw_cfg->promisc_mode)
768 adapter->flags |= QLCNIC_PROMISC_DISABLED;
769
0325d69b
RB
770 qlcnic_set_netdev_features(adapter, esw_cfg);
771}
772
773static int
774qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
775{
776 struct qlcnic_esw_func_cfg esw_cfg;
777
778 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
779 return 0;
780
b1fc6d3c 781 esw_cfg.pci_func = adapter->ahw->pci_func;
0325d69b
RB
782 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
783 return -EIO;
8cf61f89 784 qlcnic_set_vlan_config(adapter, &esw_cfg);
0325d69b
RB
785 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
786
787 return 0;
788}
789
790static void
791qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
792 struct qlcnic_esw_func_cfg *esw_cfg)
793{
794 struct net_device *netdev = adapter->netdev;
795 unsigned long features, vlan_features;
796
135d84a9 797 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
0325d69b
RB
798 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
799 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
b9796a14 800 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
0325d69b
RB
801
802 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
803 features |= (NETIF_F_TSO | NETIF_F_TSO6);
804 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
805 }
b56421d0
RB
806
807 if (netdev->features & NETIF_F_LRO)
0325d69b
RB
808 features |= NETIF_F_LRO;
809
810 if (esw_cfg->offload_flags & BIT_0) {
811 netdev->features |= features;
0325d69b
RB
812 if (!(esw_cfg->offload_flags & BIT_1))
813 netdev->features &= ~NETIF_F_TSO;
814 if (!(esw_cfg->offload_flags & BIT_2))
815 netdev->features &= ~NETIF_F_TSO6;
816 } else {
817 netdev->features &= ~features;
0325d69b
RB
818 }
819
820 netdev->vlan_features = (features & vlan_features);
821}
822
0866d96d
AC
823static int
824qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
825{
826 void __iomem *priv_op;
827 u32 op_mode, priv_level;
828 int err = 0;
829
174240a8
RB
830 err = qlcnic_initialize_nic(adapter);
831 if (err)
832 return err;
833
0866d96d
AC
834 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
835 return 0;
836
b1fc6d3c 837 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
0866d96d 838 op_mode = readl(priv_op);
b1fc6d3c 839 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
0866d96d
AC
840
841 if (op_mode == QLC_DEV_DRV_DEFAULT)
842 priv_level = QLCNIC_MGMT_FUNC;
843 else
b1fc6d3c 844 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
0866d96d 845
174240a8 846 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
0866d96d
AC
847 if (priv_level == QLCNIC_MGMT_FUNC) {
848 adapter->op_mode = QLCNIC_MGMT_FUNC;
849 err = qlcnic_init_pci_info(adapter);
850 if (err)
851 return err;
852 /* Set privilege level for other functions */
853 qlcnic_set_function_modes(adapter);
854 dev_info(&adapter->pdev->dev,
855 "HAL Version: %d, Management function\n",
856 adapter->fw_hal_version);
857 } else if (priv_level == QLCNIC_PRIV_FUNC) {
858 adapter->op_mode = QLCNIC_PRIV_FUNC;
859 dev_info(&adapter->pdev->dev,
860 "HAL Version: %d, Privileged function\n",
861 adapter->fw_hal_version);
862 }
174240a8 863 }
0866d96d
AC
864
865 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
866
867 return err;
868}
869
0325d69b
RB
870static int
871qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
872{
873 struct qlcnic_esw_func_cfg esw_cfg;
874 struct qlcnic_npar_info *npar;
875 u8 i;
876
174240a8 877 if (adapter->need_fw_reset)
0325d69b
RB
878 return 0;
879
880 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
881 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
882 continue;
883 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
884 esw_cfg.pci_func = i;
885 esw_cfg.offload_flags = BIT_0;
7373373d 886 esw_cfg.mac_override = BIT_0;
ee07c1a7 887 esw_cfg.promisc_mode = BIT_0;
0325d69b
RB
888 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
889 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
890 if (qlcnic_config_switch_port(adapter, &esw_cfg))
891 return -EIO;
892 npar = &adapter->npars[i];
893 npar->pvid = esw_cfg.vlan_id;
7373373d 894 npar->mac_override = esw_cfg.mac_override;
0325d69b
RB
895 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
896 npar->discard_tagged = esw_cfg.discard_tagged;
897 npar->promisc_mode = esw_cfg.promisc_mode;
898 npar->offload_flags = esw_cfg.offload_flags;
899 }
900
901 return 0;
902}
903
4e8acb01
RB
904static int
905qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
906 struct qlcnic_npar_info *npar, int pci_func)
907{
908 struct qlcnic_esw_func_cfg esw_cfg;
909 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
910 esw_cfg.pci_func = pci_func;
911 esw_cfg.vlan_id = npar->pvid;
7373373d 912 esw_cfg.mac_override = npar->mac_override;
4e8acb01
RB
913 esw_cfg.discard_tagged = npar->discard_tagged;
914 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
915 esw_cfg.offload_flags = npar->offload_flags;
916 esw_cfg.promisc_mode = npar->promisc_mode;
917 if (qlcnic_config_switch_port(adapter, &esw_cfg))
918 return -EIO;
919
920 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
921 if (qlcnic_config_switch_port(adapter, &esw_cfg))
922 return -EIO;
923
924 return 0;
925}
926
cea8975e
AC
927static int
928qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
929{
4e8acb01 930 int i, err;
cea8975e
AC
931 struct qlcnic_npar_info *npar;
932 struct qlcnic_info nic_info;
933
174240a8 934 if (!adapter->need_fw_reset)
cea8975e
AC
935 return 0;
936
4e8acb01
RB
937 /* Set the NPAR config data after FW reset */
938 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
939 npar = &adapter->npars[i];
940 if (npar->type != QLCNIC_TYPE_NIC)
941 continue;
942 err = qlcnic_get_nic_info(adapter, &nic_info, i);
943 if (err)
944 return err;
945 nic_info.min_tx_bw = npar->min_bw;
946 nic_info.max_tx_bw = npar->max_bw;
947 err = qlcnic_set_nic_info(adapter, &nic_info);
948 if (err)
949 return err;
cea8975e 950
4e8acb01
RB
951 if (npar->enable_pm) {
952 err = qlcnic_config_port_mirroring(adapter,
953 npar->dest_npar, 1, i);
954 if (err)
955 return err;
cea8975e 956 }
4e8acb01
RB
957 err = qlcnic_reset_eswitch_config(adapter, npar, i);
958 if (err)
959 return err;
cea8975e 960 }
4e8acb01 961 return 0;
cea8975e
AC
962}
963
78f84e1a
AKS
964static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
965{
966 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
967 u32 npar_state;
968
969 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
970 return 0;
971
972 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
973 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
974 msleep(1000);
975 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
976 }
977 if (!npar_opt_timeo) {
978 dev_err(&adapter->pdev->dev,
979 "Waiting for NPAR state to opertional timeout\n");
980 return -EIO;
981 }
982 return 0;
983}
984
174240a8
RB
985static int
986qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
987{
988 int err;
989
990 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
991 adapter->op_mode != QLCNIC_MGMT_FUNC)
992 return 0;
993
994 err = qlcnic_set_default_offload_settings(adapter);
995 if (err)
996 return err;
997
998 err = qlcnic_reset_npar_config(adapter);
999 if (err)
1000 return err;
1001
1002 qlcnic_dev_set_npar_ready(adapter);
1003
1004 return err;
1005}
1006
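/*
 * Bring the firmware up: if no other function has already done so,
 * validate the firmware image (flash or file), reset and reload it,
 * then mark the device READY and apply eswitch/NPAR settings.
 */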
1007static int
1008qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1009{
d4066833 1010 int err;
af19b491 1011
aa5e18c0
SC
1012 err = qlcnic_can_start_firmware(adapter);
1013 if (err < 0)
1014 return err;
1015 else if (!err)
d4066833 1016 goto check_fw_status;
af19b491 1017
4d5bdb38
AKS
1018 if (load_fw_file)
1019 qlcnic_request_firmware(adapter);
8f891387 1020 else {
8cfdce08
SC
1021 err = qlcnic_check_flash_fw_ver(adapter);
1022 if (err)
8f891387 1023 goto err_out;
1024
4d5bdb38 1025 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
8f891387 1026 }
af19b491
AKS
1027
1028 err = qlcnic_need_fw_reset(adapter);
af19b491 1029 if (err == 0)
4e70812b 1030 goto check_fw_status;
af19b491 1031
d4066833
SC
1032 err = qlcnic_pinit_from_rom(adapter);
1033 if (err)
1034 goto err_out;
af19b491
AKS
1035
1036 err = qlcnic_load_firmware(adapter);
1037 if (err)
1038 goto err_out;
1039
1040 qlcnic_release_firmware(adapter);
d4066833 1041 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
af19b491 1042
d4066833
SC
1043check_fw_status:
1044 err = qlcnic_check_fw_status(adapter);
af19b491
AKS
1045 if (err)
1046 goto err_out;
1047
1048 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
6df900e9 1049 qlcnic_idc_debug_info(adapter, 1);
b18971d1 1050
0866d96d
AC
1051 err = qlcnic_check_eswitch_mode(adapter);
1052 if (err) {
1053 dev_err(&adapter->pdev->dev,
1054 "Memory allocation failed for eswitch\n");
1055 goto err_out;
1056 }
174240a8
RB
1057 err = qlcnic_set_mgmt_operations(adapter);
1058 if (err)
1059 goto err_out;
1060
1061 qlcnic_check_options(adapter);
af19b491
AKS
1062 adapter->need_fw_reset = 0;
1063
a7fc948f
AKS
1064 qlcnic_release_firmware(adapter);
1065 return 0;
af19b491
AKS
1066
1067err_out:
a7fc948f
AKS
1068 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1069 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
0866d96d 1070
af19b491
AKS
1071 qlcnic_release_firmware(adapter);
1072 return err;
1073}
1074
1075static int
1076qlcnic_request_irq(struct qlcnic_adapter *adapter)
1077{
1078 irq_handler_t handler;
1079 struct qlcnic_host_sds_ring *sds_ring;
1080 int err, ring;
1081
1082 unsigned long flags = 0;
1083 struct net_device *netdev = adapter->netdev;
b1fc6d3c 1084 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 1085
7eb9855d
AKS
1086 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1087 handler = qlcnic_tmp_intr;
1088 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1089 flags |= IRQF_SHARED;
1090
1091 } else {
1092 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1093 handler = qlcnic_msix_intr;
1094 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1095 handler = qlcnic_msi_intr;
1096 else {
1097 flags |= IRQF_SHARED;
1098 handler = qlcnic_intr;
1099 }
af19b491
AKS
1100 }
1101 adapter->irq = netdev->irq;
1102
1103 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1104 sds_ring = &recv_ctx->sds_rings[ring];
1105 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1106 err = request_irq(sds_ring->irq, handler,
1107 flags, sds_ring->name, sds_ring);
1108 if (err)
1109 return err;
1110 }
1111
1112 return 0;
1113}
1114
1115static void
1116qlcnic_free_irq(struct qlcnic_adapter *adapter)
1117{
1118 int ring;
1119 struct qlcnic_host_sds_ring *sds_ring;
1120
b1fc6d3c 1121 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
1122
1123 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1124 sds_ring = &recv_ctx->sds_rings[ring];
1125 free_irq(sds_ring->irq, sds_ring);
1126 }
1127}
1128
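/*
 * Bring the data path up: create the firmware context, post Rx buffers,
 * program multicast/MTU/RSS/interrupt-coalescing settings and enable
 * NAPI.  qlcnic_up() wraps this with the rtnl lock for the resume and
 * firmware-recovery paths.
 */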
1129static int
1130__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1131{
8a15ad1f
AKS
1132 int ring;
1133 struct qlcnic_host_rds_ring *rds_ring;
1134
af19b491
AKS
1135 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1136 return -EIO;
1137
8a15ad1f
AKS
1138 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1139 return 0;
0325d69b
RB
1140 if (qlcnic_set_eswitch_port_config(adapter))
1141 return -EIO;
8a15ad1f
AKS
1142
1143 if (qlcnic_fw_create_ctx(adapter))
1144 return -EIO;
1145
1146 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
b1fc6d3c
AC
1147 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1148 qlcnic_post_rx_buffers(adapter, rds_ring);
8a15ad1f
AKS
1149 }
1150
af19b491
AKS
1151 qlcnic_set_multi(netdev);
1152 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1153
b1fc6d3c 1154 adapter->ahw->linkup = 0;
af19b491
AKS
1155
1156 if (adapter->max_sds_rings > 1)
1157 qlcnic_config_rss(adapter, 1);
1158
1159 qlcnic_config_intr_coalesce(adapter);
1160
24763d80 1161 if (netdev->features & NETIF_F_LRO)
af19b491
AKS
1162 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1163
1164 qlcnic_napi_enable(adapter);
1165
1166 qlcnic_linkevent_request(adapter, 1);
1167
68bf1c68 1168 adapter->reset_context = 0;
af19b491
AKS
1169 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1170 return 0;
1171}
1172
 1173/* Usage: during resume and firmware recovery */
1174
1175static int
1176qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1177{
1178 int err = 0;
1179
1180 rtnl_lock();
1181 if (netif_running(netdev))
1182 err = __qlcnic_up(adapter, netdev);
1183 rtnl_unlock();
1184
1185 return err;
1186}
1187
1188static void
1189__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1190{
1191 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1192 return;
1193
1194 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1195 return;
1196
1197 smp_mb();
1198 spin_lock(&adapter->tx_clean_lock);
1199 netif_carrier_off(netdev);
1200 netif_tx_disable(netdev);
1201
1202 qlcnic_free_mac_list(adapter);
1203
b5e5492c
AKS
1204 if (adapter->fhash.fnum)
1205 qlcnic_delete_lb_filters(adapter);
1206
af19b491
AKS
1207 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1208
1209 qlcnic_napi_disable(adapter);
1210
8a15ad1f
AKS
1211 qlcnic_fw_destroy_ctx(adapter);
1212
1213 qlcnic_reset_rx_buffers_list(adapter);
af19b491
AKS
1214 qlcnic_release_tx_buffers(adapter);
1215 spin_unlock(&adapter->tx_clean_lock);
1216}
1217
 1218/* Usage: during suspend and firmware recovery */
1219
1220static void
1221qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1222{
1223 rtnl_lock();
1224 if (netif_running(netdev))
1225 __qlcnic_down(adapter, netdev);
1226 rtnl_unlock();
1227
1228}
1229
1230static int
1231qlcnic_attach(struct qlcnic_adapter *adapter)
1232{
1233 struct net_device *netdev = adapter->netdev;
1234 struct pci_dev *pdev = adapter->pdev;
8a15ad1f 1235 int err;
af19b491
AKS
1236
1237 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1238 return 0;
1239
af19b491
AKS
1240 err = qlcnic_napi_add(adapter, netdev);
1241 if (err)
1242 return err;
1243
1244 err = qlcnic_alloc_sw_resources(adapter);
1245 if (err) {
1246 dev_err(&pdev->dev, "Error in setting sw resources\n");
8a15ad1f 1247 goto err_out_napi_del;
af19b491
AKS
1248 }
1249
1250 err = qlcnic_alloc_hw_resources(adapter);
1251 if (err) {
1252 dev_err(&pdev->dev, "Error in setting hw resources\n");
1253 goto err_out_free_sw;
1254 }
1255
af19b491
AKS
1256 err = qlcnic_request_irq(adapter);
1257 if (err) {
1258 dev_err(&pdev->dev, "failed to setup interrupt\n");
8a15ad1f 1259 goto err_out_free_hw;
af19b491
AKS
1260 }
1261
af19b491
AKS
1262 qlcnic_create_sysfs_entries(adapter);
1263
1264 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1265 return 0;
1266
8a15ad1f 1267err_out_free_hw:
af19b491
AKS
1268 qlcnic_free_hw_resources(adapter);
1269err_out_free_sw:
1270 qlcnic_free_sw_resources(adapter);
8a15ad1f
AKS
1271err_out_napi_del:
1272 qlcnic_napi_del(adapter);
af19b491
AKS
1273 return err;
1274}
1275
1276static void
1277qlcnic_detach(struct qlcnic_adapter *adapter)
1278{
1279 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1280 return;
1281
1282 qlcnic_remove_sysfs_entries(adapter);
1283
1284 qlcnic_free_hw_resources(adapter);
1285 qlcnic_release_rx_buffers(adapter);
1286 qlcnic_free_irq(adapter);
1287 qlcnic_napi_del(adapter);
1288 qlcnic_free_sw_resources(adapter);
1289
1290 adapter->is_up = 0;
1291}
1292
7eb9855d
AKS
1293void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1294{
1295 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1296 struct qlcnic_host_sds_ring *sds_ring;
1297 int ring;
1298
78ad3892 1299 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
cdaff185
AKS
1300 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1301 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
b1fc6d3c 1302 sds_ring = &adapter->recv_ctx->sds_rings[ring];
cdaff185
AKS
1303 qlcnic_disable_int(sds_ring);
1304 }
7eb9855d
AKS
1305 }
1306
8a15ad1f
AKS
1307 qlcnic_fw_destroy_ctx(adapter);
1308
7eb9855d
AKS
1309 qlcnic_detach(adapter);
1310
1311 adapter->diag_test = 0;
1312 adapter->max_sds_rings = max_sds_rings;
1313
1314 if (qlcnic_attach(adapter))
34ce3626 1315 goto out;
7eb9855d
AKS
1316
1317 if (netif_running(netdev))
1318 __qlcnic_up(adapter, netdev);
34ce3626 1319out:
7eb9855d
AKS
1320 netif_device_attach(netdev);
1321}
1322
b1fc6d3c
AC
1323static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1324{
1325 int err = 0;
1326 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1327 GFP_KERNEL);
1328 if (!adapter->ahw) {
1329 dev_err(&adapter->pdev->dev,
1330 "Failed to allocate recv ctx resources for adapter\n");
1331 err = -ENOMEM;
1332 goto err_out;
1333 }
1334 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1335 GFP_KERNEL);
1336 if (!adapter->recv_ctx) {
1337 dev_err(&adapter->pdev->dev,
1338 "Failed to allocate recv ctx resources for adapter\n");
1339 kfree(adapter->ahw);
1340 adapter->ahw = NULL;
1341 err = -ENOMEM;
8816d009 1342 goto err_out;
b1fc6d3c 1343 }
8816d009
AC
1344 /* Initialize interrupt coalesce parameters */
1345 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1346 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1347 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
b1fc6d3c
AC
1348err_out:
1349 return err;
1350}
1351
1352static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1353{
1354 kfree(adapter->recv_ctx);
1355 adapter->recv_ctx = NULL;
1356
18f2f616
AC
1357 if (adapter->ahw->fw_dump.tmpl_hdr) {
1358 vfree(adapter->ahw->fw_dump.tmpl_hdr);
1359 adapter->ahw->fw_dump.tmpl_hdr = NULL;
1360 }
b1fc6d3c
AC
1361 kfree(adapter->ahw);
1362 adapter->ahw = NULL;
1363}
1364
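/*
 * Switch the device into diagnostic mode: tear down the normal context,
 * re-attach with a single SDS ring, recreate the firmware context and,
 * for the interrupt test, unmask the ring interrupts.
 */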
1365int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1366{
1367 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1368 struct qlcnic_host_sds_ring *sds_ring;
8a15ad1f 1369 struct qlcnic_host_rds_ring *rds_ring;
7eb9855d
AKS
1370 int ring;
1371 int ret;
1372
1373 netif_device_detach(netdev);
1374
1375 if (netif_running(netdev))
1376 __qlcnic_down(adapter, netdev);
1377
1378 qlcnic_detach(adapter);
1379
1380 adapter->max_sds_rings = 1;
1381 adapter->diag_test = test;
1382
1383 ret = qlcnic_attach(adapter);
34ce3626
AKS
1384 if (ret) {
1385 netif_device_attach(netdev);
7eb9855d 1386 return ret;
34ce3626 1387 }
7eb9855d 1388
8a15ad1f
AKS
1389 ret = qlcnic_fw_create_ctx(adapter);
1390 if (ret) {
1391 qlcnic_detach(adapter);
57e46248 1392 netif_device_attach(netdev);
8a15ad1f
AKS
1393 return ret;
1394 }
1395
1396 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
b1fc6d3c
AC
1397 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1398 qlcnic_post_rx_buffers(adapter, rds_ring);
8a15ad1f
AKS
1399 }
1400
cdaff185
AKS
1401 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1402 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
b1fc6d3c 1403 sds_ring = &adapter->recv_ctx->sds_rings[ring];
cdaff185
AKS
1404 qlcnic_enable_int(sds_ring);
1405 }
7eb9855d 1406 }
22c8c934
SC
1407
1408 if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
1409 adapter->ahw->loopback_state = 0;
1410 qlcnic_linkevent_request(adapter, 1);
1411 }
1412
78ad3892 1413 set_bit(__QLCNIC_DEV_UP, &adapter->state);
7eb9855d
AKS
1414
1415 return 0;
1416}
1417
68bf1c68
AKS
1418/* Reset context in hardware only */
1419static int
1420qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1421{
1422 struct net_device *netdev = adapter->netdev;
1423
1424 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1425 return -EBUSY;
1426
1427 netif_device_detach(netdev);
1428
1429 qlcnic_down(adapter, netdev);
1430
1431 qlcnic_up(adapter, netdev);
1432
1433 netif_device_attach(netdev);
1434
1435 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1436 return 0;
1437}
1438
af19b491
AKS
1439int
1440qlcnic_reset_context(struct qlcnic_adapter *adapter)
1441{
1442 int err = 0;
1443 struct net_device *netdev = adapter->netdev;
1444
1445 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1446 return -EBUSY;
1447
1448 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1449
1450 netif_device_detach(netdev);
1451
1452 if (netif_running(netdev))
1453 __qlcnic_down(adapter, netdev);
1454
1455 qlcnic_detach(adapter);
1456
1457 if (netif_running(netdev)) {
1458 err = qlcnic_attach(adapter);
1459 if (!err)
34ce3626 1460 __qlcnic_up(adapter, netdev);
af19b491
AKS
1461 }
1462
1463 netif_device_attach(netdev);
1464 }
1465
af19b491
AKS
1466 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1467 return err;
1468}
1469
1470static int
1471qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1bb09fb9 1472 struct net_device *netdev, u8 pci_using_dac)
af19b491
AKS
1473{
1474 int err;
1475 struct pci_dev *pdev = adapter->pdev;
1476
af19b491
AKS
1477 adapter->mc_enabled = 0;
1478 adapter->max_mc_count = 38;
1479
1480 netdev->netdev_ops = &qlcnic_netdev_ops;
ef71ff83 1481 netdev->watchdog_timeo = 5*HZ;
af19b491
AKS
1482
1483 qlcnic_change_mtu(netdev, netdev->mtu);
1484
1485 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1486
135d84a9
MM
1487 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1488 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
ac8d0c4f 1489
135d84a9
MM
1490 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1491 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1492 if (pci_using_dac)
1493 netdev->hw_features |= NETIF_F_HIGHDMA;
af19b491 1494
135d84a9 1495 netdev->vlan_features = netdev->hw_features;
af19b491
AKS
1496
1497 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
135d84a9 1498 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
af19b491 1499 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
135d84a9
MM
1500 netdev->hw_features |= NETIF_F_LRO;
1501
1502 netdev->features |= netdev->hw_features |
1503 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
1504
af19b491
AKS
1505 netdev->irq = adapter->msix_entries[0].vector;
1506
af19b491
AKS
1507 err = register_netdev(netdev);
1508 if (err) {
1509 dev_err(&pdev->dev, "failed to register net device\n");
1510 return err;
1511 }
1512
1513 return 0;
1514}
1515
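/* Prefer a 64-bit DMA mask; fall back to 32-bit if the host cannot do it. */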
1516static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1517{
1518 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1519 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1520 *pci_using_dac = 1;
1521 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1522 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1523 *pci_using_dac = 0;
1524 else {
1525 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1526 return -EIO;
1527 }
1528
1529 return 0;
1530}
1531
f94bc1e7
SC
1532static int
1533qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
1534{
1535 adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
1536 GFP_KERNEL);
1537
1538 if (adapter->msix_entries)
1539 return 0;
1540
1541 dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
1542 return -ENOMEM;
1543}
1544
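/*
 * PCI probe: map BAR 0, work out the PCI function and privilege level,
 * start the firmware, set up interrupts and register the net_device.
 */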
1545static int __devinit
1546qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1547{
1548 struct net_device *netdev = NULL;
1549 struct qlcnic_adapter *adapter = NULL;
1550 int err;
af19b491 1551 uint8_t revision_id;
1bb09fb9 1552 uint8_t pci_using_dac;
da48e6c3 1553 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
af19b491
AKS
1554
1555 err = pci_enable_device(pdev);
1556 if (err)
1557 return err;
1558
1559 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1560 err = -ENODEV;
1561 goto err_out_disable_pdev;
1562 }
1563
1bb09fb9
AKS
1564 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1565 if (err)
1566 goto err_out_disable_pdev;
1567
af19b491
AKS
1568 err = pci_request_regions(pdev, qlcnic_driver_name);
1569 if (err)
1570 goto err_out_disable_pdev;
1571
1572 pci_set_master(pdev);
451724c8 1573 pci_enable_pcie_error_reporting(pdev);
af19b491
AKS
1574
1575 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1576 if (!netdev) {
1577 dev_err(&pdev->dev, "failed to allocate net_device\n");
1578 err = -ENOMEM;
1579 goto err_out_free_res;
1580 }
1581
1582 SET_NETDEV_DEV(netdev, &pdev->dev);
1583
1584 adapter = netdev_priv(netdev);
1585 adapter->netdev = netdev;
1586 adapter->pdev = pdev;
af19b491 1587
b1fc6d3c
AC
1588 if (qlcnic_alloc_adapter_resources(adapter))
1589 goto err_out_free_netdev;
1590
1591 adapter->dev_rst_time = jiffies;
af19b491 1592 revision_id = pdev->revision;
b1fc6d3c 1593 adapter->ahw->revision_id = revision_id;
e5dcf6dc 1594 adapter->mac_learn = qlcnic_mac_learn;
af19b491 1595
b1fc6d3c
AC
1596 rwlock_init(&adapter->ahw->crb_lock);
1597 mutex_init(&adapter->ahw->mem_lock);
af19b491
AKS
1598
1599 spin_lock_init(&adapter->tx_clean_lock);
1600 INIT_LIST_HEAD(&adapter->mac_list);
1601
1602 err = qlcnic_setup_pci_map(adapter);
1603 if (err)
b1fc6d3c 1604 goto err_out_free_hw;
af19b491
AKS
1605
1606 /* This will be reset for mezz cards */
b1fc6d3c 1607 adapter->portnum = adapter->ahw->pci_func;
af19b491
AKS
1608
1609 err = qlcnic_get_board_info(adapter);
1610 if (err) {
1611 dev_err(&pdev->dev, "Error getting board config info.\n");
1612 goto err_out_iounmap;
1613 }
1614
8cfdce08
SC
1615 err = qlcnic_setup_idc_param(adapter);
1616 if (err)
b3a24649 1617 goto err_out_iounmap;
af19b491 1618
1dc0f3c5 1619 adapter->flags |= QLCNIC_NEED_FLR;
b0044bcf 1620
9f26f547 1621 err = adapter->nic_ops->start_firmware(adapter);
a7fc948f
AKS
1622 if (err) {
 1623		dev_err(&pdev->dev, "Loading fw failed. Please reboot.\n");
af19b491 1624 goto err_out_decr_ref;
a7fc948f 1625 }
af19b491 1626
da48e6c3
RB
1627 if (qlcnic_read_mac_addr(adapter))
1628 dev_warn(&pdev->dev, "failed to read mac addr\n");
1629
1630 if (adapter->portnum == 0) {
1631 get_brd_name(adapter, brd_name);
1632
1633 pr_info("%s: %s Board Chip rev 0x%x\n",
1634 module_name(THIS_MODULE),
b1fc6d3c 1635 brd_name, adapter->ahw->revision_id);
da48e6c3
RB
1636 }
1637
af19b491
AKS
1638 qlcnic_clear_stats(adapter);
1639
f94bc1e7
SC
1640 err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
1641 if (err)
1642 goto err_out_decr_ref;
1643
af19b491
AKS
1644 qlcnic_setup_intr(adapter);
1645
1bb09fb9 1646 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
af19b491
AKS
1647 if (err)
1648 goto err_out_disable_msi;
1649
1650 pci_set_drvdata(pdev, adapter);
1651
1652 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1653
b1fc6d3c 1654 switch (adapter->ahw->port_type) {
af19b491
AKS
1655 case QLCNIC_GBE:
1656 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1657 adapter->netdev->name);
1658 break;
1659 case QLCNIC_XGBE:
1660 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1661 adapter->netdev->name);
1662 break;
1663 }
1664
e5dcf6dc
SC
1665 if (adapter->mac_learn)
1666 qlcnic_alloc_lb_filters_mem(adapter);
1667
af19b491
AKS
1668 qlcnic_create_diag_entries(adapter);
1669
1670 return 0;
1671
1672err_out_disable_msi:
1673 qlcnic_teardown_intr(adapter);
f94bc1e7 1674 kfree(adapter->msix_entries);
af19b491
AKS
1675
1676err_out_decr_ref:
21854f02 1677 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1678
1679err_out_iounmap:
1680 qlcnic_cleanup_pci_map(adapter);
1681
b1fc6d3c
AC
1682err_out_free_hw:
1683 qlcnic_free_adapter_resources(adapter);
1684
af19b491
AKS
1685err_out_free_netdev:
1686 free_netdev(netdev);
1687
1688err_out_free_res:
1689 pci_release_regions(pdev);
1690
1691err_out_disable_pdev:
1692 pci_set_drvdata(pdev, NULL);
1693 pci_disable_device(pdev);
1694 return err;
1695}
1696
1697static void __devexit qlcnic_remove(struct pci_dev *pdev)
1698{
1699 struct qlcnic_adapter *adapter;
1700 struct net_device *netdev;
1701
1702 adapter = pci_get_drvdata(pdev);
1703 if (adapter == NULL)
1704 return;
1705
1706 netdev = adapter->netdev;
1707
1708 qlcnic_cancel_fw_work(adapter);
1709
1710 unregister_netdev(netdev);
1711
af19b491
AKS
1712 qlcnic_detach(adapter);
1713
2e9d722d
AC
1714 if (adapter->npars != NULL)
1715 kfree(adapter->npars);
1716 if (adapter->eswitch != NULL)
1717 kfree(adapter->eswitch);
1718
21854f02 1719 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1720
1721 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1722
b5e5492c
AKS
1723 qlcnic_free_lb_filters_mem(adapter);
1724
af19b491 1725 qlcnic_teardown_intr(adapter);
f94bc1e7 1726 kfree(adapter->msix_entries);
af19b491
AKS
1727
1728 qlcnic_remove_diag_entries(adapter);
1729
1730 qlcnic_cleanup_pci_map(adapter);
1731
1732 qlcnic_release_firmware(adapter);
1733
451724c8 1734 pci_disable_pcie_error_reporting(pdev);
af19b491
AKS
1735 pci_release_regions(pdev);
1736 pci_disable_device(pdev);
1737 pci_set_drvdata(pdev, NULL);
1738
b1fc6d3c 1739 qlcnic_free_adapter_resources(adapter);
af19b491
AKS
1740 free_netdev(netdev);
1741}
1742static int __qlcnic_shutdown(struct pci_dev *pdev)
1743{
1744 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1745 struct net_device *netdev = adapter->netdev;
1746 int retval;
1747
1748 netif_device_detach(netdev);
1749
1750 qlcnic_cancel_fw_work(adapter);
1751
1752 if (netif_running(netdev))
1753 qlcnic_down(adapter, netdev);
1754
21854f02 1755 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1756
1757 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1758
1759 retval = pci_save_state(pdev);
1760 if (retval)
1761 return retval;
1762
1763 if (qlcnic_wol_supported(adapter)) {
1764 pci_enable_wake(pdev, PCI_D3cold, 1);
1765 pci_enable_wake(pdev, PCI_D3hot, 1);
1766 }
1767
1768 return 0;
1769}
1770
1771static void qlcnic_shutdown(struct pci_dev *pdev)
1772{
1773 if (__qlcnic_shutdown(pdev))
1774 return;
1775
1776 pci_disable_device(pdev);
1777}
1778
1779#ifdef CONFIG_PM
1780static int
1781qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1782{
1783 int retval;
1784
1785 retval = __qlcnic_shutdown(pdev);
1786 if (retval)
1787 return retval;
1788
1789 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1790 return 0;
1791}
1792
1793static int
1794qlcnic_resume(struct pci_dev *pdev)
1795{
1796 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1797 struct net_device *netdev = adapter->netdev;
1798 int err;
1799
1800 err = pci_enable_device(pdev);
1801 if (err)
1802 return err;
1803
1804 pci_set_power_state(pdev, PCI_D0);
1805 pci_set_master(pdev);
1806 pci_restore_state(pdev);
1807
9f26f547 1808 err = adapter->nic_ops->start_firmware(adapter);
af19b491
AKS
1809 if (err) {
1810 dev_err(&pdev->dev, "failed to start firmware\n");
1811 return err;
1812 }
1813
1814 if (netif_running(netdev)) {
af19b491
AKS
1815 err = qlcnic_up(adapter, netdev);
1816 if (err)
52486a3a 1817 goto done;
af19b491 1818
aec1e845 1819 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491 1820 }
52486a3a 1821done:
af19b491
AKS
1822 netif_device_attach(netdev);
1823 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1824 return 0;
af19b491
AKS
1825}
1826#endif
1827
1828static int qlcnic_open(struct net_device *netdev)
1829{
1830 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1831 int err;
1832
c55ad8e5
AKS
1833 netif_carrier_off(netdev);
1834
af19b491
AKS
1835 err = qlcnic_attach(adapter);
1836 if (err)
1837 return err;
1838
1839 err = __qlcnic_up(adapter, netdev);
1840 if (err)
1841 goto err_out;
1842
1843 netif_start_queue(netdev);
1844
1845 return 0;
1846
1847err_out:
1848 qlcnic_detach(adapter);
1849 return err;
1850}
1851
1852/*
1853 * qlcnic_close - entry point that disables a network interface
1854 */
1855static int qlcnic_close(struct net_device *netdev)
1856{
1857 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1858
1859 __qlcnic_down(adapter, netdev);
1860 return 0;
1861}
1862
e5dcf6dc 1863void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
b5e5492c
AKS
1864{
1865 void *head;
1866 int i;
1867
e5dcf6dc 1868 if (adapter->fhash.fmax && adapter->fhash.fhead)
b5e5492c
AKS
1869 return;
1870
1871 spin_lock_init(&adapter->mac_learn_lock);
1872
1873 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1874 GFP_KERNEL);
1875 if (!head)
1876 return;
1877
1878 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
43d620c8 1879 adapter->fhash.fhead = head;
b5e5492c
AKS
1880
1881 for (i = 0; i < adapter->fhash.fmax; i++)
1882 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1883}
1884
1885static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1886{
1887 if (adapter->fhash.fmax && adapter->fhash.fhead)
1888 kfree(adapter->fhash.fhead);
1889
1890 adapter->fhash.fhead = NULL;
1891 adapter->fhash.fmax = 0;
1892}
1893
1894static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1895 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
b5e5492c
AKS
1896{
1897 struct cmd_desc_type0 *hwdesc;
1898 struct qlcnic_nic_req *req;
1899 struct qlcnic_mac_req *mac_req;
7e56cac4 1900 struct qlcnic_vlan_req *vlan_req;
b5e5492c
AKS
1901 u32 producer;
1902 u64 word;
1903
1904 producer = tx_ring->producer;
1905 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1906
1907 req = (struct qlcnic_nic_req *)hwdesc;
1908 memset(req, 0, sizeof(struct qlcnic_nic_req));
1909 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1910
1911 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1912 req->req_hdr = cpu_to_le64(word);
1913
1914 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1915 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
b5e5492c
AKS
1916 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1917
7e56cac4
SC
1918 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1919 vlan_req->vlan_id = vlan_id;
03c5d770 1920
b5e5492c 1921 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
036d61f0 1922 smp_mb();
b5e5492c
AKS
1923}
1924
1925#define QLCNIC_MAC_HASH(MAC)\
1926 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
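/* Editor's sketch (not part of the original source): the macro above
 * folds two 3-bit fields of the 64-bit MAC value into a 6-bit index,
 * i.e. hash = ((mac >> 16) & 0x7) | (((mac >> 40) & 0x7) << 3);
 * bits 18:16 become index bits 2:0 and bits 42:40 become index bits 5:3.
 * qlcnic_send_filter() below masks the result with
 * (QLCNIC_LB_MAX_FILTERS - 1) to select a bucket in adapter->fhash.
 */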
1927
1928static void
1929qlcnic_send_filter(struct qlcnic_adapter *adapter,
1930 struct qlcnic_host_tx_ring *tx_ring,
1931 struct cmd_desc_type0 *first_desc,
1932 struct sk_buff *skb)
1933{
1934 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1935 struct qlcnic_filter *fil, *tmp_fil;
1936 struct hlist_node *tmp_hnode, *n;
1937 struct hlist_head *head;
1938 u64 src_addr = 0;
7e56cac4 1939 __le16 vlan_id = 0;
b5e5492c
AKS
1940 u8 hindex;
1941
1942 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1943 return;
1944
1945 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1946 return;
1947
03c5d770
AKS
1948 /* Only NPAR-capable devices support VLAN-based learning */
1949 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1950 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1951 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1952 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1953 head = &(adapter->fhash.fhead[hindex]);
1954
1955 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
03c5d770
AKS
1956 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1957 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1958
1959 if (jiffies >
1960 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1961 qlcnic_change_filter(adapter, src_addr, vlan_id,
1962 tx_ring);
b5e5492c
AKS
1963 tmp_fil->ftime = jiffies;
1964 return;
1965 }
1966 }
1967
1968 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1969 if (!fil)
1970 return;
1971
03c5d770 1972 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1973
1974 fil->ftime = jiffies;
03c5d770 1975 fil->vlan_id = vlan_id;
b5e5492c
AKS
1976 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1977 spin_lock(&adapter->mac_learn_lock);
1978 hlist_add_head(&(fil->fnode), head);
1979 adapter->fhash.fnum++;
1980 spin_unlock(&adapter->mac_learn_lock);
1981}
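/* Editor's note, a sketch of the flow above (assumptions flagged inline):
 * for every transmitted frame the source MAC (plus the VLAN id on
 * eSwitch-enabled NPAR devices) is hashed into adapter->fhash. A hit
 * older than QLCNIC_READD_AGE seconds is re-announced to the firmware;
 * a miss allocates a new filter (bounded by fhash.fmax) and announces
 * it via qlcnic_change_filter(), which consumes one TX descriptor to
 * carry a QLCNIC_MAC_EVENT request. Stale entries are presumably aged
 * out by qlcnic_prune_lb_filters() from the FW poll work further below.
 */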
1982
036d61f0
AC
1983static int
1984qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
af19b491
AKS
1985 struct cmd_desc_type0 *first_desc,
1986 struct sk_buff *skb)
1987{
036d61f0
AC
1988 u8 opcode = 0, hdr_len = 0;
1989 u16 flags = 0, vlan_tci = 0;
1990 int copied, offset, copy_len;
af19b491
AKS
1991 struct cmd_desc_type0 *hwdesc;
1992 struct vlan_ethhdr *vh;
036d61f0
AC
1993 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1994 u16 protocol = ntohs(skb->protocol);
2e9d722d 1995 u32 producer = tx_ring->producer;
036d61f0
AC
1996
1997 if (protocol == ETH_P_8021Q) {
1998 vh = (struct vlan_ethhdr *)skb->data;
1999 flags = FLAGS_VLAN_TAGGED;
2000 vlan_tci = vh->h_vlan_TCI;
2001 } else if (vlan_tx_tag_present(skb)) {
2002 flags = FLAGS_VLAN_OOB;
2003 vlan_tci = vlan_tx_tag_get(skb);
2004 }
2005 if (unlikely(adapter->pvid)) {
2006 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2007 return -EIO;
2008 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
2009 goto set_flags;
2010
2011 flags = FLAGS_VLAN_OOB;
2012 vlan_tci = adapter->pvid;
2013 }
2014set_flags:
2015 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
2016 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
af19b491 2017
2e9d722d
AC
2018 if (*(skb->data) & BIT_0) {
2019 flags |= BIT_0;
2020 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
2021 }
036d61f0
AC
2022 opcode = TX_ETHER_PKT;
2023 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
af19b491
AKS
2024 skb_shinfo(skb)->gso_size > 0) {
2025
2026 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2027
2028 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2029 first_desc->total_hdr_length = hdr_len;
036d61f0
AC
2030
2031 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2032
2033 /* For LSO, we need to copy the MAC/IP/TCP headers into
2034 * the descriptor ring */
2035 copied = 0;
2036 offset = 2;
2037
2038 if (flags & FLAGS_VLAN_OOB) {
af19b491
AKS
2039 first_desc->total_hdr_length += VLAN_HLEN;
2040 first_desc->tcp_hdr_offset = VLAN_HLEN;
2041 first_desc->ip_hdr_offset = VLAN_HLEN;
2042 /* Only in case of TSO on vlan device */
2043 flags |= FLAGS_VLAN_TAGGED;
036d61f0
AC
2044
2045 /* Create a TSO vlan header template for firmware */
2046
2047 hwdesc = &tx_ring->desc_head[producer];
2048 tx_ring->cmd_buf_arr[producer].skb = NULL;
2049
2050 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2051 offset, hdr_len + VLAN_HLEN);
2052
2053 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2054 skb_copy_from_linear_data(skb, vh, 12);
2055 vh->h_vlan_proto = htons(ETH_P_8021Q);
2056 vh->h_vlan_TCI = htons(vlan_tci);
2057
2058 skb_copy_from_linear_data_offset(skb, 12,
2059 (char *)vh + 16, copy_len - 16);
2060
2061 copied = copy_len - VLAN_HLEN;
2062 offset = 0;
2063
2064 producer = get_next_index(producer, tx_ring->num_desc);
af19b491
AKS
2065 }
2066
036d61f0
AC
2067 while (copied < hdr_len) {
2068
2069 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2070 offset, (hdr_len - copied));
2071
2072 hwdesc = &tx_ring->desc_head[producer];
2073 tx_ring->cmd_buf_arr[producer].skb = NULL;
2074
2075 skb_copy_from_linear_data_offset(skb, copied,
2076 (char *) hwdesc + offset, copy_len);
2077
2078 copied += copy_len;
2079 offset = 0;
2080
2081 producer = get_next_index(producer, tx_ring->num_desc);
2082 }
2083
2084 tx_ring->producer = producer;
2085 smp_mb();
2086 adapter->stats.lso_frames++;
af19b491
AKS
2087
2088 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2089 u8 l4proto;
2090
036d61f0 2091 if (protocol == ETH_P_IP) {
af19b491
AKS
2092 l4proto = ip_hdr(skb)->protocol;
2093
2094 if (l4proto == IPPROTO_TCP)
2095 opcode = TX_TCP_PKT;
2096 else if (l4proto == IPPROTO_UDP)
2097 opcode = TX_UDP_PKT;
036d61f0 2098 } else if (protocol == ETH_P_IPV6) {
af19b491
AKS
2099 l4proto = ipv6_hdr(skb)->nexthdr;
2100
2101 if (l4proto == IPPROTO_TCP)
2102 opcode = TX_TCPV6_PKT;
2103 else if (l4proto == IPPROTO_UDP)
2104 opcode = TX_UDPV6_PKT;
2105 }
2106 }
af19b491
AKS
2107 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2108 first_desc->ip_hdr_offset += skb_network_offset(skb);
2109 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2110
036d61f0 2111 return 0;
af19b491
AKS
2112}
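/* Editor's sketch of the LSO path above (the 64-byte descriptor size is
 * an assumption, taken from sizeof(struct cmd_desc_type0) as used here):
 * the MAC/IP/TCP headers are replicated into additional command
 * descriptors so the firmware has a header template to rewrite for each
 * segment. The copy starts at byte offset 2 of the first spare
 * descriptor, which leaves a 14-byte Ethernet header ending at offset
 * 16; e.g. a 14 + 20 + 20 = 54-byte header fits in a single extra
 * descriptor (min(64 - 2, 54) = 54), and only headers longer than
 * 62 bytes spill into a second one. For VLAN-tagged TSO an 802.1Q
 * header is synthesized into the template first, adding VLAN_HLEN bytes.
 */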
2113
2114static int
2115qlcnic_map_tx_skb(struct pci_dev *pdev,
2116 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2117{
2118 struct qlcnic_skb_frag *nf;
2119 struct skb_frag_struct *frag;
2120 int i, nr_frags;
2121 dma_addr_t map;
2122
2123 nr_frags = skb_shinfo(skb)->nr_frags;
2124 nf = &pbuf->frag_array[0];
2125
2126 map = pci_map_single(pdev, skb->data,
2127 skb_headlen(skb), PCI_DMA_TODEVICE);
2128 if (pci_dma_mapping_error(pdev, map))
2129 goto out_err;
2130
2131 nf->dma = map;
2132 nf->length = skb_headlen(skb);
2133
2134 for (i = 0; i < nr_frags; i++) {
2135 frag = &skb_shinfo(skb)->frags[i];
2136 nf = &pbuf->frag_array[i+1];
2137
2138 map = pci_map_page(pdev, frag->page, frag->page_offset,
2139 frag->size, PCI_DMA_TODEVICE);
2140 if (pci_dma_mapping_error(pdev, map))
2141 goto unwind;
2142
2143 nf->dma = map;
2144 nf->length = frag->size;
2145 }
2146
2147 return 0;
2148
2149unwind:
2150 while (--i >= 0) {
2151 nf = &pbuf->frag_array[i+1];
2152 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2153 }
2154
2155 nf = &pbuf->frag_array[0];
2156 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2157
2158out_err:
2159 return -ENOMEM;
2160}
2161
036d61f0
AC
2162static void
2163qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2164 struct qlcnic_cmd_buffer *pbuf)
8cf61f89 2165{
036d61f0
AC
2166 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2167 int nr_frags = skb_shinfo(skb)->nr_frags;
2168 int i;
8cf61f89 2169
036d61f0
AC
2170 for (i = 0; i < nr_frags; i++) {
2171 nf = &pbuf->frag_array[i+1];
2172 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
8cf61f89 2173 }
8cf61f89 2174
036d61f0
AC
2175 nf = &pbuf->frag_array[0];
2176 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
5b446c6a 2177 pbuf->skb = NULL;
8cf61f89
AKS
2178}
2179
af19b491
AKS
2180static inline void
2181qlcnic_clear_cmddesc(u64 *desc)
2182{
2183 desc[0] = 0ULL;
2184 desc[2] = 0ULL;
8cf61f89 2185 desc[7] = 0ULL;
af19b491
AKS
2186}
2187
cdaff185 2188netdev_tx_t
af19b491
AKS
2189qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2190{
2191 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2192 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2193 struct qlcnic_cmd_buffer *pbuf;
2194 struct qlcnic_skb_frag *buffrag;
2195 struct cmd_desc_type0 *hwdesc, *first_desc;
2196 struct pci_dev *pdev;
dcb50aff 2197 struct ethhdr *phdr;
91a403ca 2198 int delta = 0;
af19b491
AKS
2199 int i, k;
2200
2201 u32 producer;
036d61f0 2202 int frag_count;
af19b491
AKS
2203 u32 num_txd = tx_ring->num_desc;
2204
780ab790
AKS
2205 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2206 netif_stop_queue(netdev);
2207 return NETDEV_TX_BUSY;
2208 }
2209
fe4d434d 2210 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2211 phdr = (struct ethhdr *)skb->data;
2212 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2213 adapter->mac_addr))
2214 goto drop_packet;
2215 }
2216
af19b491 2217 frag_count = skb_shinfo(skb)->nr_frags + 1;
91a403ca
AKS
2218 /* 14 frags are supported for a normal packet and
2219 * 32 frags for a TSO packet
2220 */
2221 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2222
2223 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2224 delta += skb_shinfo(skb)->frags[i].size;
2225
2226 if (!__pskb_pull_tail(skb, delta))
2227 goto drop_packet;
2228
2229 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2230 }
af19b491 2231
ef71ff83 2232 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2233 netif_stop_queue(netdev);
ef71ff83
RB
2234 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2235 netif_start_queue(netdev);
2236 else {
2237 adapter->stats.xmit_off++;
2238 return NETDEV_TX_BUSY;
2239 }
af19b491
AKS
2240 }
2241
2242 producer = tx_ring->producer;
2243 pbuf = &tx_ring->cmd_buf_arr[producer];
2244
2245 pdev = adapter->pdev;
2246
8cf61f89
AKS
2247 first_desc = hwdesc = &tx_ring->desc_head[producer];
2248 qlcnic_clear_cmddesc((u64 *)hwdesc);
2249
8ae6df97
AKS
2250 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2251 adapter->stats.tx_dma_map_error++;
af19b491 2252 goto drop_packet;
8ae6df97 2253 }
af19b491
AKS
2254
2255 pbuf->skb = skb;
2256 pbuf->frag_count = frag_count;
2257
af19b491
AKS
2258 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2259 qlcnic_set_tx_port(first_desc, adapter->portnum);
2260
2261 for (i = 0; i < frag_count; i++) {
2262
2263 k = i % 4;
2264
2265 if ((k == 0) && (i > 0)) {
2266 /* move to next desc. */
2267 producer = get_next_index(producer, num_txd);
2268 hwdesc = &tx_ring->desc_head[producer];
2269 qlcnic_clear_cmddesc((u64 *)hwdesc);
2270 tx_ring->cmd_buf_arr[producer].skb = NULL;
2271 }
2272
2273 buffrag = &pbuf->frag_array[i];
2274
2275 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2276 switch (k) {
2277 case 0:
2278 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2279 break;
2280 case 1:
2281 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2282 break;
2283 case 2:
2284 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2285 break;
2286 case 3:
2287 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2288 break;
2289 }
2290 }
2291
2292 tx_ring->producer = get_next_index(producer, num_txd);
036d61f0 2293 smp_mb();
af19b491 2294
036d61f0
AC
2295 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2296 goto unwind_buff;
af19b491 2297
e5dcf6dc 2298 if (adapter->mac_learn)
b5e5492c
AKS
2299 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2300
af19b491
AKS
2301 adapter->stats.txbytes += skb->len;
2302 adapter->stats.xmitcalled++;
2303
f127f472
SC
2304 qlcnic_update_cmd_producer(adapter, tx_ring);
2305
af19b491
AKS
2306 return NETDEV_TX_OK;
2307
036d61f0
AC
2308unwind_buff:
2309 qlcnic_unmap_buffers(pdev, skb, pbuf);
af19b491
AKS
2310drop_packet:
2311 adapter->stats.txdropped++;
2312 dev_kfree_skb_any(skb);
2313 return NETDEV_TX_OK;
2314}
2315
2316static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2317{
2318 struct net_device *netdev = adapter->netdev;
2319 u32 temp, temp_state, temp_val;
2320 int rv = 0;
2321
2322 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2323
2324 temp_state = qlcnic_get_temp_state(temp);
2325 temp_val = qlcnic_get_temp_val(temp);
2326
2327 if (temp_state == QLCNIC_TEMP_PANIC) {
2328 dev_err(&netdev->dev,
2329 "Device temperature %d degrees C exceeds"
2330 " maximum allowed. Hardware has been shut down.\n",
2331 temp_val);
2332 rv = 1;
2333 } else if (temp_state == QLCNIC_TEMP_WARN) {
2334 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2335 dev_err(&netdev->dev,
2336 "Device temperature %d degrees C "
2337 "exceeds operating range."
2338 " Immediate action needed.\n",
2339 temp_val);
2340 }
2341 } else {
2342 if (adapter->temp == QLCNIC_TEMP_WARN) {
2343 dev_info(&netdev->dev,
2344 "Device temperature is now %d degrees C"
2345 " in normal range.\n", temp_val);
2346 }
2347 }
2348 adapter->temp = temp_state;
2349 return rv;
2350}
2351
2352void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2353{
2354 struct net_device *netdev = adapter->netdev;
2355
b1fc6d3c 2356 if (adapter->ahw->linkup && !linkup) {
69324275 2357 netdev_info(netdev, "NIC Link is down\n");
b1fc6d3c 2358 adapter->ahw->linkup = 0;
af19b491
AKS
2359 if (netif_running(netdev)) {
2360 netif_carrier_off(netdev);
2361 netif_stop_queue(netdev);
2362 }
b1fc6d3c 2363 } else if (!adapter->ahw->linkup && linkup) {
69324275 2364 netdev_info(netdev, "NIC Link is up\n");
b1fc6d3c 2365 adapter->ahw->linkup = 1;
af19b491
AKS
2366 if (netif_running(netdev)) {
2367 netif_carrier_on(netdev);
2368 netif_wake_queue(netdev);
2369 }
2370 }
2371}
2372
2373static void qlcnic_tx_timeout(struct net_device *netdev)
2374{
2375 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2376
2377 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2378 return;
2379
2380 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2381
2382 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2383 adapter->need_fw_reset = 1;
2384 else
2385 adapter->reset_context = 1;
af19b491
AKS
2386}
2387
2388static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2389{
2390 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2391 struct net_device_stats *stats = &netdev->stats;
2392
af19b491
AKS
2393 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2394 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2395 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2396 stats->tx_bytes = adapter->stats.txbytes;
2397 stats->rx_dropped = adapter->stats.rxdropped;
2398 stats->tx_dropped = adapter->stats.txdropped;
2399
2400 return stats;
2401}
2402
7eb9855d 2403static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2404{
af19b491
AKS
2405 u32 status;
2406
2407 status = readl(adapter->isr_int_vec);
2408
2409 if (!(status & adapter->int_vec_bit))
2410 return IRQ_NONE;
2411
2412 /* check interrupt state machine, to be sure */
2413 status = readl(adapter->crb_int_state_reg);
2414 if (!ISR_LEGACY_INT_TRIGGERED(status))
2415 return IRQ_NONE;
2416
2417 writel(0xffffffff, adapter->tgt_status_reg);
2418 /* read twice to ensure write is flushed */
2419 readl(adapter->isr_int_vec);
2420 readl(adapter->isr_int_vec);
2421
7eb9855d
AKS
2422 return IRQ_HANDLED;
2423}
2424
2425static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2426{
2427 struct qlcnic_host_sds_ring *sds_ring = data;
2428 struct qlcnic_adapter *adapter = sds_ring->adapter;
2429
2430 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2431 goto done;
2432 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2433 writel(0xffffffff, adapter->tgt_status_reg);
2434 goto done;
2435 }
2436
2437 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2438 return IRQ_NONE;
2439
2440done:
2441 adapter->diag_cnt++;
2442 qlcnic_enable_int(sds_ring);
2443 return IRQ_HANDLED;
2444}
2445
2446static irqreturn_t qlcnic_intr(int irq, void *data)
2447{
2448 struct qlcnic_host_sds_ring *sds_ring = data;
2449 struct qlcnic_adapter *adapter = sds_ring->adapter;
2450
2451 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2452 return IRQ_NONE;
2453
af19b491
AKS
2454 napi_schedule(&sds_ring->napi);
2455
2456 return IRQ_HANDLED;
2457}
2458
2459static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2460{
2461 struct qlcnic_host_sds_ring *sds_ring = data;
2462 struct qlcnic_adapter *adapter = sds_ring->adapter;
2463
2464 /* clear interrupt */
2465 writel(0xffffffff, adapter->tgt_status_reg);
2466
2467 napi_schedule(&sds_ring->napi);
2468 return IRQ_HANDLED;
2469}
2470
2471static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2472{
2473 struct qlcnic_host_sds_ring *sds_ring = data;
2474
2475 napi_schedule(&sds_ring->napi);
2476 return IRQ_HANDLED;
2477}
2478
2479static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2480{
2481 u32 sw_consumer, hw_consumer;
2482 int count = 0, i;
2483 struct qlcnic_cmd_buffer *buffer;
2484 struct pci_dev *pdev = adapter->pdev;
2485 struct net_device *netdev = adapter->netdev;
2486 struct qlcnic_skb_frag *frag;
2487 int done;
2488 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2489
2490 if (!spin_trylock(&adapter->tx_clean_lock))
2491 return 1;
2492
2493 sw_consumer = tx_ring->sw_consumer;
2494 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2495
2496 while (sw_consumer != hw_consumer) {
2497 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2498 if (buffer->skb) {
2499 frag = &buffer->frag_array[0];
2500 pci_unmap_single(pdev, frag->dma, frag->length,
2501 PCI_DMA_TODEVICE);
2502 frag->dma = 0ULL;
2503 for (i = 1; i < buffer->frag_count; i++) {
2504 frag++;
2505 pci_unmap_page(pdev, frag->dma, frag->length,
2506 PCI_DMA_TODEVICE);
2507 frag->dma = 0ULL;
2508 }
2509
2510 adapter->stats.xmitfinished++;
2511 dev_kfree_skb_any(buffer->skb);
2512 buffer->skb = NULL;
2513 }
2514
2515 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2516 if (++count >= MAX_STATUS_HANDLE)
2517 break;
2518 }
2519
2520 if (count && netif_running(netdev)) {
2521 tx_ring->sw_consumer = sw_consumer;
2522
2523 smp_mb();
2524
2525 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2526 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2527 netif_wake_queue(netdev);
8bfe8b91 2528 adapter->stats.xmit_on++;
af19b491 2529 }
af19b491 2530 }
ef71ff83 2531 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2532 }
2533 /*
2534 * If everything is freed up to the consumer, check whether the ring is
2535 * full. If it is, check whether more needs to be freed and schedule the
2536 * callback again.
2537 *
2538 * This happens when there are 2 CPUs. One could be freeing and the
2539 * other filling it. If the ring is full when we get out of here and
2540 * the card has already interrupted the host then the host can miss the
2541 * interrupt.
2542 *
2543 * There is still a possible race condition and the host could miss an
2544 * interrupt. The card has to take care of this.
2545 */
2546 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2547 done = (sw_consumer == hw_consumer);
2548 spin_unlock(&adapter->tx_clean_lock);
2549
2550 return done;
2551}
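/* Editor's note on the final re-check above (my reading, not stated in
 * the source): the smp_mb() after publishing tx_ring->sw_consumer
 * appears to pair with the smp_mb() that follows the producer update in
 * qlcnic_xmit_frame(). The hw_consumer re-read implements the comment's
 * two-CPU scenario: if the ring filled again while this CPU was freeing,
 * "done" comes back false, qlcnic_poll() skips napi_complete() and polls
 * once more instead of waiting for an interrupt the card may have
 * already delivered.
 */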
2552
2553static int qlcnic_poll(struct napi_struct *napi, int budget)
2554{
2555 struct qlcnic_host_sds_ring *sds_ring =
2556 container_of(napi, struct qlcnic_host_sds_ring, napi);
2557
2558 struct qlcnic_adapter *adapter = sds_ring->adapter;
2559
2560 int tx_complete;
2561 int work_done;
2562
2563 tx_complete = qlcnic_process_cmd_ring(adapter);
2564
2565 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2566
2567 if ((work_done < budget) && tx_complete) {
2568 napi_complete(&sds_ring->napi);
2569 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2570 qlcnic_enable_int(sds_ring);
2571 }
2572
2573 return work_done;
2574}
2575
8f891387 2576static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2577{
2578 struct qlcnic_host_sds_ring *sds_ring =
2579 container_of(napi, struct qlcnic_host_sds_ring, napi);
2580
2581 struct qlcnic_adapter *adapter = sds_ring->adapter;
2582 int work_done;
2583
2584 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2585
2586 if (work_done < budget) {
2587 napi_complete(&sds_ring->napi);
2588 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2589 qlcnic_enable_int(sds_ring);
2590 }
2591
2592 return work_done;
2593}
2594
af19b491
AKS
2595#ifdef CONFIG_NET_POLL_CONTROLLER
2596static void qlcnic_poll_controller(struct net_device *netdev)
2597{
bf82791e
YL
2598 int ring;
2599 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2600 struct qlcnic_adapter *adapter = netdev_priv(netdev);
b1fc6d3c 2601 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
bf82791e 2602
af19b491 2603 disable_irq(adapter->irq);
bf82791e
YL
2604 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2605 sds_ring = &recv_ctx->sds_rings[ring];
2606 qlcnic_intr(adapter->irq, sds_ring);
2607 }
af19b491
AKS
2608 enable_irq(adapter->irq);
2609}
2610#endif
2611
6df900e9
SC
2612static void
2613qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2614{
2615 u32 val;
2616
2617 val = adapter->portnum & 0xf;
2618 val |= encoding << 7;
2619 val |= (jiffies - adapter->dev_rst_time) << 8;
2620
2621 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2622 adapter->dev_rst_time = jiffies;
2623}
2624
ade91f8e
AKS
2625static int
2626qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2627{
2628 u32 val;
2629
2630 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2631 state != QLCNIC_DEV_NEED_QUISCENT);
2632
2633 if (qlcnic_api_lock(adapter))
ade91f8e 2634 return -EIO;
af19b491
AKS
2635
2636 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2637
2638 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2639 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2640 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2641 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2642
2643 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2644
2645 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2646
2647 return 0;
af19b491
AKS
2648}
2649
1b95a839
AKS
2650static int
2651qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2652{
2653 u32 val;
2654
2655 if (qlcnic_api_lock(adapter))
2656 return -EBUSY;
2657
2658 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2659 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2660 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2661
2662 qlcnic_api_unlock(adapter);
2663
2664 return 0;
2665}
2666
af19b491 2667static void
21854f02 2668qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2669{
2670 u32 val;
2671
2672 if (qlcnic_api_lock(adapter))
2673 goto err;
2674
31018e06 2675 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2676 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2677 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2678
21854f02
AKS
2679 if (failed) {
2680 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2681 dev_info(&adapter->pdev->dev,
2682 "Device state set to Failed. Please Reboot\n");
2683 } else if (!(val & 0x11111111))
af19b491
AKS
2684 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2685
2686 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2687 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2688 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2689
2690 qlcnic_api_unlock(adapter);
2691err:
2692 adapter->fw_fail_cnt = 0;
032a13c7 2693 adapter->flags &= ~QLCNIC_FW_HANG;
af19b491
AKS
2694 clear_bit(__QLCNIC_START_FW, &adapter->state);
2695 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2696}
2697
f73dfc50 2698/* Grab api lock, before checking state */
af19b491
AKS
2699static int
2700qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2701{
602ca6f0 2702 int act, state, active_mask;
af19b491
AKS
2703
2704 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2705 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491 2706
602ca6f0
SV
2707 if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
2708 active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
2709 act = act & active_mask;
2710 }
2711
af19b491
AKS
2712 if (((state & 0x11111111) == (act & 0x11111111)) ||
2713 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2714 return 0;
2715 else
2716 return 1;
2717}
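/* Editor's sketch of the register layout assumed above (inferred from
 * the shifts and masks, not documented here): QLCNIC_CRB_DRV_ACTIVE and
 * QLCNIC_CRB_DRV_STATE give each PCI function a 4-bit nibble, so up to
 * eight functions fit in one 32-bit register. Bit 0 of a function's
 * nibble in DRV_ACTIVE marks it active (hence the 0x11111111 masks and
 * active_mask = ~(1 << (pci_func * 4))); in DRV_STATE bit 0 appears to
 * be the reset ack and bit 1 the quiescent ack, which is why the second
 * comparison shifts state right by one. E.g. for pci_func 2 the active
 * flag is bit 8 of DRV_ACTIVE.
 */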
2718
96f8118c
SC
2719static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2720{
2721 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2722
2723 if (val != QLCNIC_DRV_IDC_VER) {
2724 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2725 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2726 }
2727
2728 return 0;
2729}
2730
af19b491
AKS
2731static int
2732qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2733{
2734 u32 val, prev_state;
aa5e18c0 2735 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2736 u8 portnum = adapter->portnum;
96f8118c 2737 u8 ret;
af19b491 2738
f73dfc50
AKS
2739 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2740 return 1;
2741
af19b491
AKS
2742 if (qlcnic_api_lock(adapter))
2743 return -1;
2744
31018e06 2745 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2746 if (!(val & (1 << (portnum * 4)))) {
2747 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2748 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2749 }
2750
2751 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2752 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2753
2754 switch (prev_state) {
2755 case QLCNIC_DEV_COLD:
bbd8c6a4 2756 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2757 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2758 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2759 qlcnic_api_unlock(adapter);
2760 return 1;
2761
2762 case QLCNIC_DEV_READY:
96f8118c 2763 ret = qlcnic_check_idc_ver(adapter);
af19b491 2764 qlcnic_api_unlock(adapter);
96f8118c 2765 return ret;
af19b491
AKS
2766
2767 case QLCNIC_DEV_NEED_RESET:
2768 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2769 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2770 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2771 break;
2772
2773 case QLCNIC_DEV_NEED_QUISCENT:
2774 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2775 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2776 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2777 break;
2778
2779 case QLCNIC_DEV_FAILED:
a7fc948f 2780 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2781 qlcnic_api_unlock(adapter);
2782 return -1;
bbd8c6a4
AKS
2783
2784 case QLCNIC_DEV_INITIALIZING:
2785 case QLCNIC_DEV_QUISCENT:
2786 break;
af19b491
AKS
2787 }
2788
2789 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2790
2791 do {
af19b491 2792 msleep(1000);
a5e463d0
SC
2793 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2794
2795 if (prev_state == QLCNIC_DEV_QUISCENT)
2796 continue;
2797 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2798
65b5b420
AKS
2799 if (!dev_init_timeo) {
2800 dev_err(&adapter->pdev->dev,
2801 "Waiting for device to initialize timeout\n");
af19b491 2802 return -1;
65b5b420 2803 }
af19b491
AKS
2804
2805 if (qlcnic_api_lock(adapter))
2806 return -1;
2807
2808 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2809 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2810 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2811
96f8118c 2812 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2813 qlcnic_api_unlock(adapter);
2814
96f8118c 2815 return ret;
af19b491
AKS
2816}
2817
2818static void
2819qlcnic_fwinit_work(struct work_struct *work)
2820{
2821 struct qlcnic_adapter *adapter = container_of(work,
2822 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2823 u32 dev_state = 0xf;
7b749ff4 2824 u32 val;
af19b491 2825
f73dfc50
AKS
2826 if (qlcnic_api_lock(adapter))
2827 goto err_ret;
af19b491 2828
a5e463d0 2829 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2830 if (dev_state == QLCNIC_DEV_QUISCENT ||
2831 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2832 qlcnic_api_unlock(adapter);
2833 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2834 FW_POLL_DELAY * 2);
2835 return;
2836 }
2837
9f26f547 2838 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2839 qlcnic_api_unlock(adapter);
2840 goto wait_npar;
9f26f547
AC
2841 }
2842
f73dfc50
AKS
2843 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2844 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2845 adapter->reset_ack_timeo);
2846 goto skip_ack_check;
2847 }
2848
2849 if (!qlcnic_check_drv_state(adapter)) {
2850skip_ack_check:
2851 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2852
f73dfc50
AKS
2853 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2854 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2855 QLCNIC_DEV_INITIALIZING);
2856 set_bit(__QLCNIC_START_FW, &adapter->state);
2857 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2858 qlcnic_idc_debug_info(adapter, 0);
7b749ff4
SV
2859 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2860 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2861 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
af19b491
AKS
2862 }
2863
f73dfc50
AKS
2864 qlcnic_api_unlock(adapter);
2865
287e38aa 2866 rtnl_lock();
7b749ff4
SV
2867 if (adapter->ahw->fw_dump.enable &&
2868 (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
9d6a6440
AC
2869 QLCDB(adapter, DRV, "Take FW dump\n");
2870 qlcnic_dump_fw(adapter);
032a13c7 2871 adapter->flags |= QLCNIC_FW_HANG;
9d6a6440 2872 }
287e38aa 2873 rtnl_unlock();
7b749ff4
SV
2874
2875 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
9f26f547 2876 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2877 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2878 adapter->fw_wait_cnt = 0;
af19b491
AKS
2879 return;
2880 }
af19b491
AKS
2881 goto err_ret;
2882 }
2883
f73dfc50 2884 qlcnic_api_unlock(adapter);
aa5e18c0 2885
9f26f547 2886wait_npar:
af19b491 2887 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2888 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2889
af19b491 2890 switch (dev_state) {
3c4b23b1 2891 case QLCNIC_DEV_READY:
9f26f547 2892 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2893 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2894 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2895 return;
2896 }
3c4b23b1
AKS
2897 case QLCNIC_DEV_FAILED:
2898 break;
2899 default:
2900 qlcnic_schedule_work(adapter,
2901 qlcnic_fwinit_work, FW_POLL_DELAY);
2902 return;
af19b491
AKS
2903 }
2904
2905err_ret:
f73dfc50
AKS
2906 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2907 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2908 netif_device_attach(adapter->netdev);
21854f02 2909 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2910}
2911
2912static void
2913qlcnic_detach_work(struct work_struct *work)
2914{
2915 struct qlcnic_adapter *adapter = container_of(work,
2916 struct qlcnic_adapter, fw_work.work);
2917 struct net_device *netdev = adapter->netdev;
2918 u32 status;
2919
2920 netif_device_detach(netdev);
2921
b8c17620
AKS
2922 /* Don't grab the rtnl lock during Quiescent mode */
2923 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2924 if (netif_running(netdev))
2925 __qlcnic_down(adapter, netdev);
2926 } else
2927 qlcnic_down(adapter, netdev);
af19b491 2928
af19b491
AKS
2929 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2930
44f65b29
SC
2931 if (status & QLCNIC_RCODE_FATAL_ERROR) {
2932 dev_err(&adapter->pdev->dev,
2933 "Detaching the device: peg halt status1=0x%x\n",
2934 status);
2935
2936 if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
2937 dev_err(&adapter->pdev->dev,
2938 "On board active cooling fan failed. "
2939 "Device has been halted.\n");
2940 dev_err(&adapter->pdev->dev,
2941 "Replace the adapter.\n");
2942 }
2943
af19b491 2944 goto err_ret;
44f65b29 2945 }
af19b491 2946
44f65b29
SC
2947 if (adapter->temp == QLCNIC_TEMP_PANIC) {
2948 dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
2949 adapter->temp);
af19b491 2950 goto err_ret;
44f65b29
SC
2951 }
2952
602ca6f0
SV
2953 /* Don't ack if this instance is the reset owner */
2954 if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
44f65b29
SC
2955 if (qlcnic_set_drv_state(adapter, adapter->dev_state)) {
2956 dev_err(&adapter->pdev->dev,
2957 "Failed to set driver state,"
2958 "detaching the device.\n");
602ca6f0 2959 goto err_ret;
44f65b29 2960 }
602ca6f0 2961 }
af19b491
AKS
2962
2963 adapter->fw_wait_cnt = 0;
2964
2965 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2966
2967 return;
2968
2969err_ret:
34ce3626 2970 netif_device_attach(netdev);
21854f02 2971 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2972}
2973
3c4b23b1
AKS
2974/* Transition NPAR state to NON-operational */
2975static void
2976qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2977{
2978 u32 state;
2979
2980 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2981 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2982 return;
2983
2984 if (qlcnic_api_lock(adapter))
2985 return;
2986 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2987 qlcnic_api_unlock(adapter);
2988}
2989
f73dfc50 2990/* Transition to RESET state from READY state only */
18f2f616 2991void
af19b491
AKS
2992qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2993{
2994 u32 state;
2995
cea8975e 2996 adapter->need_fw_reset = 1;
af19b491
AKS
2997 if (qlcnic_api_lock(adapter))
2998 return;
2999
3000 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3001
f73dfc50 3002 if (state == QLCNIC_DEV_READY) {
af19b491 3003 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
602ca6f0 3004 adapter->flags |= QLCNIC_FW_RESET_OWNER;
65b5b420 3005 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 3006 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
3007 }
3008
3c4b23b1 3009 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
3010 qlcnic_api_unlock(adapter);
3011}
3012
9f26f547
AC
3013 /* Transition to NPAR READY state from NPAR NOT READY state */
3014static void
3015qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
3016{
9f26f547
AC
3017 if (qlcnic_api_lock(adapter))
3018 return;
3019
3c4b23b1
AKS
3020 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
3021 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
3022
3023 qlcnic_api_unlock(adapter);
3024}
3025
af19b491
AKS
3026static void
3027qlcnic_schedule_work(struct qlcnic_adapter *adapter,
3028 work_func_t func, int delay)
3029{
451724c8
SC
3030 if (test_bit(__QLCNIC_AER, &adapter->state))
3031 return;
3032
af19b491 3033 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
3034 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
3035 round_jiffies_relative(delay));
af19b491
AKS
3036}
3037
3038static void
3039qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
3040{
3041 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3042 msleep(10);
3043
3044 cancel_delayed_work_sync(&adapter->fw_work);
3045}
3046
3047static void
3048qlcnic_attach_work(struct work_struct *work)
3049{
3050 struct qlcnic_adapter *adapter = container_of(work,
3051 struct qlcnic_adapter, fw_work.work);
3052 struct net_device *netdev = adapter->netdev;
b18971d1 3053 u32 npar_state;
af19b491 3054
b18971d1
AKS
3055 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
3056 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
3057 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
3058 qlcnic_clr_all_drv_state(adapter, 0);
3059 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
3060 qlcnic_schedule_work(adapter, qlcnic_attach_work,
3061 FW_POLL_DELAY);
3062 else
3063 goto attach;
3064 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
3065 return;
3066 }
3067attach:
af19b491 3068 if (netif_running(netdev)) {
52486a3a 3069 if (qlcnic_up(adapter, netdev))
af19b491 3070 goto done;
af19b491 3071
aec1e845 3072 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
3073 }
3074
af19b491 3075done:
34ce3626 3076 netif_device_attach(netdev);
af19b491 3077 adapter->fw_fail_cnt = 0;
032a13c7 3078 adapter->flags &= ~QLCNIC_FW_HANG;
af19b491 3079 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
3080
3081 if (!qlcnic_clr_drv_state(adapter))
3082 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3083 FW_POLL_DELAY);
af19b491
AKS
3084}
3085
3086static int
3087qlcnic_check_health(struct qlcnic_adapter *adapter)
3088{
4e70812b 3089 u32 state = 0, heartbeat;
af19b491
AKS
3090 struct net_device *netdev = adapter->netdev;
3091
3092 if (qlcnic_check_temp(adapter))
3093 goto detach;
3094
2372a5f1 3095 if (adapter->need_fw_reset)
af19b491 3096 qlcnic_dev_request_reset(adapter);
af19b491
AKS
3097
3098 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 3099 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 3100 qlcnic_set_npar_non_operational(adapter);
af19b491 3101 adapter->need_fw_reset = 1;
b8c17620
AKS
3102 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3103 goto detach;
af19b491 3104
4e70812b
SC
3105 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3106 if (heartbeat != adapter->heartbeat) {
3107 adapter->heartbeat = heartbeat;
af19b491
AKS
3108 adapter->fw_fail_cnt = 0;
3109 if (adapter->need_fw_reset)
3110 goto detach;
68bf1c68 3111
9ce13ca8 3112 if (adapter->reset_context && auto_fw_reset) {
68bf1c68
AKS
3113 qlcnic_reset_hw_context(adapter);
3114 adapter->netdev->trans_start = jiffies;
3115 }
3116
af19b491
AKS
3117 return 0;
3118 }
3119
3120 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3121 return 0;
3122
032a13c7
SV
3123 adapter->flags |= QLCNIC_FW_HANG;
3124
af19b491
AKS
3125 qlcnic_dev_request_reset(adapter);
3126
9ce13ca8 3127 if (auto_fw_reset)
0df170b6 3128 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
3129
3130 dev_info(&netdev->dev, "firmware hang detected\n");
c76ecb7a
SV
3131 dev_info(&adapter->pdev->dev, "Dumping hw/fw registers\n"
3132 "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
3133 "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
3134 "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
3135 "PEG_NET_4_PC: 0x%x\n",
3136 QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
3137 QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
3138 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
3139 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
3140 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
3141 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
3142 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
af19b491
AKS
3143detach:
3144 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3145 QLCNIC_DEV_NEED_RESET;
3146
9ce13ca8 3147 if (auto_fw_reset &&
65b5b420
AKS
3148 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3149
af19b491 3150 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3151 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3152 }
af19b491
AKS
3153
3154 return 1;
3155}
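/* Editor's summary of the health check above (poll cadence is taken
 * from the FW_POLL_DELAY scheduling below): the firmware heartbeat is
 * the QLCNIC_PEG_ALIVE_COUNTER register; as long as it keeps changing
 * between polls the fail counter is reset. If it stays frozen for
 * FW_FAIL_THRESH consecutive polls, the driver flags QLCNIC_FW_HANG,
 * requests a NEED_RESET transition, dumps the PEG halt/PC registers,
 * and, when auto_fw_reset is enabled, schedules qlcnic_detach_work().
 */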
3156
3157static void
3158qlcnic_fw_poll_work(struct work_struct *work)
3159{
3160 struct qlcnic_adapter *adapter = container_of(work,
3161 struct qlcnic_adapter, fw_work.work);
3162
3163 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3164 goto reschedule;
3165
3166
3167 if (qlcnic_check_health(adapter))
3168 return;
3169
b5e5492c
AKS
3170 if (adapter->fhash.fnum)
3171 qlcnic_prune_lb_filters(adapter);
3172
af19b491
AKS
3173reschedule:
3174 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3175}
3176
451724c8
SC
3177static int qlcnic_is_first_func(struct pci_dev *pdev)
3178{
3179 struct pci_dev *oth_pdev;
3180 int val = pdev->devfn;
3181
3182 while (val-- > 0) {
3183 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3184 (pdev->bus), pdev->bus->number,
3185 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3186 if (!oth_pdev)
3187 continue;
451724c8 3188
bfc978fa
AKS
3189 if (oth_pdev->current_state != PCI_D3cold) {
3190 pci_dev_put(oth_pdev);
451724c8 3191 return 0;
bfc978fa
AKS
3192 }
3193 pci_dev_put(oth_pdev);
451724c8
SC
3194 }
3195 return 1;
3196}
3197
3198static int qlcnic_attach_func(struct pci_dev *pdev)
3199{
3200 int err, first_func;
3201 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3202 struct net_device *netdev = adapter->netdev;
3203
3204 pdev->error_state = pci_channel_io_normal;
3205
3206 err = pci_enable_device(pdev);
3207 if (err)
3208 return err;
3209
3210 pci_set_power_state(pdev, PCI_D0);
3211 pci_set_master(pdev);
3212 pci_restore_state(pdev);
3213
3214 first_func = qlcnic_is_first_func(pdev);
3215
3216 if (qlcnic_api_lock(adapter))
3217 return -EINVAL;
3218
933fce12 3219 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3220 adapter->need_fw_reset = 1;
3221 set_bit(__QLCNIC_START_FW, &adapter->state);
3222 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3223 QLCDB(adapter, DRV, "Restarting fw\n");
3224 }
3225 qlcnic_api_unlock(adapter);
3226
3227 err = adapter->nic_ops->start_firmware(adapter);
3228 if (err)
3229 return err;
3230
3231 qlcnic_clr_drv_state(adapter);
3232 qlcnic_setup_intr(adapter);
3233
3234 if (netif_running(netdev)) {
3235 err = qlcnic_attach(adapter);
3236 if (err) {
21854f02 3237 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3238 clear_bit(__QLCNIC_AER, &adapter->state);
3239 netif_device_attach(netdev);
3240 return err;
3241 }
3242
3243 err = qlcnic_up(adapter, netdev);
3244 if (err)
3245 goto done;
3246
aec1e845 3247 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3248 }
3249 done:
3250 netif_device_attach(netdev);
3251 return err;
3252}
3253
3254static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3255 pci_channel_state_t state)
3256{
3257 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3258 struct net_device *netdev = adapter->netdev;
3259
3260 if (state == pci_channel_io_perm_failure)
3261 return PCI_ERS_RESULT_DISCONNECT;
3262
3263 if (state == pci_channel_io_normal)
3264 return PCI_ERS_RESULT_RECOVERED;
3265
3266 set_bit(__QLCNIC_AER, &adapter->state);
3267 netif_device_detach(netdev);
3268
3269 cancel_delayed_work_sync(&adapter->fw_work);
3270
3271 if (netif_running(netdev))
3272 qlcnic_down(adapter, netdev);
3273
3274 qlcnic_detach(adapter);
3275 qlcnic_teardown_intr(adapter);
3276
3277 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3278
3279 pci_save_state(pdev);
3280 pci_disable_device(pdev);
3281
3282 return PCI_ERS_RESULT_NEED_RESET;
3283}
3284
3285static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3286{
3287 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3288 PCI_ERS_RESULT_RECOVERED;
3289}
3290
3291static void qlcnic_io_resume(struct pci_dev *pdev)
3292{
3293 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3294
3295 pci_cleanup_aer_uncorrect_error_status(pdev);
3296
3297 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3298 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3299 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3300 FW_POLL_DELAY);
3301}
3302
87eb743b
AC
3303static int
3304qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3305{
3306 int err;
3307
3308 err = qlcnic_can_start_firmware(adapter);
3309 if (err)
3310 return err;
3311
78f84e1a
AKS
3312 err = qlcnic_check_npar_opertional(adapter);
3313 if (err)
3314 return err;
3c4b23b1 3315
174240a8
RB
3316 err = qlcnic_initialize_nic(adapter);
3317 if (err)
3318 return err;
3319
87eb743b
AC
3320 qlcnic_check_options(adapter);
3321
7373373d
RB
3322 err = qlcnic_set_eswitch_port_config(adapter);
3323 if (err)
3324 return err;
3325
87eb743b
AC
3326 adapter->need_fw_reset = 0;
3327
3328 return err;
3329}
3330
3331static int
3332qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3333{
3334 return -EOPNOTSUPP;
3335}
3336
3337static int
3338qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3339{
3340 return -EOPNOTSUPP;
3341}
3342
af19b491
AKS
3343static ssize_t
3344qlcnic_store_bridged_mode(struct device *dev,
3345 struct device_attribute *attr, const char *buf, size_t len)
3346{
3347 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3348 unsigned long new;
3349 int ret = -EINVAL;
3350
3351 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3352 goto err_out;
3353
8a15ad1f 3354 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3355 goto err_out;
3356
3357 if (strict_strtoul(buf, 2, &new))
3358 goto err_out;
3359
2e9d722d 3360 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3361 ret = len;
3362
3363err_out:
3364 return ret;
3365}
3366
3367static ssize_t
3368qlcnic_show_bridged_mode(struct device *dev,
3369 struct device_attribute *attr, char *buf)
3370{
3371 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3372 int bridged_mode = 0;
3373
3374 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3375 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3376
3377 return sprintf(buf, "%d\n", bridged_mode);
3378}
3379
3380static struct device_attribute dev_attr_bridged_mode = {
3381 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3382 .show = qlcnic_show_bridged_mode,
3383 .store = qlcnic_store_bridged_mode,
3384};
3385
3386static ssize_t
3387qlcnic_store_diag_mode(struct device *dev,
3388 struct device_attribute *attr, const char *buf, size_t len)
3389{
3390 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3391 unsigned long new;
3392
3393 if (strict_strtoul(buf, 2, &new))
3394 return -EINVAL;
3395
3396 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3397 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3398
3399 return len;
3400}
3401
3402static ssize_t
3403qlcnic_show_diag_mode(struct device *dev,
3404 struct device_attribute *attr, char *buf)
3405{
3406 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3407
3408 return sprintf(buf, "%d\n",
3409 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3410}
3411
3412static struct device_attribute dev_attr_diag_mode = {
3413 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3414 .show = qlcnic_show_diag_mode,
3415 .store = qlcnic_store_diag_mode,
3416};
3417
f94bc1e7
SC
3418int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
3419{
3420 if (!use_msi_x && !use_msi) {
3421 netdev_info(netdev, "no msix or msi support, hence no rss\n");
3422 return -EINVAL;
3423 }
3424
3425 if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
3426 netdev_info(netdev, "rss_ring valid range [2 - %x] in "
3427 "powers of 2\n", max_hw);
3428 return -EINVAL;
3429 }
3430 return 0;
3431
3432}
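/* Editor's example for the check above (values are illustrative):
 * with max_hw = 8, rss_ring values of 2, 4 or 8 are accepted, while
 * 1, 3, 6 or 16 are rejected: the count must be a power of two in
 * the range [2, max_hw], and MSI or MSI-X must be available.
 */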
3433
3434int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
3435{
3436 struct net_device *netdev = adapter->netdev;
3437 int err = 0;
3438
3439 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3440 return -EBUSY;
3441
3442 netif_device_detach(netdev);
3443 if (netif_running(netdev))
3444 __qlcnic_down(adapter, netdev);
3445 qlcnic_detach(adapter);
3446 qlcnic_teardown_intr(adapter);
3447
3448 if (qlcnic_enable_msix(adapter, data)) {
3449 netdev_info(netdev, "failed setting max_rss; rss disabled\n");
3450 qlcnic_enable_msi_legacy(adapter);
3451 }
3452
3453 if (netif_running(netdev)) {
3454 err = qlcnic_attach(adapter);
3455 if (err)
3456 goto done;
3457 err = __qlcnic_up(adapter, netdev);
3458 if (err)
3459 goto done;
3460 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3461 }
3462 done:
3463 netif_device_attach(netdev);
3464 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3465 return err;
3466}
3467
af19b491
AKS
3468static int
3469qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3470 loff_t offset, size_t size)
3471{
897e8c7c
DP
3472 size_t crb_size = 4;
3473
af19b491
AKS
3474 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3475 return -EIO;
3476
897e8c7c
DP
3477 if (offset < QLCNIC_PCI_CRBSPACE) {
3478 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3479 QLCNIC_PCI_CAMQM_END))
3480 crb_size = 8;
3481 else
3482 return -EINVAL;
3483 }
af19b491 3484
897e8c7c
DP
3485 if ((size != crb_size) || (offset & (crb_size-1)))
3486 return -EINVAL;
af19b491
AKS
3487
3488 return 0;
3489}
3490
3491static ssize_t
2c3c8bea
CW
3492qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3493 struct bin_attribute *attr,
af19b491
AKS
3494 char *buf, loff_t offset, size_t size)
3495{
3496 struct device *dev = container_of(kobj, struct device, kobj);
3497 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3498 u32 data;
897e8c7c 3499 u64 qmdata;
af19b491
AKS
3500 int ret;
3501
3502 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3503 if (ret != 0)
3504 return ret;
3505
897e8c7c
DP
3506 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3507 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3508 memcpy(buf, &qmdata, size);
3509 } else {
3510 data = QLCRD32(adapter, offset);
3511 memcpy(buf, &data, size);
3512 }
af19b491
AKS
3513 return size;
3514}
3515
3516static ssize_t
2c3c8bea
CW
3517qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3518 struct bin_attribute *attr,
af19b491
AKS
3519 char *buf, loff_t offset, size_t size)
3520{
3521 struct device *dev = container_of(kobj, struct device, kobj);
3522 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3523 u32 data;
897e8c7c 3524 u64 qmdata;
af19b491
AKS
3525 int ret;
3526
3527 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3528 if (ret != 0)
3529 return ret;
3530
897e8c7c
DP
3531 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3532 memcpy(&qmdata, buf, size);
3533 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3534 } else {
3535 memcpy(&data, buf, size);
3536 QLCWR32(adapter, offset, data);
3537 }
af19b491
AKS
3538 return size;
3539}
3540
3541static int
3542qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3543 loff_t offset, size_t size)
3544{
3545 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3546 return -EIO;
3547
3548 if ((size != 8) || (offset & 0x7))
3549 return -EIO;
3550
3551 return 0;
3552}
3553
3554static ssize_t
2c3c8bea
CW
3555qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3556 struct bin_attribute *attr,
af19b491
AKS
3557 char *buf, loff_t offset, size_t size)
3558{
3559 struct device *dev = container_of(kobj, struct device, kobj);
3560 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3561 u64 data;
3562 int ret;
3563
3564 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3565 if (ret != 0)
3566 return ret;
3567
3568 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3569 return -EIO;
3570
3571 memcpy(buf, &data, size);
3572
3573 return size;
3574}
3575
3576static ssize_t
2c3c8bea
CW
3577qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3578 struct bin_attribute *attr,
af19b491
AKS
3579 char *buf, loff_t offset, size_t size)
3580{
3581 struct device *dev = container_of(kobj, struct device, kobj);
3582 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3583 u64 data;
3584 int ret;
3585
3586 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3587 if (ret != 0)
3588 return ret;
3589
3590 memcpy(&data, buf, size);
3591
3592 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3593 return -EIO;
3594
3595 return size;
3596}
3597
af19b491
AKS
3598static struct bin_attribute bin_attr_crb = {
3599 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3600 .size = 0,
3601 .read = qlcnic_sysfs_read_crb,
3602 .write = qlcnic_sysfs_write_crb,
3603};
3604
3605static struct bin_attribute bin_attr_mem = {
3606 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3607 .size = 0,
3608 .read = qlcnic_sysfs_read_mem,
3609 .write = qlcnic_sysfs_write_mem,
3610};
3611
cea8975e 3612static int
346fe763
RB
3613validate_pm_config(struct qlcnic_adapter *adapter,
3614 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3615{
3616
3617 u8 src_pci_func, s_esw_id, d_esw_id;
3618 u8 dest_pci_func;
3619 int i;
3620
3621 for (i = 0; i < count; i++) {
3622 src_pci_func = pm_cfg[i].pci_func;
3623 dest_pci_func = pm_cfg[i].dest_npar;
3624 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3625 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3626 return QL_STATUS_INVALID_PARAM;
3627
3628 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3629 return QL_STATUS_INVALID_PARAM;
3630
3631 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3632 return QL_STATUS_INVALID_PARAM;
3633
346fe763
RB
3634 s_esw_id = adapter->npars[src_pci_func].phy_port;
3635 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3636
3637 if (s_esw_id != d_esw_id)
3638 return QL_STATUS_INVALID_PARAM;
3639
3640 }
3641 return 0;
3642
3643}
3644
3645static ssize_t
3646qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3647 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3648{
3649 struct device *dev = container_of(kobj, struct device, kobj);
3650 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3651 struct qlcnic_pm_func_cfg *pm_cfg;
3652 u32 id, action, pci_func;
3653 int count, rem, i, ret;
3654
3655 count = size / sizeof(struct qlcnic_pm_func_cfg);
3656 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3657 if (rem)
3658 return QL_STATUS_INVALID_PARAM;
3659
3660 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3661
3662 ret = validate_pm_config(adapter, pm_cfg, count);
3663 if (ret)
3664 return ret;
3665 for (i = 0; i < count; i++) {
3666 pci_func = pm_cfg[i].pci_func;
4e8acb01 3667 action = !!pm_cfg[i].action;
346fe763
RB
3668 id = adapter->npars[pci_func].phy_port;
3669 ret = qlcnic_config_port_mirroring(adapter, id,
3670 action, pci_func);
3671 if (ret)
3672 return ret;
3673 }
3674
3675 for (i = 0; i < count; i++) {
3676 pci_func = pm_cfg[i].pci_func;
3677 id = adapter->npars[pci_func].phy_port;
4e8acb01 3678 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3679 adapter->npars[pci_func].dest_npar = id;
3680 }
3681 return size;
3682}
3683
3684static ssize_t
3685qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3686 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3687{
3688 struct device *dev = container_of(kobj, struct device, kobj);
3689 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3690 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3691 int i;
3692
3693 if (size != sizeof(pm_cfg))
3694 return QL_STATUS_INVALID_PARAM;
3695
3696 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3697 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3698 continue;
3699 pm_cfg[i].action = adapter->npars[i].enable_pm;
3700 pm_cfg[i].dest_npar = 0;
3701 pm_cfg[i].pci_func = i;
3702 }
3703 memcpy(buf, &pm_cfg, size);
3704
3705 return size;
3706}
3707
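/*
 * Validate an eswitch configuration request.  Port-default settings that
 * restrict a function (MAC anti-spoofing, turning off MAC override or
 * promiscuous mode) are only allowed for non-privileged functions; VLAN
 * add/delete entries must carry an op_type, and the add case also needs
 * a valid VLAN id.
 */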
cea8975e 3708static int
346fe763 3709validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3710 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3711{
7613c87b 3712 u32 op_mode;
3713 u8 pci_func;
3714 int i;
7613c87b 3715
b1fc6d3c 3716 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
7613c87b 3717
3718 for (i = 0; i < count; i++) {
3719 pci_func = esw_cfg[i].pci_func;
3720 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3721 return QL_STATUS_INVALID_PARAM;
3722
3723 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3724 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3725 return QL_STATUS_INVALID_PARAM;
346fe763 3726
3727 switch (esw_cfg[i].op_mode) {
3728 case QLCNIC_PORT_DEFAULTS:
7613c87b 3729 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3730 QLCNIC_NON_PRIV_FUNC) {
3731 if (esw_cfg[i].mac_anti_spoof != 0)
3732 return QL_STATUS_INVALID_PARAM;
3733 if (esw_cfg[i].mac_override != 1)
3734 return QL_STATUS_INVALID_PARAM;
3735 if (esw_cfg[i].promisc_mode != 1)
3736 return QL_STATUS_INVALID_PARAM;
7373373d 3737 }
3738 break;
3739 case QLCNIC_ADD_VLAN:
3740 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3741 return QL_STATUS_INVALID_PARAM;
3742 if (!esw_cfg[i].op_type)
3743 return QL_STATUS_INVALID_PARAM;
3744 break;
3745 case QLCNIC_DEL_VLAN:
3746 if (!esw_cfg[i].op_type)
3747 return QL_STATUS_INVALID_PARAM;
3748 break;
3749 default:
346fe763 3750 return QL_STATUS_INVALID_PARAM;
4e8acb01 3751 }
346fe763 3752 }
3753 return 0;
3754}
3755
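/*
 * "esw_config" write handler.  A management function pushes every
 * validated entry to the eswitch via qlcnic_config_switch_port().  An
 * entry that targets this adapter's own PCI function is re-read from
 * firmware and applied locally (port features or PVID); the management
 * function finally mirrors the new settings into its per-npar state.
 */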
3756static ssize_t
3757qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3758 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3759{
3760 struct device *dev = container_of(kobj, struct device, kobj);
3761 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3762 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3763 struct qlcnic_npar_info *npar;
346fe763 3764 int count, rem, i, ret;
0325d69b 3765 u8 pci_func, op_mode = 0;
3766
3767 count = size / sizeof(struct qlcnic_esw_func_cfg);
3768 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3769 if (rem)
3770 return QL_STATUS_INVALID_PARAM;
3771
3772 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3773 ret = validate_esw_config(adapter, esw_cfg, count);
3774 if (ret)
3775 return ret;
3776
3777 for (i = 0; i < count; i++) {
3778 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3779 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3780 return QL_STATUS_INVALID_PARAM;
e9a47700 3781
b1fc6d3c 3782 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
3783 continue;
3784
3785 op_mode = esw_cfg[i].op_mode;
3786 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3787 esw_cfg[i].op_mode = op_mode;
b1fc6d3c 3788 esw_cfg[i].pci_func = adapter->ahw->pci_func;
3789
3790 switch (esw_cfg[i].op_mode) {
3791 case QLCNIC_PORT_DEFAULTS:
3792 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3793 break;
3794 case QLCNIC_ADD_VLAN:
3795 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3796 break;
3797 case QLCNIC_DEL_VLAN:
3798 esw_cfg[i].vlan_id = 0;
3799 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3800 break;
0325d69b 3801 }
3802 }
3803
3804 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3805 goto out;
e9a47700 3806
3807 for (i = 0; i < count; i++) {
3808 pci_func = esw_cfg[i].pci_func;
3809 npar = &adapter->npars[pci_func];
3810 switch (esw_cfg[i].op_mode) {
3811 case QLCNIC_PORT_DEFAULTS:
3812 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3813 npar->mac_override = esw_cfg[i].mac_override;
3814 npar->offload_flags = esw_cfg[i].offload_flags;
3815 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3816 npar->discard_tagged = esw_cfg[i].discard_tagged;
3817 break;
3818 case QLCNIC_ADD_VLAN:
3819 npar->pvid = esw_cfg[i].vlan_id;
3820 break;
3821 case QLCNIC_DEL_VLAN:
3822 npar->pvid = 0;
3823 break;
3824 }
346fe763 3825 }
0325d69b 3826out:
3827 return size;
3828}
3829
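/*
 * "esw_config" read handler: query the current eswitch port settings for
 * every NIC function directly from firmware.  As with the other config
 * nodes, only full-array reads are accepted.
 */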
3830static ssize_t
3831qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3832 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3833{
3834 struct device *dev = container_of(kobj, struct device, kobj);
3835 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3836 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3837 u8 i;
3838
3839 if (size != sizeof(esw_cfg))
3840 return QL_STATUS_INVALID_PARAM;
3841
	memset(&esw_cfg, 0, size);	/* don't copy uninitialized entries for non-NIC functions */
3842 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3843 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3844 continue;
3845 esw_cfg[i].pci_func = i;
3846 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3847 return QL_STATUS_INVALID_PARAM;
3848 }
3849 memcpy(buf, &esw_cfg, size);
3850
3851 return size;
3852}
3853
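/*
 * NPAR bandwidth settings are only accepted for valid NIC functions and
 * must pass the IS_VALID_BW() range check.
 */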
cea8975e 3854static int
3855validate_npar_config(struct qlcnic_adapter *adapter,
3856 struct qlcnic_npar_func_cfg *np_cfg, int count)
3857{
3858 u8 pci_func, i;
3859
3860 for (i = 0; i < count; i++) {
3861 pci_func = np_cfg[i].pci_func;
3862 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3863 return QL_STATUS_INVALID_PARAM;
3864
3865 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3866 return QL_STATUS_INVALID_PARAM;
3867
3868 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3869 !IS_VALID_BW(np_cfg[i].max_bw))
3870 return QL_STATUS_INVALID_PARAM;
3871 }
3872 return 0;
3873}
3874
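/*
 * "npar_config" write handler.  For each entry the current nic_info is
 * fetched from firmware, the min/max TX bandwidth fields are replaced
 * with the requested values, the record is written back and the local
 * npar cache is updated.  The bandwidth values are passed through to
 * firmware unchanged; their unit is whatever IS_VALID_BW() allows
 * (presumably a share of the physical port).
 */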
3875static ssize_t
3876qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3877 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3878{
3879 struct device *dev = container_of(kobj, struct device, kobj);
3880 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3881 struct qlcnic_info nic_info;
3882 struct qlcnic_npar_func_cfg *np_cfg;
3883 int i, count, rem, ret;
3884 u8 pci_func;
3885
3886 count = size / sizeof(struct qlcnic_npar_func_cfg);
3887 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3888 if (rem)
3889 return QL_STATUS_INVALID_PARAM;
3890
3891 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3892 ret = validate_npar_config(adapter, np_cfg, count);
3893 if (ret)
3894 return ret;
3895
3896	for (i = 0; i < count; i++) {
3897 pci_func = np_cfg[i].pci_func;
3898 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3899 if (ret)
3900 return ret;
3901 nic_info.pci_func = pci_func;
3902 nic_info.min_tx_bw = np_cfg[i].min_bw;
3903 nic_info.max_tx_bw = np_cfg[i].max_bw;
3904 ret = qlcnic_set_nic_info(adapter, &nic_info);
3905 if (ret)
3906 return ret;
3907		adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
3908		adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
3909 }
3910
3911 return size;
3912
3913}
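/*
 * "npar_config" read handler: return the firmware view (op mode, port,
 * capabilities, bandwidth limits, queue counts) for every NIC function.
 */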
3914static ssize_t
3915qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3916 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3917{
3918 struct device *dev = container_of(kobj, struct device, kobj);
3919 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3920 struct qlcnic_info nic_info;
3921 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3922 int i, ret;
3923
3924 if (size != sizeof(np_cfg))
3925 return QL_STATUS_INVALID_PARAM;
3926
	memset(&np_cfg, 0, size);	/* don't copy uninitialized entries for non-NIC functions */
3927	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3928 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3929 continue;
3930 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3931 if (ret)
3932 return ret;
3933
3934 np_cfg[i].pci_func = i;
a1c0c459 3935 np_cfg[i].op_mode = (u8)nic_info.op_mode;
3936 np_cfg[i].port_num = nic_info.phys_port;
3937 np_cfg[i].fw_capab = nic_info.capabilities;
3938		np_cfg[i].min_bw = nic_info.min_tx_bw;
3939 np_cfg[i].max_bw = nic_info.max_tx_bw;
3940 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3941 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3942 }
3943 memcpy(buf, &np_cfg, size);
3944 return size;
3945}
3946
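/*
 * Statistics nodes.  Reading "port_stats" or "esw_stats" returns one
 * struct qlcnic_esw_statistics, with the RX and TX halves fetched
 * separately from firmware; the read offset selects the PCI function or
 * eswitch port being queried.
 */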
3947static ssize_t
3948qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3949 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3950{
3951 struct device *dev = container_of(kobj, struct device, kobj);
3952 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3953 struct qlcnic_esw_statistics port_stats;
3954 int ret;
3955
3956 if (size != sizeof(struct qlcnic_esw_statistics))
3957 return QL_STATUS_INVALID_PARAM;
3958
3959 if (offset >= QLCNIC_MAX_PCI_FUNC)
3960 return QL_STATUS_INVALID_PARAM;
3961
3962 memset(&port_stats, 0, size);
3963 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3964 &port_stats.rx);
3965 if (ret)
3966 return ret;
3967
3968 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3969 &port_stats.tx);
3970 if (ret)
3971 return ret;
3972
3973 memcpy(buf, &port_stats, size);
3974 return size;
3975}
3976
3977static ssize_t
3978qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3979 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3980{
3981 struct device *dev = container_of(kobj, struct device, kobj);
3982 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3983 struct qlcnic_esw_statistics esw_stats;
3984 int ret;
3985
3986 if (size != sizeof(struct qlcnic_esw_statistics))
3987 return QL_STATUS_INVALID_PARAM;
3988
3989 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3990 return QL_STATUS_INVALID_PARAM;
3991
3992 memset(&esw_stats, 0, size);
3993 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3994 &esw_stats.rx);
3995 if (ret)
3996 return ret;
3997
3998 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3999 &esw_stats.tx);
4000 if (ret)
4001 return ret;
4002
4003 memcpy(buf, &esw_stats, size);
4004 return size;
4005}
4006
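/*
 * Writing to the statistics nodes clears the corresponding RX and TX
 * counters.  The offset again selects the eswitch port or PCI function;
 * the buffer contents are ignored.
 */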
4007static ssize_t
4008qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
4009 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4010{
4011 struct device *dev = container_of(kobj, struct device, kobj);
4012 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4013 int ret;
4014
4015 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
4016 return QL_STATUS_INVALID_PARAM;
4017
4018 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
4019 QLCNIC_QUERY_RX_COUNTER);
4020 if (ret)
4021 return ret;
4022
4023 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
4024 QLCNIC_QUERY_TX_COUNTER);
4025 if (ret)
4026 return ret;
4027
4028 return size;
4029}
4030
4031static ssize_t
4032qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
4033 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4034{
4035
4036 struct device *dev = container_of(kobj, struct device, kobj);
4037 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4038 int ret;
4039
4040 if (offset >= QLCNIC_MAX_PCI_FUNC)
4041 return QL_STATUS_INVALID_PARAM;
4042
4043 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4044 QLCNIC_QUERY_RX_COUNTER);
4045 if (ret)
4046 return ret;
4047
4048 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4049 QLCNIC_QUERY_TX_COUNTER);
4050 if (ret)
4051 return ret;
4052
4053 return size;
4054}
4055
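/*
 * "pci_config" read handler: translate the firmware's per-function PCI
 * info (type, port, bandwidth limits, MAC address) into the exported
 * qlcnic_pci_func_cfg array.  The temporary pci_info array is allocated
 * with kcalloc(), presumably to keep QLCNIC_MAX_PCI_FUNC entries off the
 * stack.
 */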
4056static ssize_t
4057qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
4058 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4059{
4060 struct device *dev = container_of(kobj, struct device, kobj);
4061 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4062 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 4063 struct qlcnic_pci_info *pci_info;
4064 int i, ret;
4065
4066 if (size != sizeof(pci_cfg))
4067 return QL_STATUS_INVALID_PARAM;
4068
4069 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
4070 if (!pci_info)
4071 return -ENOMEM;
4072
346fe763 4073 ret = qlcnic_get_pci_info(adapter, pci_info);
4074 if (ret) {
4075 kfree(pci_info);
346fe763 4076 return ret;
e88db3bd 4077 }
4078
4079	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
4080 pci_cfg[i].pci_func = pci_info[i].id;
4081 pci_cfg[i].func_type = pci_info[i].type;
4082 pci_cfg[i].port_num = pci_info[i].default_port;
4083 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
4084 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
4085 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
4086 }
4087 memcpy(buf, &pci_cfg, size);
e88db3bd 4088 kfree(pci_info);
346fe763 4089 return size;
4090}
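/*
 * Binary attribute descriptors tying the sysfs node names to the
 * handlers above.  The nodes are created on the PCI device, so they
 * would normally appear under the adapter's directory in
 * /sys/bus/pci/devices/ (path given for illustration); read-only nodes
 * such as "pci_config" simply leave .write NULL.
 */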
4091static struct bin_attribute bin_attr_npar_config = {
4092 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
4093 .size = 0,
4094 .read = qlcnic_sysfs_read_npar_config,
4095 .write = qlcnic_sysfs_write_npar_config,
4096};
4097
4098static struct bin_attribute bin_attr_pci_config = {
4099 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
4100 .size = 0,
4101 .read = qlcnic_sysfs_read_pci_config,
4102 .write = NULL,
4103};
4104
4105static struct bin_attribute bin_attr_port_stats = {
4106 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
4107 .size = 0,
4108 .read = qlcnic_sysfs_get_port_stats,
4109 .write = qlcnic_sysfs_clear_port_stats,
4110};
4111
4112static struct bin_attribute bin_attr_esw_stats = {
4113 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
4114 .size = 0,
4115 .read = qlcnic_sysfs_get_esw_stats,
4116 .write = qlcnic_sysfs_clear_esw_stats,
4117};
4118
4119static struct bin_attribute bin_attr_esw_config = {
4120 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
4121 .size = 0,
4122 .read = qlcnic_sysfs_read_esw_config,
4123 .write = qlcnic_sysfs_write_esw_config,
4124};
4125
4126static struct bin_attribute bin_attr_pm_config = {
4127 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
4128 .size = 0,
4129 .read = qlcnic_sysfs_read_pm_config,
4130 .write = qlcnic_sysfs_write_pm_config,
4131};
4132
4133static void
4134qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
4135{
4136 struct device *dev = &adapter->pdev->dev;
4137
4138 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4139 if (device_create_file(dev, &dev_attr_bridged_mode))
4140 dev_warn(dev,
4141 "failed to create bridged_mode sysfs entry\n");
4142}
4143
4144static void
4145qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
4146{
4147 struct device *dev = &adapter->pdev->dev;
4148
4149 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4150 device_remove_file(dev, &dev_attr_bridged_mode);
4151}
4152
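/*
 * Diagnostic sysfs entries are created according to privilege:
 * "port_stats" is always present, non-privileged functions get nothing
 * else, the eswitch configuration node requires an enabled eswitch, and
 * "npar_config", "pm_config" and "esw_stats" are reserved for the
 * management function.  Removal mirrors the same checks.
 */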
4153static void
4154qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4155{
4156 struct device *dev = &adapter->pdev->dev;
4157
4158 if (device_create_bin_file(dev, &bin_attr_port_stats))
4159 dev_info(dev, "failed to create port stats sysfs entry");
4160
4161 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4162 return;
4163 if (device_create_file(dev, &dev_attr_diag_mode))
4164 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4165 if (device_create_bin_file(dev, &bin_attr_crb))
4166 dev_info(dev, "failed to create crb sysfs entry\n");
4167 if (device_create_bin_file(dev, &bin_attr_mem))
4168 dev_info(dev, "failed to create mem sysfs entry\n");
4169 if (device_create_bin_file(dev, &bin_attr_pci_config))
4170 dev_info(dev, "failed to create pci config sysfs entry");
4171 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4172 return;
4173 if (device_create_bin_file(dev, &bin_attr_esw_config))
4174 dev_info(dev, "failed to create esw config sysfs entry");
4175 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4176 return;
4177 if (device_create_bin_file(dev, &bin_attr_npar_config))
4178 dev_info(dev, "failed to create npar config sysfs entry");
4179 if (device_create_bin_file(dev, &bin_attr_pm_config))
4180 dev_info(dev, "failed to create pm config sysfs entry");
4181 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4182 dev_info(dev, "failed to create eswitch stats sysfs entry");
4183}
4184
4185static void
4186qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4187{
4188 struct device *dev = &adapter->pdev->dev;
4189
4190 device_remove_bin_file(dev, &bin_attr_port_stats);
4191
4192 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4193 return;
4194 device_remove_file(dev, &dev_attr_diag_mode);
4195 device_remove_bin_file(dev, &bin_attr_crb);
4196 device_remove_bin_file(dev, &bin_attr_mem);
53478fef 4197 device_remove_bin_file(dev, &bin_attr_pci_config);
4198 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4199 return;
4200 device_remove_bin_file(dev, &bin_attr_esw_config);
4201 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4202 return;
346fe763 4203 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4204 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4205 device_remove_bin_file(dev, &bin_attr_esw_stats);
4206}
4207
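/*
 * With CONFIG_INET, netdevice and inetaddr notifiers keep the firmware
 * informed of every IPv4 address added to or removed from a qlcnic
 * netdev (including its VLAN devices) via qlcnic_config_ipaddr();
 * firmware features that match on local addresses (LRO, for instance)
 * are assumed to rely on this list.  Without CONFIG_INET the restore
 * helper collapses to an empty stub.
 */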
4208#ifdef CONFIG_INET
4209
4210#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4211
af19b491 4212static void
4213qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4214 struct net_device *dev, unsigned long event)
4215{
4216 struct in_device *indev;
af19b491 4217
4218 indev = in_dev_get(dev);
4219 if (!indev)
4220 return;
4221
4222 for_ifa(indev) {
4223 switch (event) {
4224 case NETDEV_UP:
4225 qlcnic_config_ipaddr(adapter,
4226 ifa->ifa_address, QLCNIC_IP_UP);
4227 break;
4228 case NETDEV_DOWN:
4229 qlcnic_config_ipaddr(adapter,
4230 ifa->ifa_address, QLCNIC_IP_DOWN);
4231 break;
4232 default:
4233 break;
4234 }
4235 } endfor_ifa(indev);
4236
4237 in_dev_put(indev);
4238}
4239
4240static void
4241qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4242{
4243 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4244 struct net_device *dev;
4245 u16 vid;
4246
4247 qlcnic_config_indev_addr(adapter, netdev, event);
4248
b9796a14 4249 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
223bb15e 4250 dev = __vlan_find_dev_deep(netdev, vid);
4251 if (!dev)
4252 continue;
4253 qlcnic_config_indev_addr(adapter, dev, event);
4254 }
4255}
4256
4257static int qlcnic_netdev_event(struct notifier_block *this,
4258 unsigned long event, void *ptr)
4259{
4260 struct qlcnic_adapter *adapter;
4261 struct net_device *dev = (struct net_device *)ptr;
4262
4263recheck:
4264 if (dev == NULL)
4265 goto done;
4266
4267 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4268 dev = vlan_dev_real_dev(dev);
4269 goto recheck;
4270 }
4271
4272 if (!is_qlcnic_netdev(dev))
4273 goto done;
4274
4275 adapter = netdev_priv(dev);
4276
4277 if (!adapter)
4278 goto done;
4279
8a15ad1f 4280 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4281 goto done;
4282
aec1e845 4283 qlcnic_config_indev_addr(adapter, dev, event);
4284done:
4285 return NOTIFY_DONE;
4286}
4287
4288static int
4289qlcnic_inetaddr_event(struct notifier_block *this,
4290 unsigned long event, void *ptr)
4291{
4292 struct qlcnic_adapter *adapter;
4293 struct net_device *dev;
4294
4295 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4296
4297 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4298
4299recheck:
aec1e845 4300 if (dev == NULL)
4301 goto done;
4302
4303 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4304 dev = vlan_dev_real_dev(dev);
4305 goto recheck;
4306 }
4307
4308 if (!is_qlcnic_netdev(dev))
4309 goto done;
4310
4311 adapter = netdev_priv(dev);
4312
251a84c9 4313 if (!adapter)
4314 goto done;
4315
8a15ad1f 4316 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4317 goto done;
4318
4319 switch (event) {
4320 case NETDEV_UP:
4321 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4322 break;
4323 case NETDEV_DOWN:
4324 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4325 break;
4326 default:
4327 break;
4328 }
4329
4330done:
4331 return NOTIFY_DONE;
4332}
4333
4334static struct notifier_block qlcnic_netdev_cb = {
4335 .notifier_call = qlcnic_netdev_event,
4336};
4337
4338static struct notifier_block qlcnic_inetaddr_cb = {
4339 .notifier_call = qlcnic_inetaddr_event,
4340};
4341#else
4342static void
aec1e845 4343qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
4344{ }
4345#endif
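/*
 * PCI error-recovery callbacks and the driver registration record.
 * suspend/resume are only wired up when CONFIG_PM is enabled.
 */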
4346static struct pci_error_handlers qlcnic_err_handler = {
4347 .error_detected = qlcnic_io_error_detected,
4348 .slot_reset = qlcnic_io_slot_reset,
4349 .resume = qlcnic_io_resume,
4350};
4351
4352static struct pci_driver qlcnic_driver = {
4353 .name = qlcnic_driver_name,
4354 .id_table = qlcnic_pci_tbl,
4355 .probe = qlcnic_probe,
4356 .remove = __devexit_p(qlcnic_remove),
4357#ifdef CONFIG_PM
4358 .suspend = qlcnic_suspend,
4359 .resume = qlcnic_resume,
4360#endif
4361 .shutdown = qlcnic_shutdown,
4362 .err_handler = &qlcnic_err_handler
4363
4364};
4365
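/*
 * Module init/exit.  The single-threaded "qlcnic" workqueue and the
 * optional inet notifiers are set up before the PCI driver is
 * registered, and are torn down in reverse order if pci_register_driver()
 * fails or when the module is unloaded.
 */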
4366static int __init qlcnic_init_module(void)
4367{
0cf3a14c 4368 int ret;
4369
4370 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4371
4372 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4373 if (qlcnic_wq == NULL) {
4374 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4375 return -ENOMEM;
4376 }
4377
4378#ifdef CONFIG_INET
4379 register_netdevice_notifier(&qlcnic_netdev_cb);
4380 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4381#endif
4382
4383 ret = pci_register_driver(&qlcnic_driver);
4384 if (ret) {
4385#ifdef CONFIG_INET
4386 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4387 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4388#endif
f7ec804a 4389 destroy_workqueue(qlcnic_wq);
0cf3a14c 4390 }
af19b491 4391
0cf3a14c 4392 return ret;
4393}
4394
4395module_init(qlcnic_init_module);
4396
4397static void __exit qlcnic_exit_module(void)
4398{
4399
4400 pci_unregister_driver(&qlcnic_driver);
4401
4402#ifdef CONFIG_INET
4403 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4404 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4405#endif
f7ec804a 4406 destroy_workqueue(qlcnic_wq);
4407}
4408
4409module_exit(qlcnic_exit_module);