/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>

#include "qlcnic.h"

#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>

MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);

char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;

static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "MAC filter (0=disabled, 1=enabled)");

static int use_msi = 1;
module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");

static int use_msi_x = 1;
module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");

static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int load_fw_file;
module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");

static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");

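/*
 * Illustrative usage (not taken from this file): the parameters above are
 * normally supplied at module load time, e.g.
 *
 *	modprobe qlcnic use_msi_x=0 qlcnic_mac_learn=1
 *
 * The 0444 parameters are read-only after load; auto_fw_reset (0644) can
 * also be toggled at runtime via /sys/module/qlcnic/parameters/auto_fw_reset.
 */
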
static int __devinit qlcnic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif

static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);

static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);

static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);

static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);

static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
		struct qlcnic_esw_func_cfg *);
static void qlcnic_vlan_rx_add(struct net_device *, u16);
static void qlcnic_vlan_rx_del(struct net_device *, u16);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X	0x8020

static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);

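/*
 * For reference (expansion sketch, relying on the generic PCI_DEVICE()
 * helper): ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X) is roughly equivalent to
 *
 *	{ .vendor = PCI_VENDOR_ID_QLOGIC, .device = 0x8020,
 *	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 *	  .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0 }
 *
 * i.e. the driver binds to any QLE824x function exposing an Ethernet class
 * code, regardless of subsystem IDs.
 */
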
inline void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
}

static const u32 msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;

static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	writel(0, sds_ring->crb_intr_mask);
}

static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	writel(0x1, sds_ring->crb_intr_mask);

	if (!QLCNIC_IS_MSI_FAMILY(adapter))
		writel(0xfbff, adapter->tgt_mask_reg);
}

static int
qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
{
	int size = sizeof(struct qlcnic_host_sds_ring) * count;

	recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);

	return recv_ctx->sds_rings == NULL;
}

static void
qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
{
	if (recv_ctx->sds_rings != NULL)
		kfree(recv_ctx->sds_rings);

	recv_ctx->sds_rings = NULL;
}

static int
qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi,
				qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
	}

	return 0;
}

static void
qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
}

static void
qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

static void
qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}

static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
{
	u32 control;
	int pos;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_dword(pdev, pos, &control);
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			control = 0;
		pci_write_config_dword(pdev, pos, control);
	}
}

static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
{
	int i;

	for (i = 0; i < count; i++)
		adapter->msix_entries[i].entry = i;
}

static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
	u8 mac_addr[ETH_ALEN];
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
		return -EIO;

	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);

	/* set station address */

	if (!is_valid_ether_addr(netdev->perm_addr))
		dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
				netdev->dev_addr);

	return 0;
}

static int qlcnic_set_mac(struct net_device *netdev, void *p)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
		return -EOPNOTSUPP;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_detach(netdev);
		qlcnic_napi_disable(adapter);
	}

	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	qlcnic_set_multi(adapter->netdev);

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_attach(netdev);
		qlcnic_napi_enable(adapter);
	}
	return 0;
}

static const struct net_device_ops qlcnic_netdev_ops = {
	.ndo_open = qlcnic_open,
	.ndo_stop = qlcnic_close,
	.ndo_start_xmit = qlcnic_xmit_frame,
	.ndo_get_stats = qlcnic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = qlcnic_set_multi,
	.ndo_set_mac_address = qlcnic_set_mac,
	.ndo_change_mtu = qlcnic_change_mtu,
	.ndo_tx_timeout = qlcnic_tx_timeout,
	.ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
	.ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = qlcnic_poll_controller,
#endif
};
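
/*
 * Note: the callbacks above are invoked by the networking core rather than
 * by this driver directly; for example, bringing the interface up
 * ("ip link set ethX up") ends up in .ndo_open, every transmitted skb goes
 * through .ndo_start_xmit, and an MTU change lands in .ndo_change_mtu.
 */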

static struct qlcnic_nic_template qlcnic_ops = {
	.config_bridged_mode = qlcnic_config_bridged_mode,
	.config_led = qlcnic_config_led,
	.start_firmware = qlcnic_start_firmware
};

static struct qlcnic_nic_template qlcnic_vf_ops = {
	.config_bridged_mode = qlcnicvf_config_bridged_mode,
	.config_led = qlcnicvf_config_led,
	.start_firmware = qlcnicvf_start_firmware
};

static void
qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
	const struct qlcnic_legacy_intr_set *legacy_intrp;
	struct pci_dev *pdev = adapter->pdev;
	int err, num_msix;

	if (adapter->msix_supported) {
		num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
			MSIX_ENTRIES_PER_ADAPTER : 2;
	} else
		num_msix = 1;

	adapter->max_sds_rings = 1;

	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);

	legacy_intrp = &legacy_intr[adapter->ahw->pci_func];

	adapter->int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_status_reg);
	adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_mask_reg);
	adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);

	adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
			ISR_INT_STATE_REG);

	qlcnic_set_msix_bit(pdev, 0);

	if (adapter->msix_supported) {

		qlcnic_init_msix_entries(adapter, num_msix);
		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
		if (err == 0) {
			adapter->flags |= QLCNIC_MSIX_ENABLED;
			qlcnic_set_msix_bit(pdev, 1);

			adapter->max_sds_rings = num_msix;

			dev_info(&pdev->dev, "using msi-x interrupts\n");
			return;
		}

		if (err > 0)
			pci_disable_msix(pdev);

		/* fall through for msi */
	}

	if (use_msi && !pci_enable_msi(pdev)) {
		adapter->flags |= QLCNIC_MSI_ENABLED;
		adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
				msi_tgt_status[adapter->ahw->pci_func]);
		dev_info(&pdev->dev, "using msi interrupts\n");
		adapter->msix_entries[0].vector = pdev->irq;
		return;
	}

	dev_info(&pdev->dev, "using legacy interrupts\n");
	adapter->msix_entries[0].vector = pdev->irq;
}

static void
qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
	if (adapter->flags & QLCNIC_MSIX_ENABLED)
		pci_disable_msix(adapter->pdev);
	if (adapter->flags & QLCNIC_MSI_ENABLED)
		pci_disable_msi(adapter->pdev);
}

static void
qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
{
	if (adapter->ahw->pci_base0 != NULL)
		iounmap(adapter->ahw->pci_base0);
}

static int
qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
	struct qlcnic_pci_info *pci_info;
	int i, ret = 0;
	u8 pfn;

	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;

	adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
				QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
	if (!adapter->npars) {
		ret = -ENOMEM;
		goto err_pci_info;
	}

	adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
				QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
	if (!adapter->eswitch) {
		ret = -ENOMEM;
		goto err_npars;
	}

	ret = qlcnic_get_pci_info(adapter, pci_info);
	if (ret)
		goto err_eswitch;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		pfn = pci_info[i].id;
		if (pfn > QLCNIC_MAX_PCI_FUNC) {
			ret = QL_STATUS_INVALID_PARAM;
			goto err_eswitch;
		}
		adapter->npars[pfn].active = (u8)pci_info[i].active;
		adapter->npars[pfn].type = (u8)pci_info[i].type;
		adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
		adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
		adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
	}

	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
		adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;

	kfree(pci_info);
	return 0;

err_eswitch:
	kfree(adapter->eswitch);
	adapter->eswitch = NULL;
err_npars:
	kfree(adapter->npars);
	adapter->npars = NULL;
err_pci_info:
	kfree(pci_info);

	return ret;
}

static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
	u8 id;
	u32 ref_count;
	int i, ret = 1;
	u32 data = QLCNIC_MGMT_FUNC;
	void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;

	/* If other drivers are not in use set their privilege level */
	ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
	ret = qlcnic_api_lock(adapter);
	if (ret)
		goto err_lock;

	if (qlcnic_config_npars) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
			id = i;
			if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
				id == adapter->ahw->pci_func)
				continue;
			data |= (qlcnic_config_npars &
					QLC_DEV_SET_DRV(0xf, id));
		}
	} else {
		data = readl(priv_op);
		data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
			(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
			adapter->ahw->pci_func));
	}
	writel(data, priv_op);
	qlcnic_api_unlock(adapter);
err_lock:
	return ret;
}

static void
qlcnic_check_vf(struct qlcnic_adapter *adapter)
{
	void __iomem *msix_base_addr;
	void __iomem *priv_op;
	u32 func;
	u32 msix_base;
	u32 op_mode, priv_level;

	/* Determine FW API version */
	adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
					QLCNIC_FW_API);

	/* Find PCI function number */
	pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
	msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
	msix_base = readl(msix_base_addr);
	func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
	adapter->ahw->pci_func = func;

	/* Determine function privilege level */
	priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);

	if (priv_level == QLCNIC_NON_PRIV_FUNC) {
		adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
		dev_info(&adapter->pdev->dev,
			"HAL Version: %d Non Privileged function\n",
			adapter->fw_hal_version);
		adapter->nic_ops = &qlcnic_vf_ops;
	} else
		adapter->nic_ops = &qlcnic_ops;
}

static int
qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
{
	void __iomem *mem_ptr0 = NULL;
	resource_size_t mem_base;
	unsigned long mem_len, pci_len0 = 0;

	struct pci_dev *pdev = adapter->pdev;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);

	if (mem_len == QLCNIC_PCI_2MB_SIZE) {

		mem_ptr0 = pci_ioremap_bar(pdev, 0);
		if (mem_ptr0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
		pci_len0 = mem_len;
	} else {
		return -EIO;
	}

	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));

	adapter->ahw->pci_base0 = mem_ptr0;
	adapter->ahw->pci_len0 = pci_len0;

	qlcnic_check_vf(adapter);

	adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
			adapter->ahw->pci_func)));

	return 0;
}

static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, found = 0;

	for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
		if (qlcnic_boards[i].vendor == pdev->vendor &&
			qlcnic_boards[i].device == pdev->device &&
			qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
			qlcnic_boards[i].sub_device == pdev->subsystem_device) {
			sprintf(name, "%pM: %s",
				adapter->mac_addr,
				qlcnic_boards[i].short_name);
			found = 1;
			break;
		}

	}

	if (!found)
		sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
}

static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
	u32 fw_major, fw_minor, fw_build;
	struct pci_dev *pdev = adapter->pdev;

	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);

	adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);

	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
			fw_major, fw_minor, fw_build);
	if (adapter->ahw->port_type == QLCNIC_XGBE) {
		if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
			adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
		} else {
			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
			adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
		}

		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;

	} else if (adapter->ahw->port_type == QLCNIC_GBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
		adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
	}

	adapter->msix_supported = !!use_msi_x;

	adapter->num_txd = MAX_CMD_DESCRIPTORS;

	adapter->max_rds_rings = MAX_RDS_RINGS;
}

static int
qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_info nic_info;

	err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
	if (err)
		return err;

	adapter->physical_port = (u8)nic_info.phys_port;
	adapter->switch_mode = nic_info.switch_mode;
	adapter->max_tx_ques = nic_info.max_tx_ques;
	adapter->max_rx_ques = nic_info.max_rx_ques;
	adapter->capabilities = nic_info.capabilities;
	adapter->max_mac_filters = nic_info.max_mac_filters;
	adapter->max_mtu = nic_info.max_mtu;

	if (adapter->capabilities & BIT_6)
		adapter->flags |= QLCNIC_ESWITCH_ENABLED;
	else
		adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;

	return err;
}

static void
qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	if (esw_cfg->discard_tagged)
		adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	else
		adapter->flags |= QLCNIC_TAGGING_ENABLED;

	if (esw_cfg->vlan_id)
		adapter->pvid = esw_cfg->vlan_id;
	else
		adapter->pvid = 0;
}

static void
qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	set_bit(vid, adapter->vlans);
}

static void
qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
	clear_bit(vid, adapter->vlans);
}

static void
qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
				QLCNIC_PROMISC_DISABLED);

	if (esw_cfg->mac_anti_spoof)
		adapter->flags |= QLCNIC_MACSPOOF;

	if (!esw_cfg->mac_override)
		adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;

	if (!esw_cfg->promisc_mode)
		adapter->flags |= QLCNIC_PROMISC_DISABLED;

	qlcnic_set_netdev_features(adapter, esw_cfg);
}

static int
qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
		return 0;

	esw_cfg.pci_func = adapter->ahw->pci_func;
	if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
		return -EIO;
	qlcnic_set_vlan_config(adapter, &esw_cfg);
	qlcnic_set_eswitch_port_features(adapter, &esw_cfg);

	return 0;
}

static void
qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long features, vlan_features;

	features = (NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_IPV6_CSUM | NETIF_F_GRO);
	vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
		features |= (NETIF_F_TSO | NETIF_F_TSO6);
		vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
	}

	if (netdev->features & NETIF_F_LRO)
		features |= NETIF_F_LRO;

	if (esw_cfg->offload_flags & BIT_0) {
		netdev->features |= features;
		adapter->rx_csum = 1;
		if (!(esw_cfg->offload_flags & BIT_1))
			netdev->features &= ~NETIF_F_TSO;
		if (!(esw_cfg->offload_flags & BIT_2))
			netdev->features &= ~NETIF_F_TSO6;
	} else {
		netdev->features &= ~features;
		adapter->rx_csum = 0;
	}

	netdev->vlan_features = (features & vlan_features);
}

static int
qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
{
	void __iomem *priv_op;
	u32 op_mode, priv_level;
	int err = 0;

	err = qlcnic_initialize_nic(adapter);
	if (err)
		return err;

	if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
		return 0;

	priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);

	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);

	if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
		if (priv_level == QLCNIC_MGMT_FUNC) {
			adapter->op_mode = QLCNIC_MGMT_FUNC;
			err = qlcnic_init_pci_info(adapter);
			if (err)
				return err;
			/* Set privilege level for other functions */
			qlcnic_set_function_modes(adapter);
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Management function\n",
				adapter->fw_hal_version);
		} else if (priv_level == QLCNIC_PRIV_FUNC) {
			adapter->op_mode = QLCNIC_PRIV_FUNC;
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Privileged function\n",
				adapter->fw_hal_version);
		}
	}

	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;

	return err;
}

static int
qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	struct qlcnic_npar_info *npar;
	u8 i;

	if (adapter->need_fw_reset)
		return 0;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
			continue;
		memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
		esw_cfg.pci_func = i;
		esw_cfg.offload_flags = BIT_0;
		esw_cfg.mac_override = BIT_0;
		esw_cfg.promisc_mode = BIT_0;
		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			esw_cfg.offload_flags |= (BIT_1 | BIT_2);
		if (qlcnic_config_switch_port(adapter, &esw_cfg))
			return -EIO;
		npar = &adapter->npars[i];
		npar->pvid = esw_cfg.vlan_id;
		npar->mac_override = esw_cfg.mac_override;
		npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
		npar->discard_tagged = esw_cfg.discard_tagged;
		npar->promisc_mode = esw_cfg.promisc_mode;
		npar->offload_flags = esw_cfg.offload_flags;
	}

	return 0;
}

static int
qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
			struct qlcnic_npar_info *npar, int pci_func)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
	esw_cfg.pci_func = pci_func;
	esw_cfg.vlan_id = npar->pvid;
	esw_cfg.mac_override = npar->mac_override;
	esw_cfg.discard_tagged = npar->discard_tagged;
	esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
	esw_cfg.offload_flags = npar->offload_flags;
	esw_cfg.promisc_mode = npar->promisc_mode;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	esw_cfg.op_mode = QLCNIC_ADD_VLAN;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	return 0;
}

static int
qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
	int i, err;
	struct qlcnic_npar_info *npar;
	struct qlcnic_info nic_info;

	if (!adapter->need_fw_reset)
		return 0;

	/* Set the NPAR config data after FW reset */
	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		npar = &adapter->npars[i];
		if (npar->type != QLCNIC_TYPE_NIC)
			continue;
		err = qlcnic_get_nic_info(adapter, &nic_info, i);
		if (err)
			return err;
		nic_info.min_tx_bw = npar->min_bw;
		nic_info.max_tx_bw = npar->max_bw;
		err = qlcnic_set_nic_info(adapter, &nic_info);
		if (err)
			return err;

		if (npar->enable_pm) {
			err = qlcnic_config_port_mirroring(adapter,
					npar->dest_npar, 1, i);
			if (err)
				return err;
		}
		err = qlcnic_reset_eswitch_config(adapter, npar, i);
		if (err)
			return err;
	}
	return 0;
}

static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
{
	u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
	u32 npar_state;

	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
		return 0;

	npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
		msleep(1000);
		npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	}
	if (!npar_opt_timeo) {
		dev_err(&adapter->pdev->dev,
			"Timed out waiting for NPAR state to become operational\n");
		return -EIO;
	}
	return 0;
}

static int
qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
{
	int err;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
		adapter->op_mode != QLCNIC_MGMT_FUNC)
		return 0;

	err = qlcnic_set_default_offload_settings(adapter);
	if (err)
		return err;

	err = qlcnic_reset_npar_config(adapter);
	if (err)
		return err;

	qlcnic_dev_set_npar_ready(adapter);

	return err;
}

static int
qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
	int err;

	err = qlcnic_can_start_firmware(adapter);
	if (err < 0)
		return err;
	else if (!err)
		goto check_fw_status;

	if (load_fw_file)
		qlcnic_request_firmware(adapter);
	else {
		err = qlcnic_check_flash_fw_ver(adapter);
		if (err)
			goto err_out;

		adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
	}

	err = qlcnic_need_fw_reset(adapter);
	if (err == 0)
		goto check_fw_status;

	err = qlcnic_pinit_from_rom(adapter);
	if (err)
		goto err_out;

	err = qlcnic_load_firmware(adapter);
	if (err)
		goto err_out;

	qlcnic_release_firmware(adapter);
	QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);

check_fw_status:
	err = qlcnic_check_fw_status(adapter);
	if (err)
		goto err_out;

	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
	qlcnic_idc_debug_info(adapter, 1);

	err = qlcnic_check_eswitch_mode(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failed for eswitch\n");
		goto err_out;
	}
	err = qlcnic_set_mgmt_operations(adapter);
	if (err)
		goto err_out;

	qlcnic_check_options(adapter);
	adapter->need_fw_reset = 0;

	qlcnic_release_firmware(adapter);
	return 0;

err_out:
	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
	dev_err(&adapter->pdev->dev, "Device state set to failed\n");

	qlcnic_release_firmware(adapter);
	return err;
}

static int
qlcnic_request_irq(struct qlcnic_adapter *adapter)
{
	irq_handler_t handler;
	struct qlcnic_host_sds_ring *sds_ring;
	int err, ring;

	unsigned long flags = 0;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		handler = qlcnic_tmp_intr;
		if (!QLCNIC_IS_MSI_FAMILY(adapter))
			flags |= IRQF_SHARED;

	} else {
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			handler = qlcnic_msix_intr;
		else if (adapter->flags & QLCNIC_MSI_ENABLED)
			handler = qlcnic_msi_intr;
		else {
			flags |= IRQF_SHARED;
			handler = qlcnic_intr;
		}
	}
	adapter->irq = netdev->irq;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
		err = request_irq(sds_ring->irq, handler,
			flags, sds_ring->name, sds_ring);
		if (err)
			return err;
	}

	return 0;
}

static void
qlcnic_free_irq(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;

	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		free_irq(sds_ring->irq, sds_ring);
	}
}

static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_rds_ring *rds_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return -EIO;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
		return 0;
	if (qlcnic_set_eswitch_port_config(adapter))
		return -EIO;

	if (qlcnic_fw_create_ctx(adapter))
		return -EIO;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, rds_ring);
	}

	qlcnic_set_multi(netdev);
	qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);

	adapter->ahw->linkup = 0;

	if (adapter->max_sds_rings > 1)
		qlcnic_config_rss(adapter, 1);

	qlcnic_config_intr_coalesce(adapter);

	if (netdev->features & NETIF_F_LRO)
		qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);

	qlcnic_napi_enable(adapter);

	qlcnic_linkevent_request(adapter, 1);

	adapter->reset_context = 0;
	set_bit(__QLCNIC_DEV_UP, &adapter->state);
	return 0;
}

/* Used during resume and firmware recovery. */

static int
qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int err = 0;

	rtnl_lock();
	if (netif_running(netdev))
		err = __qlcnic_up(adapter, netdev);
	rtnl_unlock();

	return err;
}

static void
__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
		return;

	smp_mb();
	spin_lock(&adapter->tx_clean_lock);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	qlcnic_free_mac_list(adapter);

	if (adapter->fhash.fnum)
		qlcnic_delete_lb_filters(adapter);

	qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);

	qlcnic_napi_disable(adapter);

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_reset_rx_buffers_list(adapter);
	qlcnic_release_tx_buffers(adapter);
	spin_unlock(&adapter->tx_clean_lock);
}

/* Used during suspend and firmware recovery. */

static void
qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	rtnl_lock();
	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);
	rtnl_unlock();
}

static int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
		return 0;

	err = qlcnic_napi_add(adapter, netdev);
	if (err)
		return err;

	err = qlcnic_alloc_sw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting sw resources\n");
		goto err_out_napi_del;
	}

	err = qlcnic_alloc_hw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting hw resources\n");
		goto err_out_free_sw;
	}

	err = qlcnic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to setup interrupt\n");
		goto err_out_free_hw;
	}

	qlcnic_create_sysfs_entries(adapter);

	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_hw:
	qlcnic_free_hw_resources(adapter);
err_out_free_sw:
	qlcnic_free_sw_resources(adapter);
err_out_napi_del:
	qlcnic_napi_del(adapter);
	return err;
}

static void
qlcnic_detach(struct qlcnic_adapter *adapter)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	qlcnic_remove_sysfs_entries(adapter);

	qlcnic_free_hw_resources(adapter);
	qlcnic_release_rx_buffers(adapter);
	qlcnic_free_irq(adapter);
	qlcnic_napi_del(adapter);
	qlcnic_free_sw_resources(adapter);

	adapter->is_up = 0;
}

void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	int ring;

	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx->sds_rings[ring];
			qlcnic_disable_int(sds_ring);
		}
	}

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_detach(adapter);

	adapter->diag_test = 0;
	adapter->max_sds_rings = max_sds_rings;

	if (qlcnic_attach(adapter))
		goto out;

	if (netif_running(netdev))
		__qlcnic_up(adapter, netdev);
out:
	netif_device_attach(netdev);
}

static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
	int err = 0;
	adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
				GFP_KERNEL);
	if (!adapter->ahw) {
		dev_err(&adapter->pdev->dev,
			"Failed to allocate hardware context for adapter\n");
		err = -ENOMEM;
		goto err_out;
	}
	adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
				GFP_KERNEL);
	if (!adapter->recv_ctx) {
		dev_err(&adapter->pdev->dev,
			"Failed to allocate recv ctx resources for adapter\n");
		kfree(adapter->ahw);
		adapter->ahw = NULL;
		err = -ENOMEM;
		goto err_out;
	}
	/* Initialize interrupt coalesce parameters */
	adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
	adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
	adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
err_out:
	return err;
}

static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
{
	kfree(adapter->recv_ctx);
	adapter->recv_ctx = NULL;

	kfree(adapter->ahw);
	adapter->ahw = NULL;
}

int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_rds_ring *rds_ring;
	int ring;
	int ret;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);

	qlcnic_detach(adapter);

	adapter->max_sds_rings = 1;
	adapter->diag_test = test;

	ret = qlcnic_attach(adapter);
	if (ret) {
		netif_device_attach(netdev);
		return ret;
	}

	ret = qlcnic_fw_create_ctx(adapter);
	if (ret) {
		qlcnic_detach(adapter);
		netif_device_attach(netdev);
		return ret;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, rds_ring);
	}

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx->sds_rings[ring];
			qlcnic_enable_int(sds_ring);
		}
	}
	set_bit(__QLCNIC_DEV_UP, &adapter->state);

	return 0;
}

/* Reset context in hardware only */
static int
qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	netif_device_detach(netdev);

	qlcnic_down(adapter, netdev);

	qlcnic_up(adapter, netdev);

	netif_device_attach(netdev);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

int
qlcnic_reset_context(struct qlcnic_adapter *adapter)
{
	int err = 0;
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {

		netif_device_detach(netdev);

		if (netif_running(netdev))
			__qlcnic_down(adapter, netdev);

		qlcnic_detach(adapter);

		if (netif_running(netdev)) {
			err = qlcnic_attach(adapter);
			if (!err)
				__qlcnic_up(adapter, netdev);
		}

		netif_device_attach(netdev);
	}

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return err;
}

static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
		struct net_device *netdev, u8 pci_using_dac)
{
	int err;
	struct pci_dev *pdev = adapter->pdev;

	adapter->rx_csum = 1;
	adapter->mc_enabled = 0;
	adapter->max_mc_count = 38;

	netdev->netdev_ops = &qlcnic_netdev_ops;
	netdev->watchdog_timeo = 5*HZ;

	qlcnic_change_mtu(netdev, netdev->mtu);

	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);

	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
		netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
		netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
	}

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
		netdev->features |= (NETIF_F_HW_VLAN_TX);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		netdev->features |= NETIF_F_LRO;
	netdev->irq = adapter->msix_entries[0].vector;

	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device\n");
		return err;
	}

	return 0;
}

static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
		!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		*pci_using_dac = 1;
	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
		!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		*pci_using_dac = 0;
	else {
		dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
		return -EIO;
	}

	return 0;
}

static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct qlcnic_adapter *adapter = NULL;
	int err;
	uint8_t revision_id;
	uint8_t pci_using_dac;
	char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
	if (err)
		goto err_out_disable_pdev;

	err = pci_request_regions(pdev, qlcnic_driver_name);
	if (err)
		goto err_out_disable_pdev;

	pci_set_master(pdev);
	pci_enable_pcie_error_reporting(pdev);

	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "failed to allocate net_device\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	if (qlcnic_alloc_adapter_resources(adapter))
		goto err_out_free_netdev;

	adapter->dev_rst_time = jiffies;
	revision_id = pdev->revision;
	adapter->ahw->revision_id = revision_id;

	rwlock_init(&adapter->ahw->crb_lock);
	mutex_init(&adapter->ahw->mem_lock);

	spin_lock_init(&adapter->tx_clean_lock);
	INIT_LIST_HEAD(&adapter->mac_list);

	err = qlcnic_setup_pci_map(adapter);
	if (err)
		goto err_out_free_hw;

	/* This will be reset for mezz cards */
	adapter->portnum = adapter->ahw->pci_func;

	err = qlcnic_get_board_info(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error getting board config info.\n");
		goto err_out_iounmap;
	}

	err = qlcnic_setup_idc_param(adapter);
	if (err)
		goto err_out_iounmap;

	adapter->flags |= QLCNIC_NEED_FLR;

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
		goto err_out_decr_ref;
	}

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&pdev->dev, "failed to read mac addr\n");

	if (adapter->portnum == 0) {
		get_brd_name(adapter, brd_name);

		pr_info("%s: %s Board Chip rev 0x%x\n",
			module_name(THIS_MODULE),
			brd_name, adapter->ahw->revision_id);
	}

	qlcnic_clear_stats(adapter);

	qlcnic_setup_intr(adapter);

	err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
	if (err)
		goto err_out_disable_msi;

	pci_set_drvdata(pdev, adapter);

	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);

	switch (adapter->ahw->port_type) {
	case QLCNIC_GBE:
		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
				adapter->netdev->name);
		break;
	case QLCNIC_XGBE:
		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
				adapter->netdev->name);
		break;
	}

	qlcnic_alloc_lb_filters_mem(adapter);
	qlcnic_create_diag_entries(adapter);

	return 0;

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);

err_out_decr_ref:
	qlcnic_clr_all_drv_state(adapter, 0);

err_out_iounmap:
	qlcnic_cleanup_pci_map(adapter);

err_out_free_hw:
	qlcnic_free_adapter_resources(adapter);

err_out_free_netdev:
	free_netdev(netdev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return err;
}

static void __devexit qlcnic_remove(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	qlcnic_cancel_fw_work(adapter);

	unregister_netdev(netdev);

	qlcnic_detach(adapter);

	if (adapter->npars != NULL)
		kfree(adapter->npars);
	if (adapter->eswitch != NULL)
		kfree(adapter->eswitch);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	qlcnic_free_lb_filters_mem(adapter);

	qlcnic_teardown_intr(adapter);

	qlcnic_remove_diag_entries(adapter);

	qlcnic_cleanup_pci_map(adapter);

	qlcnic_release_firmware(adapter);

	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	qlcnic_free_adapter_resources(adapter);
	free_netdev(netdev);
}

static int __qlcnic_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int retval;

	netif_device_detach(netdev);

	qlcnic_cancel_fw_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (qlcnic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	return 0;
}

static void qlcnic_shutdown(struct pci_dev *pdev)
{
	if (__qlcnic_shutdown(pdev))
		return;

	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
static int
qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;

	retval = __qlcnic_shutdown(pdev);
	if (retval)
		return retval;

	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int
qlcnic_resume(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = qlcnic_up(adapter, netdev);
		if (err)
			goto done;

		qlcnic_restore_indev_addr(netdev, NETDEV_UP);
	}
done:
	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
	return 0;
}
#endif

static int qlcnic_open(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	int err;

	err = qlcnic_attach(adapter);
	if (err)
		return err;

	err = __qlcnic_up(adapter, netdev);
	if (err)
		goto err_out;

	netif_start_queue(netdev);

	return 0;

err_out:
	qlcnic_detach(adapter);
	return err;
}

/*
 * qlcnic_close - Disables a network interface entry point
 */
static int qlcnic_close(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	__qlcnic_down(adapter, netdev);
	return 0;
}
1805
b5e5492c
AKS
1806static void
1807qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1808{
1809 void *head;
1810 int i;
1811
1812 if (!qlcnic_mac_learn)
1813 return;
1814
1815 spin_lock_init(&adapter->mac_learn_lock);
1816
1817 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1818 GFP_KERNEL);
1819 if (!head)
1820 return;
1821
1822 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1823 adapter->fhash.fhead = (struct hlist_head *)head;
1824
1825 for (i = 0; i < adapter->fhash.fmax; i++)
1826 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1827}
1828
1829static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1830{
1831 if (adapter->fhash.fmax && adapter->fhash.fhead)
1832 kfree(adapter->fhash.fhead);
1833
1834 adapter->fhash.fhead = NULL;
1835 adapter->fhash.fmax = 0;
1836}
1837
1838static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1839 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
b5e5492c
AKS
1840{
1841 struct cmd_desc_type0 *hwdesc;
1842 struct qlcnic_nic_req *req;
1843 struct qlcnic_mac_req *mac_req;
7e56cac4 1844 struct qlcnic_vlan_req *vlan_req;
b5e5492c
AKS
1845 u32 producer;
1846 u64 word;
1847
1848 producer = tx_ring->producer;
1849 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1850
1851 req = (struct qlcnic_nic_req *)hwdesc;
1852 memset(req, 0, sizeof(struct qlcnic_nic_req));
1853 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1854
1855 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1856 req->req_hdr = cpu_to_le64(word);
1857
1858 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1859 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
b5e5492c
AKS
1860 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1861
7e56cac4
SC
1862 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1863 vlan_req->vlan_id = vlan_id;
03c5d770 1864
b5e5492c 1865 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
036d61f0 1866 smp_mb();
b5e5492c
AKS
1867}
1868
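/*
 * QLCNIC_MAC_HASH() folds selected bits of the 64-bit value holding the
 * source MAC into a small hash; qlcnic_send_filter() masks the result with
 * (QLCNIC_LB_MAX_FILTERS - 1) to pick a bucket in the learning hash table.
 */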
1869#define QLCNIC_MAC_HASH(MAC)\
1870 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1871
1872static void
1873qlcnic_send_filter(struct qlcnic_adapter *adapter,
1874 struct qlcnic_host_tx_ring *tx_ring,
1875 struct cmd_desc_type0 *first_desc,
1876 struct sk_buff *skb)
1877{
1878 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1879 struct qlcnic_filter *fil, *tmp_fil;
1880 struct hlist_node *tmp_hnode, *n;
1881 struct hlist_head *head;
1882 u64 src_addr = 0;
7e56cac4 1883 __le16 vlan_id = 0;
b5e5492c
AKS
1884 u8 hindex;
1885
1886 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1887 return;
1888
1889 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1890 return;
1891
03c5d770
AKS
 1892 /* Only NPAR-capable devices support VLAN-based learning */
1893 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1894 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1895 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1896 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1897 head = &(adapter->fhash.fhead[hindex]);
1898
1899 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
03c5d770
AKS
1900 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1901 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1902
1903 if (jiffies >
1904 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1905 qlcnic_change_filter(adapter, src_addr, vlan_id,
1906 tx_ring);
b5e5492c
AKS
1907 tmp_fil->ftime = jiffies;
1908 return;
1909 }
1910 }
1911
1912 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1913 if (!fil)
1914 return;
1915
03c5d770 1916 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1917
1918 fil->ftime = jiffies;
03c5d770 1919 fil->vlan_id = vlan_id;
b5e5492c
AKS
1920 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1921 spin_lock(&adapter->mac_learn_lock);
1922 hlist_add_head(&(fil->fnode), head);
1923 adapter->fhash.fnum++;
1924 spin_unlock(&adapter->mac_learn_lock);
1925}
1926
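/*
 * Fill in the protocol-specific parts of the first Tx descriptor: VLAN
 * handling (inline tag, out-of-band tag or port VLAN), LSO set-up
 * (including copying the MAC/IP/TCP headers into the descriptor ring as a
 * template for the firmware), and the checksum-offload opcode for TCP/UDP
 * over IPv4/IPv6.
 */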
036d61f0
AC
1927static int
1928qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
af19b491
AKS
1929 struct cmd_desc_type0 *first_desc,
1930 struct sk_buff *skb)
1931{
036d61f0
AC
1932 u8 opcode = 0, hdr_len = 0;
1933 u16 flags = 0, vlan_tci = 0;
1934 int copied, offset, copy_len;
af19b491
AKS
1935 struct cmd_desc_type0 *hwdesc;
1936 struct vlan_ethhdr *vh;
036d61f0
AC
1937 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1938 u16 protocol = ntohs(skb->protocol);
2e9d722d 1939 u32 producer = tx_ring->producer;
036d61f0
AC
1940
1941 if (protocol == ETH_P_8021Q) {
1942 vh = (struct vlan_ethhdr *)skb->data;
1943 flags = FLAGS_VLAN_TAGGED;
1944 vlan_tci = vh->h_vlan_TCI;
1945 } else if (vlan_tx_tag_present(skb)) {
1946 flags = FLAGS_VLAN_OOB;
1947 vlan_tci = vlan_tx_tag_get(skb);
1948 }
1949 if (unlikely(adapter->pvid)) {
1950 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1951 return -EIO;
1952 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
1953 goto set_flags;
1954
1955 flags = FLAGS_VLAN_OOB;
1956 vlan_tci = adapter->pvid;
1957 }
1958set_flags:
1959 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
1960 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
af19b491 1961
2e9d722d
AC
1962 if (*(skb->data) & BIT_0) {
1963 flags |= BIT_0;
1964 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1965 }
036d61f0
AC
1966 opcode = TX_ETHER_PKT;
1967 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
af19b491
AKS
1968 skb_shinfo(skb)->gso_size > 0) {
1969
1970 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1971
1972 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1973 first_desc->total_hdr_length = hdr_len;
036d61f0
AC
1974
1975 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
1976
1977 /* For LSO, we need to copy the MAC/IP/TCP headers into
1978 * the descriptor ring */
1979 copied = 0;
1980 offset = 2;
1981
1982 if (flags & FLAGS_VLAN_OOB) {
af19b491
AKS
1983 first_desc->total_hdr_length += VLAN_HLEN;
1984 first_desc->tcp_hdr_offset = VLAN_HLEN;
1985 first_desc->ip_hdr_offset = VLAN_HLEN;
 1986 /* Only in the case of TSO on a VLAN device */
1987 flags |= FLAGS_VLAN_TAGGED;
036d61f0
AC
1988
1989 /* Create a TSO vlan header template for firmware */
1990
1991 hwdesc = &tx_ring->desc_head[producer];
1992 tx_ring->cmd_buf_arr[producer].skb = NULL;
1993
1994 copy_len = min((int)sizeof(struct cmd_desc_type0) -
1995 offset, hdr_len + VLAN_HLEN);
1996
1997 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
1998 skb_copy_from_linear_data(skb, vh, 12);
1999 vh->h_vlan_proto = htons(ETH_P_8021Q);
2000 vh->h_vlan_TCI = htons(vlan_tci);
2001
2002 skb_copy_from_linear_data_offset(skb, 12,
2003 (char *)vh + 16, copy_len - 16);
2004
2005 copied = copy_len - VLAN_HLEN;
2006 offset = 0;
2007
2008 producer = get_next_index(producer, tx_ring->num_desc);
af19b491
AKS
2009 }
2010
036d61f0
AC
2011 while (copied < hdr_len) {
2012
2013 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2014 offset, (hdr_len - copied));
2015
2016 hwdesc = &tx_ring->desc_head[producer];
2017 tx_ring->cmd_buf_arr[producer].skb = NULL;
2018
2019 skb_copy_from_linear_data_offset(skb, copied,
2020 (char *) hwdesc + offset, copy_len);
2021
2022 copied += copy_len;
2023 offset = 0;
2024
2025 producer = get_next_index(producer, tx_ring->num_desc);
2026 }
2027
2028 tx_ring->producer = producer;
2029 smp_mb();
2030 adapter->stats.lso_frames++;
af19b491
AKS
2031
2032 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2033 u8 l4proto;
2034
036d61f0 2035 if (protocol == ETH_P_IP) {
af19b491
AKS
2036 l4proto = ip_hdr(skb)->protocol;
2037
2038 if (l4proto == IPPROTO_TCP)
2039 opcode = TX_TCP_PKT;
2040 else if (l4proto == IPPROTO_UDP)
2041 opcode = TX_UDP_PKT;
036d61f0 2042 } else if (protocol == ETH_P_IPV6) {
af19b491
AKS
2043 l4proto = ipv6_hdr(skb)->nexthdr;
2044
2045 if (l4proto == IPPROTO_TCP)
2046 opcode = TX_TCPV6_PKT;
2047 else if (l4proto == IPPROTO_UDP)
2048 opcode = TX_UDPV6_PKT;
2049 }
2050 }
af19b491
AKS
2051 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2052 first_desc->ip_hdr_offset += skb_network_offset(skb);
2053 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2054
036d61f0 2055 return 0;
af19b491
AKS
2056}
2057
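/*
 * DMA-map an skb for transmission: the linear area first, then each page
 * fragment. On a mapping error every mapping made so far is unwound and
 * -ENOMEM is returned so the caller can drop the packet.
 */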
2058static int
2059qlcnic_map_tx_skb(struct pci_dev *pdev,
2060 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2061{
2062 struct qlcnic_skb_frag *nf;
2063 struct skb_frag_struct *frag;
2064 int i, nr_frags;
2065 dma_addr_t map;
2066
2067 nr_frags = skb_shinfo(skb)->nr_frags;
2068 nf = &pbuf->frag_array[0];
2069
2070 map = pci_map_single(pdev, skb->data,
2071 skb_headlen(skb), PCI_DMA_TODEVICE);
2072 if (pci_dma_mapping_error(pdev, map))
2073 goto out_err;
2074
2075 nf->dma = map;
2076 nf->length = skb_headlen(skb);
2077
2078 for (i = 0; i < nr_frags; i++) {
2079 frag = &skb_shinfo(skb)->frags[i];
2080 nf = &pbuf->frag_array[i+1];
2081
2082 map = pci_map_page(pdev, frag->page, frag->page_offset,
2083 frag->size, PCI_DMA_TODEVICE);
2084 if (pci_dma_mapping_error(pdev, map))
2085 goto unwind;
2086
2087 nf->dma = map;
2088 nf->length = frag->size;
2089 }
2090
2091 return 0;
2092
2093unwind:
2094 while (--i >= 0) {
2095 nf = &pbuf->frag_array[i+1];
2096 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2097 }
2098
2099 nf = &pbuf->frag_array[0];
2100 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2101
2102out_err:
2103 return -ENOMEM;
2104}
2105
036d61f0
AC
2106static void
2107qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2108 struct qlcnic_cmd_buffer *pbuf)
8cf61f89 2109{
036d61f0
AC
2110 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2111 int nr_frags = skb_shinfo(skb)->nr_frags;
2112 int i;
8cf61f89 2113
036d61f0
AC
2114 for (i = 0; i < nr_frags; i++) {
2115 nf = &pbuf->frag_array[i+1];
2116 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
8cf61f89 2117 }
8cf61f89 2118
036d61f0
AC
2119 nf = &pbuf->frag_array[0];
2120 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
8cf61f89
AKS
2121}
2122
af19b491
AKS
2123static inline void
2124qlcnic_clear_cmddesc(u64 *desc)
2125{
2126 desc[0] = 0ULL;
2127 desc[2] = 0ULL;
8cf61f89 2128 desc[7] = 0ULL;
af19b491
AKS
2129}
2130
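/*
 * Main transmit entry point: packets with too many fragments are partially
 * linearized, the queue is stopped when fewer than TX_STOP_THRESH
 * descriptors remain, the skb is DMA-mapped, buffer addresses are written
 * four per descriptor, and the producer index is then handed to the
 * firmware via qlcnic_update_cmd_producer().
 */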
cdaff185 2131netdev_tx_t
af19b491
AKS
2132qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2133{
2134 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2135 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2136 struct qlcnic_cmd_buffer *pbuf;
2137 struct qlcnic_skb_frag *buffrag;
2138 struct cmd_desc_type0 *hwdesc, *first_desc;
2139 struct pci_dev *pdev;
dcb50aff 2140 struct ethhdr *phdr;
91a403ca 2141 int delta = 0;
af19b491
AKS
2142 int i, k;
2143
2144 u32 producer;
036d61f0 2145 int frag_count;
af19b491
AKS
2146 u32 num_txd = tx_ring->num_desc;
2147
780ab790
AKS
2148 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2149 netif_stop_queue(netdev);
2150 return NETDEV_TX_BUSY;
2151 }
2152
fe4d434d 2153 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2154 phdr = (struct ethhdr *)skb->data;
2155 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2156 adapter->mac_addr))
2157 goto drop_packet;
2158 }
2159
af19b491 2160 frag_count = skb_shinfo(skb)->nr_frags + 1;
91a403ca
AKS
2161 /* 14 frags supported for normal packet and
2162 * 32 frags supported for TSO packet
2163 */
2164 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2165
2166 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2167 delta += skb_shinfo(skb)->frags[i].size;
2168
2169 if (!__pskb_pull_tail(skb, delta))
2170 goto drop_packet;
2171
2172 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2173 }
af19b491 2174
ef71ff83 2175 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2176 netif_stop_queue(netdev);
ef71ff83
RB
2177 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2178 netif_start_queue(netdev);
2179 else {
2180 adapter->stats.xmit_off++;
2181 return NETDEV_TX_BUSY;
2182 }
af19b491
AKS
2183 }
2184
2185 producer = tx_ring->producer;
2186 pbuf = &tx_ring->cmd_buf_arr[producer];
2187
2188 pdev = adapter->pdev;
2189
8cf61f89
AKS
2190 first_desc = hwdesc = &tx_ring->desc_head[producer];
2191 qlcnic_clear_cmddesc((u64 *)hwdesc);
2192
8ae6df97
AKS
2193 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2194 adapter->stats.tx_dma_map_error++;
af19b491 2195 goto drop_packet;
8ae6df97 2196 }
af19b491
AKS
2197
2198 pbuf->skb = skb;
2199 pbuf->frag_count = frag_count;
2200
af19b491
AKS
2201 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2202 qlcnic_set_tx_port(first_desc, adapter->portnum);
2203
2204 for (i = 0; i < frag_count; i++) {
2205
2206 k = i % 4;
2207
2208 if ((k == 0) && (i > 0)) {
 2209 /* move to the next descriptor */
2210 producer = get_next_index(producer, num_txd);
2211 hwdesc = &tx_ring->desc_head[producer];
2212 qlcnic_clear_cmddesc((u64 *)hwdesc);
2213 tx_ring->cmd_buf_arr[producer].skb = NULL;
2214 }
2215
2216 buffrag = &pbuf->frag_array[i];
2217
2218 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2219 switch (k) {
2220 case 0:
2221 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2222 break;
2223 case 1:
2224 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2225 break;
2226 case 2:
2227 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2228 break;
2229 case 3:
2230 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2231 break;
2232 }
2233 }
2234
2235 tx_ring->producer = get_next_index(producer, num_txd);
036d61f0 2236 smp_mb();
af19b491 2237
036d61f0
AC
2238 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2239 goto unwind_buff;
af19b491 2240
b5e5492c
AKS
2241 if (qlcnic_mac_learn)
2242 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2243
af19b491
AKS
2244 qlcnic_update_cmd_producer(adapter, tx_ring);
2245
2246 adapter->stats.txbytes += skb->len;
2247 adapter->stats.xmitcalled++;
2248
2249 return NETDEV_TX_OK;
2250
036d61f0
AC
2251unwind_buff:
2252 qlcnic_unmap_buffers(pdev, skb, pbuf);
af19b491
AKS
2253drop_packet:
2254 adapter->stats.txdropped++;
2255 dev_kfree_skb_any(skb);
2256 return NETDEV_TX_OK;
2257}
2258
2259static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2260{
2261 struct net_device *netdev = adapter->netdev;
2262 u32 temp, temp_state, temp_val;
2263 int rv = 0;
2264
2265 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2266
2267 temp_state = qlcnic_get_temp_state(temp);
2268 temp_val = qlcnic_get_temp_val(temp);
2269
2270 if (temp_state == QLCNIC_TEMP_PANIC) {
2271 dev_err(&netdev->dev,
2272 "Device temperature %d degrees C exceeds"
2273 " maximum allowed. Hardware has been shut down.\n",
2274 temp_val);
2275 rv = 1;
2276 } else if (temp_state == QLCNIC_TEMP_WARN) {
2277 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2278 dev_err(&netdev->dev,
2279 "Device temperature %d degrees C "
2280 "exceeds operating range."
2281 " Immediate action needed.\n",
2282 temp_val);
2283 }
2284 } else {
2285 if (adapter->temp == QLCNIC_TEMP_WARN) {
2286 dev_info(&netdev->dev,
2287 "Device temperature is now %d degrees C"
2288 " in normal range.\n", temp_val);
2289 }
2290 }
2291 adapter->temp = temp_state;
2292 return rv;
2293}
2294
2295void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2296{
2297 struct net_device *netdev = adapter->netdev;
2298
b1fc6d3c 2299 if (adapter->ahw->linkup && !linkup) {
69324275 2300 netdev_info(netdev, "NIC Link is down\n");
b1fc6d3c 2301 adapter->ahw->linkup = 0;
af19b491
AKS
2302 if (netif_running(netdev)) {
2303 netif_carrier_off(netdev);
2304 netif_stop_queue(netdev);
2305 }
b1fc6d3c 2306 } else if (!adapter->ahw->linkup && linkup) {
69324275 2307 netdev_info(netdev, "NIC Link is up\n");
b1fc6d3c 2308 adapter->ahw->linkup = 1;
af19b491
AKS
2309 if (netif_running(netdev)) {
2310 netif_carrier_on(netdev);
2311 netif_wake_queue(netdev);
2312 }
2313 }
2314}
2315
2316static void qlcnic_tx_timeout(struct net_device *netdev)
2317{
2318 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2319
2320 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2321 return;
2322
2323 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2324
2325 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2326 adapter->need_fw_reset = 1;
2327 else
2328 adapter->reset_context = 1;
af19b491
AKS
2329}
2330
2331static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2332{
2333 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2334 struct net_device_stats *stats = &netdev->stats;
2335
af19b491
AKS
2336 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2337 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2338 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2339 stats->tx_bytes = adapter->stats.txbytes;
2340 stats->rx_dropped = adapter->stats.rxdropped;
2341 stats->tx_dropped = adapter->stats.txdropped;
2342
2343 return stats;
2344}
2345
7eb9855d 2346static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2347{
af19b491
AKS
2348 u32 status;
2349
2350 status = readl(adapter->isr_int_vec);
2351
2352 if (!(status & adapter->int_vec_bit))
2353 return IRQ_NONE;
2354
2355 /* check interrupt state machine, to be sure */
2356 status = readl(adapter->crb_int_state_reg);
2357 if (!ISR_LEGACY_INT_TRIGGERED(status))
2358 return IRQ_NONE;
2359
2360 writel(0xffffffff, adapter->tgt_status_reg);
2361 /* read twice to ensure write is flushed */
2362 readl(adapter->isr_int_vec);
2363 readl(adapter->isr_int_vec);
2364
7eb9855d
AKS
2365 return IRQ_HANDLED;
2366}
2367
2368static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2369{
2370 struct qlcnic_host_sds_ring *sds_ring = data;
2371 struct qlcnic_adapter *adapter = sds_ring->adapter;
2372
2373 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2374 goto done;
2375 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2376 writel(0xffffffff, adapter->tgt_status_reg);
2377 goto done;
2378 }
2379
2380 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2381 return IRQ_NONE;
2382
2383done:
2384 adapter->diag_cnt++;
2385 qlcnic_enable_int(sds_ring);
2386 return IRQ_HANDLED;
2387}
2388
2389static irqreturn_t qlcnic_intr(int irq, void *data)
2390{
2391 struct qlcnic_host_sds_ring *sds_ring = data;
2392 struct qlcnic_adapter *adapter = sds_ring->adapter;
2393
2394 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2395 return IRQ_NONE;
2396
af19b491
AKS
2397 napi_schedule(&sds_ring->napi);
2398
2399 return IRQ_HANDLED;
2400}
2401
2402static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2403{
2404 struct qlcnic_host_sds_ring *sds_ring = data;
2405 struct qlcnic_adapter *adapter = sds_ring->adapter;
2406
2407 /* clear interrupt */
2408 writel(0xffffffff, adapter->tgt_status_reg);
2409
2410 napi_schedule(&sds_ring->napi);
2411 return IRQ_HANDLED;
2412}
2413
2414static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2415{
2416 struct qlcnic_host_sds_ring *sds_ring = data;
2417
2418 napi_schedule(&sds_ring->napi);
2419 return IRQ_HANDLED;
2420}
2421
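/*
 * Reclaim completed Tx descriptors from NAPI context. The tx_clean_lock is
 * taken with a trylock so concurrent callers back off; transmitted skbs are
 * unmapped and freed up to the hardware consumer index, and the queue is
 * woken again once enough descriptors are available.
 */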
2422static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2423{
2424 u32 sw_consumer, hw_consumer;
2425 int count = 0, i;
2426 struct qlcnic_cmd_buffer *buffer;
2427 struct pci_dev *pdev = adapter->pdev;
2428 struct net_device *netdev = adapter->netdev;
2429 struct qlcnic_skb_frag *frag;
2430 int done;
2431 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2432
2433 if (!spin_trylock(&adapter->tx_clean_lock))
2434 return 1;
2435
2436 sw_consumer = tx_ring->sw_consumer;
2437 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2438
2439 while (sw_consumer != hw_consumer) {
2440 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2441 if (buffer->skb) {
2442 frag = &buffer->frag_array[0];
2443 pci_unmap_single(pdev, frag->dma, frag->length,
2444 PCI_DMA_TODEVICE);
2445 frag->dma = 0ULL;
2446 for (i = 1; i < buffer->frag_count; i++) {
2447 frag++;
2448 pci_unmap_page(pdev, frag->dma, frag->length,
2449 PCI_DMA_TODEVICE);
2450 frag->dma = 0ULL;
2451 }
2452
2453 adapter->stats.xmitfinished++;
2454 dev_kfree_skb_any(buffer->skb);
2455 buffer->skb = NULL;
2456 }
2457
2458 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2459 if (++count >= MAX_STATUS_HANDLE)
2460 break;
2461 }
2462
2463 if (count && netif_running(netdev)) {
2464 tx_ring->sw_consumer = sw_consumer;
2465
2466 smp_mb();
2467
2468 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2469 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2470 netif_wake_queue(netdev);
8bfe8b91 2471 adapter->stats.xmit_on++;
af19b491 2472 }
af19b491 2473 }
ef71ff83 2474 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2475 }
 2476 /*
 2477 * If everything up to the consumer has been freed, check whether the
 2478 * ring is full. If it is, check whether more needs to be freed and
 2479 * reschedule the callback.
 2480 *
 2481 * This matters when two CPUs are involved: one may be freeing while the
 2482 * other is filling the ring. If the ring is full when we leave here and
 2483 * the card has already interrupted the host, the host can miss the
 2484 * interrupt.
 2485 *
 2486 * A race window still remains in which the host could miss an
 2487 * interrupt; the card has to take care of that case.
 2488 */
2489 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2490 done = (sw_consumer == hw_consumer);
2491 spin_unlock(&adapter->tx_clean_lock);
2492
2493 return done;
2494}
2495
2496static int qlcnic_poll(struct napi_struct *napi, int budget)
2497{
2498 struct qlcnic_host_sds_ring *sds_ring =
2499 container_of(napi, struct qlcnic_host_sds_ring, napi);
2500
2501 struct qlcnic_adapter *adapter = sds_ring->adapter;
2502
2503 int tx_complete;
2504 int work_done;
2505
2506 tx_complete = qlcnic_process_cmd_ring(adapter);
2507
2508 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2509
2510 if ((work_done < budget) && tx_complete) {
2511 napi_complete(&sds_ring->napi);
2512 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2513 qlcnic_enable_int(sds_ring);
2514 }
2515
2516 return work_done;
2517}
2518
8f891387 2519static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2520{
2521 struct qlcnic_host_sds_ring *sds_ring =
2522 container_of(napi, struct qlcnic_host_sds_ring, napi);
2523
2524 struct qlcnic_adapter *adapter = sds_ring->adapter;
2525 int work_done;
2526
2527 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2528
2529 if (work_done < budget) {
2530 napi_complete(&sds_ring->napi);
2531 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2532 qlcnic_enable_int(sds_ring);
2533 }
2534
2535 return work_done;
2536}
2537
af19b491
AKS
2538#ifdef CONFIG_NET_POLL_CONTROLLER
2539static void qlcnic_poll_controller(struct net_device *netdev)
2540{
bf82791e
YL
2541 int ring;
2542 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2543 struct qlcnic_adapter *adapter = netdev_priv(netdev);
b1fc6d3c 2544 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
bf82791e 2545
af19b491 2546 disable_irq(adapter->irq);
bf82791e
YL
2547 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2548 sds_ring = &recv_ctx->sds_rings[ring];
2549 qlcnic_intr(adapter->irq, sds_ring);
2550 }
af19b491
AKS
2551 enable_irq(adapter->irq);
2552}
2553#endif
2554
6df900e9
SC
2555static void
2556qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2557{
2558 u32 val;
2559
2560 val = adapter->portnum & 0xf;
2561 val |= encoding << 7;
2562 val |= (jiffies - adapter->dev_rst_time) << 8;
2563
2564 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2565 adapter->dev_rst_time = jiffies;
2566}
2567
ade91f8e
AKS
2568static int
2569qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2570{
2571 u32 val;
2572
2573 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2574 state != QLCNIC_DEV_NEED_QUISCENT);
2575
2576 if (qlcnic_api_lock(adapter))
ade91f8e 2577 return -EIO;
af19b491
AKS
2578
2579 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2580
2581 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2582 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2583 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2584 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2585
2586 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2587
2588 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2589
2590 return 0;
af19b491
AKS
2591}
2592
1b95a839
AKS
2593static int
2594qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2595{
2596 u32 val;
2597
2598 if (qlcnic_api_lock(adapter))
2599 return -EBUSY;
2600
2601 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2602 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2603 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2604
2605 qlcnic_api_unlock(adapter);
2606
2607 return 0;
2608}
2609
af19b491 2610static void
21854f02 2611qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2612{
2613 u32 val;
2614
2615 if (qlcnic_api_lock(adapter))
2616 goto err;
2617
31018e06 2618 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2619 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2620 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2621
21854f02
AKS
2622 if (failed) {
2623 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2624 dev_info(&adapter->pdev->dev,
2625 "Device state set to Failed. Please Reboot\n");
2626 } else if (!(val & 0x11111111))
af19b491
AKS
2627 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2628
2629 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2630 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2631 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2632
2633 qlcnic_api_unlock(adapter);
2634err:
2635 adapter->fw_fail_cnt = 0;
2636 clear_bit(__QLCNIC_START_FW, &adapter->state);
2637 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2638}
2639
f73dfc50 2640 /* Grab the API lock before checking the state */
af19b491
AKS
2641static int
2642qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2643{
2644 int act, state;
2645
2646 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2647 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2648
2649 if (((state & 0x11111111) == (act & 0x11111111)) ||
2650 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2651 return 0;
2652 else
2653 return 1;
2654}
2655
96f8118c
SC
2656static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2657{
2658 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2659
2660 if (val != QLCNIC_DRV_IDC_VER) {
2661 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2662 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2663 }
2664
2665 return 0;
2666}
2667
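/*
 * IDC handshake used during (re)initialization: register this function in
 * DRV_ACTIVE, then act on the current device state - start the firmware
 * when the device is COLD, acknowledge NEED_RESET/NEED_QUISCENT, bail out
 * on FAILED, and otherwise poll until the device reaches READY or the
 * init timeout expires.
 */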
af19b491
AKS
2668static int
2669qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2670{
2671 u32 val, prev_state;
aa5e18c0 2672 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2673 u8 portnum = adapter->portnum;
96f8118c 2674 u8 ret;
af19b491 2675
f73dfc50
AKS
2676 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2677 return 1;
2678
af19b491
AKS
2679 if (qlcnic_api_lock(adapter))
2680 return -1;
2681
31018e06 2682 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2683 if (!(val & (1 << (portnum * 4)))) {
2684 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2685 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2686 }
2687
2688 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2689 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2690
2691 switch (prev_state) {
2692 case QLCNIC_DEV_COLD:
bbd8c6a4 2693 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2694 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2695 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2696 qlcnic_api_unlock(adapter);
2697 return 1;
2698
2699 case QLCNIC_DEV_READY:
96f8118c 2700 ret = qlcnic_check_idc_ver(adapter);
af19b491 2701 qlcnic_api_unlock(adapter);
96f8118c 2702 return ret;
af19b491
AKS
2703
2704 case QLCNIC_DEV_NEED_RESET:
2705 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2706 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2707 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2708 break;
2709
2710 case QLCNIC_DEV_NEED_QUISCENT:
2711 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2712 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2713 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2714 break;
2715
2716 case QLCNIC_DEV_FAILED:
a7fc948f 2717 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2718 qlcnic_api_unlock(adapter);
2719 return -1;
bbd8c6a4
AKS
2720
2721 case QLCNIC_DEV_INITIALIZING:
2722 case QLCNIC_DEV_QUISCENT:
2723 break;
af19b491
AKS
2724 }
2725
2726 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2727
2728 do {
af19b491 2729 msleep(1000);
a5e463d0
SC
2730 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2731
2732 if (prev_state == QLCNIC_DEV_QUISCENT)
2733 continue;
2734 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2735
65b5b420
AKS
2736 if (!dev_init_timeo) {
2737 dev_err(&adapter->pdev->dev,
2738 "Waiting for device to initialize timeout\n");
af19b491 2739 return -1;
65b5b420 2740 }
af19b491
AKS
2741
2742 if (qlcnic_api_lock(adapter))
2743 return -1;
2744
2745 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2746 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2747 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2748
96f8118c 2749 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2750 qlcnic_api_unlock(adapter);
2751
96f8118c 2752 return ret;
af19b491
AKS
2753}
2754
2755static void
2756qlcnic_fwinit_work(struct work_struct *work)
2757{
2758 struct qlcnic_adapter *adapter = container_of(work,
2759 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2760 u32 dev_state = 0xf;
af19b491 2761
f73dfc50
AKS
2762 if (qlcnic_api_lock(adapter))
2763 goto err_ret;
af19b491 2764
a5e463d0 2765 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2766 if (dev_state == QLCNIC_DEV_QUISCENT ||
2767 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2768 qlcnic_api_unlock(adapter);
2769 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2770 FW_POLL_DELAY * 2);
2771 return;
2772 }
2773
9f26f547 2774 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2775 qlcnic_api_unlock(adapter);
2776 goto wait_npar;
9f26f547
AC
2777 }
2778
f73dfc50
AKS
2779 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
 2780 dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2781 adapter->reset_ack_timeo);
2782 goto skip_ack_check;
2783 }
2784
2785 if (!qlcnic_check_drv_state(adapter)) {
2786skip_ack_check:
2787 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2788
f73dfc50
AKS
2789 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2790 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2791 QLCNIC_DEV_INITIALIZING);
2792 set_bit(__QLCNIC_START_FW, &adapter->state);
2793 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2794 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2795 }
2796
f73dfc50
AKS
2797 qlcnic_api_unlock(adapter);
2798
9f26f547 2799 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2800 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2801 adapter->fw_wait_cnt = 0;
af19b491
AKS
2802 return;
2803 }
af19b491
AKS
2804 goto err_ret;
2805 }
2806
f73dfc50 2807 qlcnic_api_unlock(adapter);
aa5e18c0 2808
9f26f547 2809wait_npar:
af19b491 2810 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2811 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2812
af19b491 2813 switch (dev_state) {
3c4b23b1 2814 case QLCNIC_DEV_READY:
9f26f547 2815 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2816 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2817 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2818 return;
2819 }
3c4b23b1
AKS
2820 case QLCNIC_DEV_FAILED:
2821 break;
2822 default:
2823 qlcnic_schedule_work(adapter,
2824 qlcnic_fwinit_work, FW_POLL_DELAY);
2825 return;
af19b491
AKS
2826 }
2827
2828err_ret:
f73dfc50
AKS
2829 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2830 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2831 netif_device_attach(adapter->netdev);
21854f02 2832 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2833}
2834
2835static void
2836qlcnic_detach_work(struct work_struct *work)
2837{
2838 struct qlcnic_adapter *adapter = container_of(work,
2839 struct qlcnic_adapter, fw_work.work);
2840 struct net_device *netdev = adapter->netdev;
2841 u32 status;
2842
2843 netif_device_detach(netdev);
2844
b8c17620
AKS
 2845 /* Don't grab the rtnl lock during quiescent mode */
2846 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2847 if (netif_running(netdev))
2848 __qlcnic_down(adapter, netdev);
2849 } else
2850 qlcnic_down(adapter, netdev);
af19b491 2851
af19b491
AKS
2852 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2853
2854 if (status & QLCNIC_RCODE_FATAL_ERROR)
2855 goto err_ret;
2856
2857 if (adapter->temp == QLCNIC_TEMP_PANIC)
2858 goto err_ret;
2859
ade91f8e
AKS
2860 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2861 goto err_ret;
af19b491
AKS
2862
2863 adapter->fw_wait_cnt = 0;
2864
2865 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2866
2867 return;
2868
2869err_ret:
65b5b420
AKS
2870 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2871 status, adapter->temp);
34ce3626 2872 netif_device_attach(netdev);
21854f02 2873 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2874}
2875
3c4b23b1
AKS
2876 /* Transition the NPAR state to NON-operational */
2877static void
2878qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2879{
2880 u32 state;
2881
2882 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2883 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2884 return;
2885
2886 if (qlcnic_api_lock(adapter))
2887 return;
2888 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2889 qlcnic_api_unlock(adapter);
2890}
2891
f73dfc50 2892/*Transit to RESET state from READY state only */
af19b491
AKS
2893static void
2894qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2895{
2896 u32 state;
2897
cea8975e 2898 adapter->need_fw_reset = 1;
af19b491
AKS
2899 if (qlcnic_api_lock(adapter))
2900 return;
2901
2902 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2903
f73dfc50 2904 if (state == QLCNIC_DEV_READY) {
af19b491 2905 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2906 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2907 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2908 }
2909
3c4b23b1 2910 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2911 qlcnic_api_unlock(adapter);
2912}
2913
9f26f547
AC
2914 /* Transition to the NPAR READY state from the NPAR NOT READY state */
2915static void
2916qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2917{
9f26f547
AC
2918 if (qlcnic_api_lock(adapter))
2919 return;
2920
3c4b23b1
AKS
2921 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2922 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2923
2924 qlcnic_api_unlock(adapter);
2925}
2926
af19b491
AKS
2927static void
2928qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2929 work_func_t func, int delay)
2930{
451724c8
SC
2931 if (test_bit(__QLCNIC_AER, &adapter->state))
2932 return;
2933
af19b491 2934 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
2935 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
2936 round_jiffies_relative(delay));
af19b491
AKS
2937}
2938
2939static void
2940qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2941{
2942 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2943 msleep(10);
2944
2945 cancel_delayed_work_sync(&adapter->fw_work);
2946}
2947
2948static void
2949qlcnic_attach_work(struct work_struct *work)
2950{
2951 struct qlcnic_adapter *adapter = container_of(work,
2952 struct qlcnic_adapter, fw_work.work);
2953 struct net_device *netdev = adapter->netdev;
b18971d1 2954 u32 npar_state;
af19b491 2955
b18971d1
AKS
2956 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2957 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2958 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2959 qlcnic_clr_all_drv_state(adapter, 0);
2960 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2961 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2962 FW_POLL_DELAY);
2963 else
2964 goto attach;
 2965 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2966 return;
2967 }
2968attach:
af19b491 2969 if (netif_running(netdev)) {
52486a3a 2970 if (qlcnic_up(adapter, netdev))
af19b491 2971 goto done;
af19b491 2972
aec1e845 2973 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
2974 }
2975
af19b491 2976done:
34ce3626 2977 netif_device_attach(netdev);
af19b491
AKS
2978 adapter->fw_fail_cnt = 0;
2979 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2980
2981 if (!qlcnic_clr_drv_state(adapter))
2982 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2983 FW_POLL_DELAY);
af19b491
AKS
2984}
2985
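/*
 * Periodic health check run from qlcnic_fw_poll_work(): monitors the board
 * temperature, the IDC device state and the firmware heartbeat counter,
 * requests a firmware reset once the heartbeat has stalled for
 * FW_FAIL_THRESH polls, and schedules the detach/recovery worker when
 * auto_fw_reset is enabled.
 */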
2986static int
2987qlcnic_check_health(struct qlcnic_adapter *adapter)
2988{
4e70812b 2989 u32 state = 0, heartbeat;
af19b491
AKS
2990 struct net_device *netdev = adapter->netdev;
2991
2992 if (qlcnic_check_temp(adapter))
2993 goto detach;
2994
2372a5f1 2995 if (adapter->need_fw_reset)
af19b491 2996 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2997
2998 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 2999 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 3000 qlcnic_set_npar_non_operational(adapter);
af19b491 3001 adapter->need_fw_reset = 1;
b8c17620
AKS
3002 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3003 goto detach;
af19b491 3004
4e70812b
SC
3005 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3006 if (heartbeat != adapter->heartbeat) {
3007 adapter->heartbeat = heartbeat;
af19b491
AKS
3008 adapter->fw_fail_cnt = 0;
3009 if (adapter->need_fw_reset)
3010 goto detach;
68bf1c68 3011
9ce13ca8 3012 if (adapter->reset_context && auto_fw_reset) {
68bf1c68
AKS
3013 qlcnic_reset_hw_context(adapter);
3014 adapter->netdev->trans_start = jiffies;
3015 }
3016
af19b491
AKS
3017 return 0;
3018 }
3019
3020 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3021 return 0;
3022
3023 qlcnic_dev_request_reset(adapter);
3024
9ce13ca8 3025 if (auto_fw_reset)
0df170b6 3026 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
3027
3028 dev_info(&netdev->dev, "firmware hang detected\n");
3029
3030detach:
3031 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3032 QLCNIC_DEV_NEED_RESET;
3033
9ce13ca8 3034 if (auto_fw_reset &&
65b5b420
AKS
3035 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3036
af19b491 3037 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3038 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3039 }
af19b491
AKS
3040
3041 return 1;
3042}
3043
3044static void
3045qlcnic_fw_poll_work(struct work_struct *work)
3046{
3047 struct qlcnic_adapter *adapter = container_of(work,
3048 struct qlcnic_adapter, fw_work.work);
3049
3050 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3051 goto reschedule;
3052
3053
3054 if (qlcnic_check_health(adapter))
3055 return;
3056
b5e5492c
AKS
3057 if (adapter->fhash.fnum)
3058 qlcnic_prune_lb_filters(adapter);
3059
af19b491
AKS
3060reschedule:
3061 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3062}
3063
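/*
 * Look at the lower-numbered PCI functions in the same slot; this function
 * is treated as the "first" one only when all of them are in D3cold, i.e.
 * no other active function will take care of restarting the firmware
 * during AER recovery.
 */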
451724c8
SC
3064static int qlcnic_is_first_func(struct pci_dev *pdev)
3065{
3066 struct pci_dev *oth_pdev;
3067 int val = pdev->devfn;
3068
3069 while (val-- > 0) {
3070 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3071 (pdev->bus), pdev->bus->number,
3072 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3073 if (!oth_pdev)
3074 continue;
451724c8 3075
bfc978fa
AKS
3076 if (oth_pdev->current_state != PCI_D3cold) {
3077 pci_dev_put(oth_pdev);
451724c8 3078 return 0;
bfc978fa
AKS
3079 }
3080 pci_dev_put(oth_pdev);
451724c8
SC
3081 }
3082 return 1;
3083}
3084
3085static int qlcnic_attach_func(struct pci_dev *pdev)
3086{
3087 int err, first_func;
3088 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3089 struct net_device *netdev = adapter->netdev;
3090
3091 pdev->error_state = pci_channel_io_normal;
3092
3093 err = pci_enable_device(pdev);
3094 if (err)
3095 return err;
3096
3097 pci_set_power_state(pdev, PCI_D0);
3098 pci_set_master(pdev);
3099 pci_restore_state(pdev);
3100
3101 first_func = qlcnic_is_first_func(pdev);
3102
3103 if (qlcnic_api_lock(adapter))
3104 return -EINVAL;
3105
933fce12 3106 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3107 adapter->need_fw_reset = 1;
3108 set_bit(__QLCNIC_START_FW, &adapter->state);
3109 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3110 QLCDB(adapter, DRV, "Restarting fw\n");
3111 }
3112 qlcnic_api_unlock(adapter);
3113
3114 err = adapter->nic_ops->start_firmware(adapter);
3115 if (err)
3116 return err;
3117
3118 qlcnic_clr_drv_state(adapter);
3119 qlcnic_setup_intr(adapter);
3120
3121 if (netif_running(netdev)) {
3122 err = qlcnic_attach(adapter);
3123 if (err) {
21854f02 3124 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3125 clear_bit(__QLCNIC_AER, &adapter->state);
3126 netif_device_attach(netdev);
3127 return err;
3128 }
3129
3130 err = qlcnic_up(adapter, netdev);
3131 if (err)
3132 goto done;
3133
aec1e845 3134 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3135 }
3136 done:
3137 netif_device_attach(netdev);
3138 return err;
3139}
3140
3141static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3142 pci_channel_state_t state)
3143{
3144 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3145 struct net_device *netdev = adapter->netdev;
3146
3147 if (state == pci_channel_io_perm_failure)
3148 return PCI_ERS_RESULT_DISCONNECT;
3149
3150 if (state == pci_channel_io_normal)
3151 return PCI_ERS_RESULT_RECOVERED;
3152
3153 set_bit(__QLCNIC_AER, &adapter->state);
3154 netif_device_detach(netdev);
3155
3156 cancel_delayed_work_sync(&adapter->fw_work);
3157
3158 if (netif_running(netdev))
3159 qlcnic_down(adapter, netdev);
3160
3161 qlcnic_detach(adapter);
3162 qlcnic_teardown_intr(adapter);
3163
3164 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3165
3166 pci_save_state(pdev);
3167 pci_disable_device(pdev);
3168
3169 return PCI_ERS_RESULT_NEED_RESET;
3170}
3171
3172static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3173{
3174 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3175 PCI_ERS_RESULT_RECOVERED;
3176}
3177
3178static void qlcnic_io_resume(struct pci_dev *pdev)
3179{
3180 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3181
3182 pci_cleanup_aer_uncorrect_error_status(pdev);
3183
3184 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3185 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3186 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3187 FW_POLL_DELAY);
3188}
3189
87eb743b
AC
3190static int
3191qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3192{
3193 int err;
3194
3195 err = qlcnic_can_start_firmware(adapter);
3196 if (err)
3197 return err;
3198
78f84e1a
AKS
3199 err = qlcnic_check_npar_opertional(adapter);
3200 if (err)
3201 return err;
3c4b23b1 3202
174240a8
RB
3203 err = qlcnic_initialize_nic(adapter);
3204 if (err)
3205 return err;
3206
87eb743b
AC
3207 qlcnic_check_options(adapter);
3208
7373373d
RB
3209 err = qlcnic_set_eswitch_port_config(adapter);
3210 if (err)
3211 return err;
3212
87eb743b
AC
3213 adapter->need_fw_reset = 0;
3214
3215 return err;
3216}
3217
3218static int
3219qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3220{
3221 return -EOPNOTSUPP;
3222}
3223
3224static int
3225qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3226{
3227 return -EOPNOTSUPP;
3228}
3229
af19b491
AKS
3230static ssize_t
3231qlcnic_store_bridged_mode(struct device *dev,
3232 struct device_attribute *attr, const char *buf, size_t len)
3233{
3234 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3235 unsigned long new;
3236 int ret = -EINVAL;
3237
3238 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3239 goto err_out;
3240
8a15ad1f 3241 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3242 goto err_out;
3243
3244 if (strict_strtoul(buf, 2, &new))
3245 goto err_out;
3246
2e9d722d 3247 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3248 ret = len;
3249
3250err_out:
3251 return ret;
3252}
3253
3254static ssize_t
3255qlcnic_show_bridged_mode(struct device *dev,
3256 struct device_attribute *attr, char *buf)
3257{
3258 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3259 int bridged_mode = 0;
3260
3261 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3262 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3263
3264 return sprintf(buf, "%d\n", bridged_mode);
3265}
3266
3267static struct device_attribute dev_attr_bridged_mode = {
3268 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3269 .show = qlcnic_show_bridged_mode,
3270 .store = qlcnic_store_bridged_mode,
3271};
3272
3273static ssize_t
3274qlcnic_store_diag_mode(struct device *dev,
3275 struct device_attribute *attr, const char *buf, size_t len)
3276{
3277 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3278 unsigned long new;
3279
3280 if (strict_strtoul(buf, 2, &new))
3281 return -EINVAL;
3282
3283 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3284 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3285
3286 return len;
3287}
3288
3289static ssize_t
3290qlcnic_show_diag_mode(struct device *dev,
3291 struct device_attribute *attr, char *buf)
3292{
3293 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3294
3295 return sprintf(buf, "%d\n",
3296 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3297}
3298
3299static struct device_attribute dev_attr_diag_mode = {
3300 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3301 .show = qlcnic_show_diag_mode,
3302 .store = qlcnic_store_diag_mode,
3303};
3304
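/*
 * The "crb" and "mem" binary sysfs attributes below give diagnostic tools
 * raw access to CRB registers (4 bytes, or 8 bytes in the CAMQM window)
 * and to adapter memory (8-byte, aligned accesses only). Both are honoured
 * only while QLCNIC_DIAG_ENABLED has been set through the "diag_mode"
 * attribute.
 */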
3305static int
3306qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3307 loff_t offset, size_t size)
3308{
897e8c7c
DP
3309 size_t crb_size = 4;
3310
af19b491
AKS
3311 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3312 return -EIO;
3313
897e8c7c
DP
3314 if (offset < QLCNIC_PCI_CRBSPACE) {
3315 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3316 QLCNIC_PCI_CAMQM_END))
3317 crb_size = 8;
3318 else
3319 return -EINVAL;
3320 }
af19b491 3321
897e8c7c
DP
3322 if ((size != crb_size) || (offset & (crb_size-1)))
3323 return -EINVAL;
af19b491
AKS
3324
3325 return 0;
3326}
3327
3328static ssize_t
2c3c8bea
CW
3329qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3330 struct bin_attribute *attr,
af19b491
AKS
3331 char *buf, loff_t offset, size_t size)
3332{
3333 struct device *dev = container_of(kobj, struct device, kobj);
3334 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3335 u32 data;
897e8c7c 3336 u64 qmdata;
af19b491
AKS
3337 int ret;
3338
3339 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3340 if (ret != 0)
3341 return ret;
3342
897e8c7c
DP
3343 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3344 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3345 memcpy(buf, &qmdata, size);
3346 } else {
3347 data = QLCRD32(adapter, offset);
3348 memcpy(buf, &data, size);
3349 }
af19b491
AKS
3350 return size;
3351}
3352
3353static ssize_t
2c3c8bea
CW
3354qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3355 struct bin_attribute *attr,
af19b491
AKS
3356 char *buf, loff_t offset, size_t size)
3357{
3358 struct device *dev = container_of(kobj, struct device, kobj);
3359 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3360 u32 data;
897e8c7c 3361 u64 qmdata;
af19b491
AKS
3362 int ret;
3363
3364 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3365 if (ret != 0)
3366 return ret;
3367
897e8c7c
DP
3368 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3369 memcpy(&qmdata, buf, size);
3370 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3371 } else {
3372 memcpy(&data, buf, size);
3373 QLCWR32(adapter, offset, data);
3374 }
af19b491
AKS
3375 return size;
3376}
3377
3378static int
3379qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3380 loff_t offset, size_t size)
3381{
3382 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3383 return -EIO;
3384
3385 if ((size != 8) || (offset & 0x7))
3386 return -EIO;
3387
3388 return 0;
3389}
3390
3391static ssize_t
2c3c8bea
CW
3392qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3393 struct bin_attribute *attr,
af19b491
AKS
3394 char *buf, loff_t offset, size_t size)
3395{
3396 struct device *dev = container_of(kobj, struct device, kobj);
3397 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3398 u64 data;
3399 int ret;
3400
3401 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3402 if (ret != 0)
3403 return ret;
3404
3405 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3406 return -EIO;
3407
3408 memcpy(buf, &data, size);
3409
3410 return size;
3411}
3412
3413static ssize_t
2c3c8bea
CW
3414qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3415 struct bin_attribute *attr,
af19b491
AKS
3416 char *buf, loff_t offset, size_t size)
3417{
3418 struct device *dev = container_of(kobj, struct device, kobj);
3419 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3420 u64 data;
3421 int ret;
3422
3423 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3424 if (ret != 0)
3425 return ret;
3426
3427 memcpy(&data, buf, size);
3428
3429 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3430 return -EIO;
3431
3432 return size;
3433}
3434
3435
3436static struct bin_attribute bin_attr_crb = {
3437 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3438 .size = 0,
3439 .read = qlcnic_sysfs_read_crb,
3440 .write = qlcnic_sysfs_write_crb,
3441};
3442
3443static struct bin_attribute bin_attr_mem = {
3444 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3445 .size = 0,
3446 .read = qlcnic_sysfs_read_mem,
3447 .write = qlcnic_sysfs_write_mem,
3448};
3449
cea8975e 3450static int
346fe763
RB
3451validate_pm_config(struct qlcnic_adapter *adapter,
3452 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3453{
3454
3455 u8 src_pci_func, s_esw_id, d_esw_id;
3456 u8 dest_pci_func;
3457 int i;
3458
3459 for (i = 0; i < count; i++) {
3460 src_pci_func = pm_cfg[i].pci_func;
3461 dest_pci_func = pm_cfg[i].dest_npar;
3462 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3463 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3464 return QL_STATUS_INVALID_PARAM;
3465
3466 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3467 return QL_STATUS_INVALID_PARAM;
3468
3469 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3470 return QL_STATUS_INVALID_PARAM;
3471
346fe763
RB
3472 s_esw_id = adapter->npars[src_pci_func].phy_port;
3473 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3474
3475 if (s_esw_id != d_esw_id)
3476 return QL_STATUS_INVALID_PARAM;
3477
3478 }
3479 return 0;
3480
3481}
3482
3483static ssize_t
3484qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3485 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3486{
3487 struct device *dev = container_of(kobj, struct device, kobj);
3488 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3489 struct qlcnic_pm_func_cfg *pm_cfg;
3490 u32 id, action, pci_func;
3491 int count, rem, i, ret;
3492
3493 count = size / sizeof(struct qlcnic_pm_func_cfg);
3494 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3495 if (rem)
3496 return QL_STATUS_INVALID_PARAM;
3497
3498 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3499
3500 ret = validate_pm_config(adapter, pm_cfg, count);
3501 if (ret)
3502 return ret;
3503 for (i = 0; i < count; i++) {
3504 pci_func = pm_cfg[i].pci_func;
4e8acb01 3505 action = !!pm_cfg[i].action;
346fe763
RB
3506 id = adapter->npars[pci_func].phy_port;
3507 ret = qlcnic_config_port_mirroring(adapter, id,
3508 action, pci_func);
3509 if (ret)
3510 return ret;
3511 }
3512
3513 for (i = 0; i < count; i++) {
3514 pci_func = pm_cfg[i].pci_func;
3515 id = adapter->npars[pci_func].phy_port;
4e8acb01 3516 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3517 adapter->npars[pci_func].dest_npar = id;
3518 }
3519 return size;
3520}
3521
3522static ssize_t
3523qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3524 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3525{
3526 struct device *dev = container_of(kobj, struct device, kobj);
3527 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3528 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3529 int i;
3530
3531 if (size != sizeof(pm_cfg))
3532 return QL_STATUS_INVALID_PARAM;
3533
3534 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3535 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3536 continue;
3537 pm_cfg[i].action = adapter->npars[i].enable_pm;
3538 pm_cfg[i].dest_npar = 0;
3539 pm_cfg[i].pci_func = i;
3540 }
3541 memcpy(buf, &pm_cfg, size);
3542
3543 return size;
3544}
3545
cea8975e 3546static int
346fe763 3547validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3548 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3549{
7613c87b 3550 u32 op_mode;
346fe763
RB
3551 u8 pci_func;
3552 int i;
7613c87b 3553
b1fc6d3c 3554 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
7613c87b 3555
346fe763
RB
3556 for (i = 0; i < count; i++) {
3557 pci_func = esw_cfg[i].pci_func;
3558 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3559 return QL_STATUS_INVALID_PARAM;
3560
4e8acb01
RB
3561 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3562 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3563 return QL_STATUS_INVALID_PARAM;
346fe763 3564
4e8acb01
RB
3565 switch (esw_cfg[i].op_mode) {
3566 case QLCNIC_PORT_DEFAULTS:
7613c87b 3567 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3568 QLCNIC_NON_PRIV_FUNC) {
091056b2
AKS
3569 if (esw_cfg[i].mac_anti_spoof != 0)
3570 return QL_STATUS_INVALID_PARAM;
3571 if (esw_cfg[i].mac_override != 1)
3572 return QL_STATUS_INVALID_PARAM;
3573 if (esw_cfg[i].promisc_mode != 1)
3574 return QL_STATUS_INVALID_PARAM;
7373373d 3575 }
4e8acb01
RB
3576 break;
3577 case QLCNIC_ADD_VLAN:
346fe763
RB
3578 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3579 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3580 if (!esw_cfg[i].op_type)
3581 return QL_STATUS_INVALID_PARAM;
3582 break;
3583 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3584 if (!esw_cfg[i].op_type)
3585 return QL_STATUS_INVALID_PARAM;
3586 break;
3587 default:
346fe763 3588 return QL_STATUS_INVALID_PARAM;
4e8acb01 3589 }
346fe763 3590 }
346fe763
RB
3591 return 0;
3592}
3593
3594static ssize_t
3595qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3596 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3597{
3598 struct device *dev = container_of(kobj, struct device, kobj);
3599 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3600 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3601 struct qlcnic_npar_info *npar;
346fe763 3602 int count, rem, i, ret;
0325d69b 3603 u8 pci_func, op_mode = 0;
346fe763
RB
3604
3605 count = size / sizeof(struct qlcnic_esw_func_cfg);
3606 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3607 if (rem)
3608 return QL_STATUS_INVALID_PARAM;
3609
3610 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3611 ret = validate_esw_config(adapter, esw_cfg, count);
3612 if (ret)
3613 return ret;
3614
3615 for (i = 0; i < count; i++) {
0325d69b
RB
3616 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3617 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3618 return QL_STATUS_INVALID_PARAM;
e9a47700 3619
b1fc6d3c 3620 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
e9a47700
RB
3621 continue;
3622
3623 op_mode = esw_cfg[i].op_mode;
3624 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3625 esw_cfg[i].op_mode = op_mode;
b1fc6d3c 3626 esw_cfg[i].pci_func = adapter->ahw->pci_func;
e9a47700
RB
3627
3628 switch (esw_cfg[i].op_mode) {
3629 case QLCNIC_PORT_DEFAULTS:
3630 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3631 break;
8cf61f89
AKS
3632 case QLCNIC_ADD_VLAN:
3633 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3634 break;
3635 case QLCNIC_DEL_VLAN:
3636 esw_cfg[i].vlan_id = 0;
3637 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3638 break;
0325d69b 3639 }
346fe763
RB
3640 }
3641
0325d69b
RB
3642 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3643 goto out;
e9a47700 3644
346fe763
RB
3645 for (i = 0; i < count; i++) {
3646 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3647 npar = &adapter->npars[pci_func];
3648 switch (esw_cfg[i].op_mode) {
3649 case QLCNIC_PORT_DEFAULTS:
3650 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3651 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3652 npar->offload_flags = esw_cfg[i].offload_flags;
3653 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3654 npar->discard_tagged = esw_cfg[i].discard_tagged;
3655 break;
3656 case QLCNIC_ADD_VLAN:
3657 npar->pvid = esw_cfg[i].vlan_id;
3658 break;
3659 case QLCNIC_DEL_VLAN:
3660 npar->pvid = 0;
3661 break;
3662 }
346fe763 3663 }
0325d69b 3664out:
346fe763
RB
3665 return size;
3666}
3667
3668static ssize_t
3669qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3670 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3671{
3672 struct device *dev = container_of(kobj, struct device, kobj);
3673 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3674 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3675 u8 i;
346fe763
RB
3676
3677 if (size != sizeof(esw_cfg))
3678 return QL_STATUS_INVALID_PARAM;
3679
3680 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3681 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3682 continue;
4e8acb01
RB
3683 esw_cfg[i].pci_func = i;
3684 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3685 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3686 }
3687 memcpy(buf, &esw_cfg, size);
3688
3689 return size;
3690}
3691
cea8975e 3692static int
346fe763
RB
3693validate_npar_config(struct qlcnic_adapter *adapter,
3694 struct qlcnic_npar_func_cfg *np_cfg, int count)
3695{
3696 u8 pci_func, i;
3697
3698 for (i = 0; i < count; i++) {
3699 pci_func = np_cfg[i].pci_func;
3700 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3701 return QL_STATUS_INVALID_PARAM;
3702
3703 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3704 return QL_STATUS_INVALID_PARAM;
3705
d12b0d9a
RB
3706 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3707 !IS_VALID_BW(np_cfg[i].max_bw))
346fe763
RB
3708 return QL_STATUS_INVALID_PARAM;
3709 }
3710 return 0;
3711}
3712
3713static ssize_t
3714qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3715 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3716{
3717 struct device *dev = container_of(kobj, struct device, kobj);
3718 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3719 struct qlcnic_info nic_info;
3720 struct qlcnic_npar_func_cfg *np_cfg;
3721 int i, count, rem, ret;
3722 u8 pci_func;
3723
3724 count = size / sizeof(struct qlcnic_npar_func_cfg);
3725 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3726 if (rem)
3727 return QL_STATUS_INVALID_PARAM;
3728
3729 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3730 ret = validate_npar_config(adapter, np_cfg, count);
3731 if (ret)
3732 return ret;
3733
3734 for (i = 0; i < count; i++) {
3735 pci_func = np_cfg[i].pci_func;
3736 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3737 if (ret)
3738 return ret;
3739 nic_info.pci_func = pci_func;
3740 nic_info.min_tx_bw = np_cfg[i].min_bw;
3741 nic_info.max_tx_bw = np_cfg[i].max_bw;
3742 ret = qlcnic_set_nic_info(adapter, &nic_info);
3743 if (ret)
3744 return ret;
cea8975e
AC
3745 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3746 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3747 }
3748
3749 return size;
3750
3751}
3752static ssize_t
3753qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3754 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3755{
3756 struct device *dev = container_of(kobj, struct device, kobj);
3757 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3758 struct qlcnic_info nic_info;
3759 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3760 int i, ret;
3761
3762 if (size != sizeof(np_cfg))
3763 return QL_STATUS_INVALID_PARAM;
3764
3765 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3766 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3767 continue;
3768 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3769 if (ret)
3770 return ret;
3771
3772 np_cfg[i].pci_func = i;
a1c0c459 3773 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
3774 np_cfg[i].port_num = nic_info.phys_port;
3775 np_cfg[i].fw_capab = nic_info.capabilities;
3776 np_cfg[i].min_bw = nic_info.min_tx_bw;
3777 np_cfg[i].max_bw = nic_info.max_tx_bw;
3778 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3779 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3780 }
3781 memcpy(buf, &np_cfg, size);
3782 return size;
3783}
3784
b6021212
AKS
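/*
 * "port_stats" read handler: the sysfs offset selects the PCI function and
 * the buffer must be exactly one struct qlcnic_esw_statistics, which is
 * filled with the Rx and Tx counters queried from firmware.
 */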
3785static ssize_t
3786qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3787 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3788{
3789 struct device *dev = container_of(kobj, struct device, kobj);
3790 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3791 struct qlcnic_esw_statistics port_stats;
3792 int ret;
3793
3794 if (size != sizeof(struct qlcnic_esw_statistics))
3795 return QL_STATUS_INVALID_PARAM;
3796
3797 if (offset >= QLCNIC_MAX_PCI_FUNC)
3798 return QL_STATUS_INVALID_PARAM;
3799
3800 memset(&port_stats, 0, size);
3801 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3802 &port_stats.rx);
3803 if (ret)
3804 return ret;
3805
3806 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3807 &port_stats.tx);
3808 if (ret)
3809 return ret;
3810
3811 memcpy(buf, &port_stats, size);
3812 return size;
3813}
3814
3815static ssize_t
3816qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3817 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3818{
3819 struct device *dev = container_of(kobj, struct device, kobj);
3820 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3821 struct qlcnic_esw_statistics esw_stats;
3822 int ret;
3823
3824 if (size != sizeof(struct qlcnic_esw_statistics))
3825 return QL_STATUS_INVALID_PARAM;
3826
3827 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3828 return QL_STATUS_INVALID_PARAM;
3829
3830 memset(&esw_stats, 0, size);
3831 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3832 &esw_stats.rx);
3833 if (ret)
3834 return ret;
3835
3836 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3837 &esw_stats.tx);
3838 if (ret)
3839 return ret;
3840
3841 memcpy(buf, &esw_stats, size);
3842 return size;
3843}
3844
3845static ssize_t
3846qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3847 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3848{
3849 struct device *dev = container_of(kobj, struct device, kobj);
3850 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3851 int ret;
3852
3853 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3854 return QL_STATUS_INVALID_PARAM;
3855
3856 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3857 QLCNIC_QUERY_RX_COUNTER);
3858 if (ret)
3859 return ret;
3860
3861 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3862 QLCNIC_QUERY_TX_COUNTER);
3863 if (ret)
3864 return ret;
3865
3866 return size;
3867}
3868
3869static ssize_t
3870qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3871 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3872{
3873
3874 struct device *dev = container_of(kobj, struct device, kobj);
3875 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3876 int ret;
3877
3878 if (offset >= QLCNIC_MAX_PCI_FUNC)
3879 return QL_STATUS_INVALID_PARAM;
3880
3881 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3882 QLCNIC_QUERY_RX_COUNTER);
3883 if (ret)
3884 return ret;
3885
3886 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3887 QLCNIC_QUERY_TX_COUNTER);
3888 if (ret)
3889 return ret;
3890
3891 return size;
3892}
3893
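/*
 * "pci_config" read handler: dumps the per-function PCI layout (function
 * type, physical port, Tx bandwidth limits and default MAC address) as
 * reported by qlcnic_get_pci_info().
 */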
346fe763
RB
3894static ssize_t
3895qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3896 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3897{
3898 struct device *dev = container_of(kobj, struct device, kobj);
3899 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3900 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3901 struct qlcnic_pci_info *pci_info;
346fe763
RB
3902 int i, ret;
3903
3904 if (size != sizeof(pci_cfg))
3905 return QL_STATUS_INVALID_PARAM;
3906
e88db3bd
DC
3907 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3908 if (!pci_info)
3909 return -ENOMEM;
3910
346fe763 3911 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
3912 if (ret) {
3913 kfree(pci_info);
346fe763 3914 return ret;
e88db3bd 3915 }
346fe763
RB
3916
3917 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3918 pci_cfg[i].pci_func = pci_info[i].id;
3919 pci_cfg[i].func_type = pci_info[i].type;
3920 pci_cfg[i].port_num = pci_info[i].default_port;
3921 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3922 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3923 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3924 }
3925 memcpy(buf, &pci_cfg, size);
e88db3bd 3926 kfree(pci_info);
346fe763 3927 return size;
346fe763
RB
3928}
3929static struct bin_attribute bin_attr_npar_config = {
3930 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3931 .size = 0,
3932 .read = qlcnic_sysfs_read_npar_config,
3933 .write = qlcnic_sysfs_write_npar_config,
3934};
3935
3936static struct bin_attribute bin_attr_pci_config = {
3937 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3938 .size = 0,
3939 .read = qlcnic_sysfs_read_pci_config,
3940 .write = NULL,
3941};
3942
b6021212
AKS
3943static struct bin_attribute bin_attr_port_stats = {
3944 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3945 .size = 0,
3946 .read = qlcnic_sysfs_get_port_stats,
3947 .write = qlcnic_sysfs_clear_port_stats,
3948};
3949
3950static struct bin_attribute bin_attr_esw_stats = {
3951 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3952 .size = 0,
3953 .read = qlcnic_sysfs_get_esw_stats,
3954 .write = qlcnic_sysfs_clear_esw_stats,
3955};
3956
346fe763
RB
3957static struct bin_attribute bin_attr_esw_config = {
3958 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3959 .size = 0,
3960 .read = qlcnic_sysfs_read_esw_config,
3961 .write = qlcnic_sysfs_write_esw_config,
3962};
3963
3964static struct bin_attribute bin_attr_pm_config = {
3965 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3966 .size = 0,
3967 .read = qlcnic_sysfs_read_pm_config,
3968 .write = qlcnic_sysfs_write_pm_config,
3969};
3970
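/*
 * Usage note (illustrative, not driver code): the binary attributes above
 * are created on the adapter's PCI device, so they appear as e.g.
 * /sys/bus/pci/devices/0000:03:00.0/port_stats (example BDF).  A userspace
 * tool reads one function's counters by passing the PCI function number as
 * the file offset and exactly sizeof(struct qlcnic_esw_statistics) bytes:
 *
 *	struct qlcnic_esw_statistics st;
 *	int fd = open("/sys/bus/pci/devices/0000:03:00.0/port_stats",
 *		      O_RDONLY);
 *	ssize_t n = pread(fd, &st, sizeof(st), pci_func);
 *
 * npar_config, esw_config and pci_config differ in that their read
 * handlers expect the whole QLCNIC_MAX_PCI_FUNC-entry array in a single
 * read.
 */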
af19b491
AKS
3971static void
3972qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3973{
3974 struct device *dev = &adapter->pdev->dev;
3975
3976 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3977 if (device_create_file(dev, &dev_attr_bridged_mode))
3978 dev_warn(dev,
3979 "failed to create bridged_mode sysfs entry\n");
3980}
3981
3982static void
3983qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3984{
3985 struct device *dev = &adapter->pdev->dev;
3986
3987 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3988 device_remove_file(dev, &dev_attr_bridged_mode);
3989}
3990
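/*
 * Diagnostic sysfs entries are created according to privilege level:
 * port_stats is always exported, diag_mode/crb/mem/pci_config need a
 * privileged function, the e-switch entries need QLCNIC_ESWITCH_ENABLED,
 * and npar_config/pm_config/esw_stats are management-function only.
 */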
3991static void
3992qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3993{
3994 struct device *dev = &adapter->pdev->dev;
3995
b6021212
AKS
3996 if (device_create_bin_file(dev, &bin_attr_port_stats))
3997 dev_info(dev, "failed to create port stats sysfs entry\n");
3998
132ff00a
AC
3999 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4000 return;
af19b491
AKS
4001 if (device_create_file(dev, &dev_attr_diag_mode))
4002 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4003 if (device_create_bin_file(dev, &bin_attr_crb))
4004 dev_info(dev, "failed to create crb sysfs entry\n");
4005 if (device_create_bin_file(dev, &bin_attr_mem))
4006 dev_info(dev, "failed to create mem sysfs entry\n");
53478fef
SC
4007 if (device_create_bin_file(dev, &bin_attr_pci_config))
4008 dev_info(dev, "failed to create pci config sysfs entry\n");
4e8acb01
RB
4009 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4010 return;
4011 if (device_create_bin_file(dev, &bin_attr_esw_config))
4012 dev_info(dev, "failed to create esw config sysfs entry\n");
4013 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4014 return;
346fe763
RB
4015 if (device_create_bin_file(dev, &bin_attr_npar_config))
4016 dev_info(dev, "failed to create npar config sysfs entry\n");
346fe763
RB
4017 if (device_create_bin_file(dev, &bin_attr_pm_config))
4018 dev_info(dev, "failed to create pm config sysfs entry\n");
b6021212
AKS
4019 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4020 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
af19b491
AKS
4021}
4022
af19b491
AKS
4023static void
4024qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4025{
4026 struct device *dev = &adapter->pdev->dev;
4027
b6021212
AKS
4028 device_remove_bin_file(dev, &bin_attr_port_stats);
4029
132ff00a
AC
4030 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4031 return;
af19b491
AKS
4032 device_remove_file(dev, &dev_attr_diag_mode);
4033 device_remove_bin_file(dev, &bin_attr_crb);
4034 device_remove_bin_file(dev, &bin_attr_mem);
53478fef 4035 device_remove_bin_file(dev, &bin_attr_pci_config);
4e8acb01
RB
4036 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4037 return;
4038 device_remove_bin_file(dev, &bin_attr_esw_config);
4039 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4040 return;
346fe763 4041 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4042 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4043 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
4044}
4045
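/*
 * When CONFIG_INET is enabled the driver listens for netdevice and IPv4
 * address notifications so it can program (or remove) interface addresses
 * in firmware via qlcnic_config_ipaddr(), including addresses configured
 * on VLAN devices stacked on top of a qlcnic interface.
 */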
4046#ifdef CONFIG_INET
4047
4048#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4049
af19b491 4050static void
aec1e845
AKS
4051qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4052 struct net_device *dev, unsigned long event)
af19b491
AKS
4053{
4054 struct in_device *indev;
af19b491 4055
af19b491
AKS
4056 indev = in_dev_get(dev);
4057 if (!indev)
4058 return;
4059
4060 for_ifa(indev) {
4061 switch (event) {
4062 case NETDEV_UP:
4063 qlcnic_config_ipaddr(adapter,
4064 ifa->ifa_address, QLCNIC_IP_UP);
4065 break;
4066 case NETDEV_DOWN:
4067 qlcnic_config_ipaddr(adapter,
4068 ifa->ifa_address, QLCNIC_IP_DOWN);
4069 break;
4070 default:
4071 break;
4072 }
4073 } endfor_ifa(indev);
4074
4075 in_dev_put(indev);
af19b491
AKS
4076}
4077
aec1e845
AKS
4078static void
4079qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4080{
4081 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4082 struct net_device *dev;
4083 u16 vid;
4084
4085 qlcnic_config_indev_addr(adapter, netdev, event);
4086
b9796a14
AC
4087 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4088 dev = vlan_find_dev(netdev, vid);
aec1e845
AKS
4089 if (!dev)
4090 continue;
aec1e845
AKS
4091 qlcnic_config_indev_addr(adapter, dev, event);
4092 }
4093}
4094
af19b491
AKS
4095static int qlcnic_netdev_event(struct notifier_block *this,
4096 unsigned long event, void *ptr)
4097{
4098 struct qlcnic_adapter *adapter;
4099 struct net_device *dev = (struct net_device *)ptr;
4100
4101recheck:
4102 if (dev == NULL)
4103 goto done;
4104
4105 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4106 dev = vlan_dev_real_dev(dev);
4107 goto recheck;
4108 }
4109
4110 if (!is_qlcnic_netdev(dev))
4111 goto done;
4112
4113 adapter = netdev_priv(dev);
4114
4115 if (!adapter)
4116 goto done;
4117
8a15ad1f 4118 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4119 goto done;
4120
aec1e845 4121 qlcnic_config_indev_addr(adapter, dev, event);
af19b491
AKS
4122done:
4123 return NOTIFY_DONE;
4124}
4125
4126static int
4127qlcnic_inetaddr_event(struct notifier_block *this,
4128 unsigned long event, void *ptr)
4129{
4130 struct qlcnic_adapter *adapter;
4131 struct net_device *dev;
4132
4133 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4134
4135 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4136
4137recheck:
aec1e845 4138 if (dev == NULL)
af19b491
AKS
4139 goto done;
4140
4141 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4142 dev = vlan_dev_real_dev(dev);
4143 goto recheck;
4144 }
4145
4146 if (!is_qlcnic_netdev(dev))
4147 goto done;
4148
4149 adapter = netdev_priv(dev);
4150
251a84c9 4151 if (!adapter)
af19b491
AKS
4152 goto done;
4153
8a15ad1f 4154 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4155 goto done;
4156
4157 switch (event) {
4158 case NETDEV_UP:
4159 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4160 break;
4161 case NETDEV_DOWN:
4162 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4163 break;
4164 default:
4165 break;
4166 }
4167
4168done:
4169 return NOTIFY_DONE;
4170}
4171
4172static struct notifier_block qlcnic_netdev_cb = {
4173 .notifier_call = qlcnic_netdev_event,
4174};
4175
4176static struct notifier_block qlcnic_inetaddr_cb = {
4177 .notifier_call = qlcnic_inetaddr_event,
4178};
4179#else
4180static void
aec1e845 4181qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
af19b491
AKS
4182{ }
4183#endif
451724c8
SC
4184static struct pci_error_handlers qlcnic_err_handler = {
4185 .error_detected = qlcnic_io_error_detected,
4186 .slot_reset = qlcnic_io_slot_reset,
4187 .resume = qlcnic_io_resume,
4188};
af19b491
AKS
4189
4190static struct pci_driver qlcnic_driver = {
4191 .name = qlcnic_driver_name,
4192 .id_table = qlcnic_pci_tbl,
4193 .probe = qlcnic_probe,
4194 .remove = __devexit_p(qlcnic_remove),
4195#ifdef CONFIG_PM
4196 .suspend = qlcnic_suspend,
4197 .resume = qlcnic_resume,
4198#endif
451724c8
SC
4199 .shutdown = qlcnic_shutdown,
4200 .err_handler = &qlcnic_err_handler
4201
af19b491
AKS
4202};
4203
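/*
 * Module init/exit: bring up the driver workqueue, register the address
 * notifiers (CONFIG_INET only) and the PCI driver, and unwind everything
 * in reverse order on failure or unload.
 */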
4204static int __init qlcnic_init_module(void)
4205{
0cf3a14c 4206 int ret;
af19b491
AKS
4207
4208 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4209
f7ec804a
AKS
4210 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4211 if (qlcnic_wq == NULL) {
4212 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4213 return -ENOMEM;
4214 }
4215
af19b491
AKS
4216#ifdef CONFIG_INET
4217 register_netdevice_notifier(&qlcnic_netdev_cb);
4218 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4219#endif
4220
0cf3a14c
AKS
4221 ret = pci_register_driver(&qlcnic_driver);
4222 if (ret) {
4223#ifdef CONFIG_INET
4224 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4225 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4226#endif
f7ec804a 4227 destroy_workqueue(qlcnic_wq);
0cf3a14c 4228 }
af19b491 4229
0cf3a14c 4230 return ret;
af19b491
AKS
4231}
4232
4233module_init(qlcnic_init_module);
4234
4235static void __exit qlcnic_exit_module(void)
4236{
4237
4238 pci_unregister_driver(&qlcnic_driver);
4239
4240#ifdef CONFIG_INET
4241 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4242 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4243#endif
f7ec804a 4244 destroy_workqueue(qlcnic_wq);
af19b491
AKS
4245}
4246
4247module_exit(qlcnic_exit_module);