linux-2.6-block.git: drivers/net/qlcnic/qlcnic_main.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37#include <linux/aer.h>
38
39MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
40MODULE_LICENSE("GPL");
41MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
43
44char qlcnic_driver_name[] = "qlcnic";
45static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
47
48static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
49
50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5;
52
53static int use_msi = 1;
54module_param(use_msi, int, 0644);
55MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
56
57static int use_msi_x = 1;
58module_param(use_msi_x, int, 0644);
59MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
60
61static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
62module_param(auto_fw_reset, int, 0644);
63MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
64
65static int load_fw_file;
66module_param(load_fw_file, int, 0644);
67MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
68
69static int qlcnic_config_npars;
70module_param(qlcnic_config_npars, int, 0644);
71MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
72
73static int __devinit qlcnic_probe(struct pci_dev *pdev,
74 const struct pci_device_id *ent);
75static void __devexit qlcnic_remove(struct pci_dev *pdev);
76static int qlcnic_open(struct net_device *netdev);
77static int qlcnic_close(struct net_device *netdev);
78static void qlcnic_tx_timeout(struct net_device *netdev);
79static void qlcnic_attach_work(struct work_struct *work);
80static void qlcnic_fwinit_work(struct work_struct *work);
81static void qlcnic_fw_poll_work(struct work_struct *work);
82static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
83 work_func_t func, int delay);
84static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
85static int qlcnic_poll(struct napi_struct *napi, int budget);
86static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
87#ifdef CONFIG_NET_POLL_CONTROLLER
88static void qlcnic_poll_controller(struct net_device *netdev);
89#endif
90
91static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
92static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
95
96static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
97static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
98static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
99
100static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
101static irqreturn_t qlcnic_intr(int irq, void *data);
102static irqreturn_t qlcnic_msi_intr(int irq, void *data);
103static irqreturn_t qlcnic_msix_intr(int irq, void *data);
104
105static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
107static int qlcnic_start_firmware(struct qlcnic_adapter *);
108
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *);
111static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *);
112static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
113static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
114static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
115/* PCI Device ID Table */
116#define ENTRY(device) \
117 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
118 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
119
120#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
121
122static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
123 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
124 {0,}
125};
126
127MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
128
129
130void
131qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
132 struct qlcnic_host_tx_ring *tx_ring)
133{
134 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
135}
136
137static const u32 msi_tgt_status[8] = {
138 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
139 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
140 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
141 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
142};
143
144static const
145struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
146
147static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
148{
149 writel(0, sds_ring->crb_intr_mask);
150}
151
152static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
153{
154 struct qlcnic_adapter *adapter = sds_ring->adapter;
155
156 writel(0x1, sds_ring->crb_intr_mask);
157
158 if (!QLCNIC_IS_MSI_FAMILY(adapter))
159 writel(0xfbff, adapter->tgt_mask_reg);
160}
161
162static int
163qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
164{
165 int size = sizeof(struct qlcnic_host_sds_ring) * count;
166
167 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
168
169 return (recv_ctx->sds_rings == NULL);
170}
171
172static void
173qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
174{
175 if (recv_ctx->sds_rings != NULL)
176 kfree(recv_ctx->sds_rings);
177
178 recv_ctx->sds_rings = NULL;
179}
180
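/*
 * One NAPI context is registered per SDS (status descriptor) ring; the
 * last ring gets the qlcnic_poll handler (which is also expected to
 * service Tx completions), the others the Rx-only qlcnic_rx_poll
 * handler with a larger weight.
 */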
181static int
182qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
183{
184 int ring;
185 struct qlcnic_host_sds_ring *sds_ring;
186 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
187
188 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
189 return -ENOMEM;
190
191 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
192 sds_ring = &recv_ctx->sds_rings[ring];
193
194 if (ring == adapter->max_sds_rings - 1)
195 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
196 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
197 else
198 netif_napi_add(netdev, &sds_ring->napi,
199 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
200 }
201
202 return 0;
203}
204
205static void
206qlcnic_napi_del(struct qlcnic_adapter *adapter)
207{
208 int ring;
209 struct qlcnic_host_sds_ring *sds_ring;
210 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
211
212 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
213 sds_ring = &recv_ctx->sds_rings[ring];
214 netif_napi_del(&sds_ring->napi);
215 }
216
217 qlcnic_free_sds_rings(&adapter->recv_ctx);
218}
219
220static void
221qlcnic_napi_enable(struct qlcnic_adapter *adapter)
222{
223 int ring;
224 struct qlcnic_host_sds_ring *sds_ring;
225 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
226
227 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
228 return;
229
230 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
231 sds_ring = &recv_ctx->sds_rings[ring];
232 napi_enable(&sds_ring->napi);
233 qlcnic_enable_int(sds_ring);
234 }
235}
236
237static void
238qlcnic_napi_disable(struct qlcnic_adapter *adapter)
239{
240 int ring;
241 struct qlcnic_host_sds_ring *sds_ring;
242 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
243
244 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
245 return;
246
247 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
248 sds_ring = &recv_ctx->sds_rings[ring];
249 qlcnic_disable_int(sds_ring);
250 napi_synchronize(&sds_ring->napi);
251 napi_disable(&sds_ring->napi);
252 }
253}
254
255static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
256{
257 memset(&adapter->stats, 0, sizeof(adapter->stats));
258}
259
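/*
 * Program the port mode and WoL port mode registers from the module
 * parameters. Only the P3 HMEZ and XG LOM board types are configurable
 * here; unrecognized values fall back to auto-negotiation.
 */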
260static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
261{
262 u32 val, data;
263
264 val = adapter->ahw.board_type;
265 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
266 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
267 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
268 data = QLCNIC_PORT_MODE_802_3_AP;
269 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
270 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
271 data = QLCNIC_PORT_MODE_XG;
272 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
273 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
274 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
275 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
276 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
277 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
278 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
279 } else {
280 data = QLCNIC_PORT_MODE_AUTO_NEG;
281 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
282 }
283
284 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
286 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
287 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
288 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
289 }
290 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
291 }
292}
293
294static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
295{
296 u32 control;
297 int pos;
298
299 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
300 if (pos) {
301 pci_read_config_dword(pdev, pos, &control);
302 if (enable)
303 control |= PCI_MSIX_FLAGS_ENABLE;
304 else
305 control = 0;
306 pci_write_config_dword(pdev, pos, control);
307 }
308}
309
310static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
311{
312 int i;
313
314 for (i = 0; i < count; i++)
315 adapter->msix_entries[i].entry = i;
316}
317
318static int
319qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
320{
321 u8 mac_addr[ETH_ALEN];
322 struct net_device *netdev = adapter->netdev;
323 struct pci_dev *pdev = adapter->pdev;
324
325 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
326 return -EIO;
327
328 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
329 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
330 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
331
332 /* set station address */
333
334 if (!is_valid_ether_addr(netdev->perm_addr))
335 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
336 netdev->dev_addr);
337
338 return 0;
339}
340
341static int qlcnic_set_mac(struct net_device *netdev, void *p)
342{
343 struct qlcnic_adapter *adapter = netdev_priv(netdev);
344 struct sockaddr *addr = p;
345
346 if (!is_valid_ether_addr(addr->sa_data))
347 return -EINVAL;
348
349 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
350 netif_device_detach(netdev);
351 qlcnic_napi_disable(adapter);
352 }
353
354 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
355 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
356 qlcnic_set_multi(adapter->netdev);
357
358 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
359 netif_device_attach(netdev);
360 qlcnic_napi_enable(adapter);
361 }
362 return 0;
363}
364
365static const struct net_device_ops qlcnic_netdev_ops = {
366 .ndo_open = qlcnic_open,
367 .ndo_stop = qlcnic_close,
368 .ndo_start_xmit = qlcnic_xmit_frame,
369 .ndo_get_stats = qlcnic_get_stats,
370 .ndo_validate_addr = eth_validate_addr,
371 .ndo_set_multicast_list = qlcnic_set_multi,
372 .ndo_set_mac_address = qlcnic_set_mac,
373 .ndo_change_mtu = qlcnic_change_mtu,
374 .ndo_tx_timeout = qlcnic_tx_timeout,
375#ifdef CONFIG_NET_POLL_CONTROLLER
376 .ndo_poll_controller = qlcnic_poll_controller,
377#endif
378};
379
380static struct qlcnic_nic_template qlcnic_ops = {
381 .get_mac_addr = qlcnic_get_mac_address,
382 .config_bridged_mode = qlcnic_config_bridged_mode,
383 .config_led = qlcnic_config_led,
384 .set_ilb_mode = qlcnic_set_ilb_mode,
385 .clear_ilb_mode = qlcnic_clear_ilb_mode,
386 .start_firmware = qlcnic_start_firmware
387};
388
389static struct qlcnic_nic_template qlcnic_vf_ops = {
390 .get_mac_addr = qlcnic_get_mac_address,
391 .config_bridged_mode = qlcnicvf_config_bridged_mode,
392 .config_led = qlcnicvf_config_led,
393 .set_ilb_mode = qlcnicvf_set_ilb_mode,
394 .clear_ilb_mode = qlcnicvf_clear_ilb_mode,
395 .start_firmware = qlcnicvf_start_firmware
396};
397
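/*
 * Interrupt setup: try MSI-X first when the hardware supports it, fall
 * back to MSI, and finally to legacy INTx. max_sds_rings is raised above
 * one only when MSI-X is enabled and RSS is supported.
 */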
398static void
399qlcnic_setup_intr(struct qlcnic_adapter *adapter)
400{
401 const struct qlcnic_legacy_intr_set *legacy_intrp;
402 struct pci_dev *pdev = adapter->pdev;
403 int err, num_msix;
404
405 if (adapter->rss_supported) {
406 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
407 MSIX_ENTRIES_PER_ADAPTER : 2;
408 } else
409 num_msix = 1;
410
411 adapter->max_sds_rings = 1;
412
413 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
414
415 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
416
417 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
418 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
419 legacy_intrp->tgt_status_reg);
420 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
421 legacy_intrp->tgt_mask_reg);
422 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
423
424 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
425 ISR_INT_STATE_REG);
426
427 qlcnic_set_msix_bit(pdev, 0);
428
429 if (adapter->msix_supported) {
430
431 qlcnic_init_msix_entries(adapter, num_msix);
432 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
433 if (err == 0) {
434 adapter->flags |= QLCNIC_MSIX_ENABLED;
435 qlcnic_set_msix_bit(pdev, 1);
436
437 if (adapter->rss_supported)
438 adapter->max_sds_rings = num_msix;
439
440 dev_info(&pdev->dev, "using msi-x interrupts\n");
441 return;
442 }
443
444 if (err > 0)
445 pci_disable_msix(pdev);
446
447 /* fall through for msi */
448 }
449
450 if (use_msi && !pci_enable_msi(pdev)) {
451 adapter->flags |= QLCNIC_MSI_ENABLED;
452 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
453 msi_tgt_status[adapter->ahw.pci_func]);
454 dev_info(&pdev->dev, "using msi interrupts\n");
455 adapter->msix_entries[0].vector = pdev->irq;
456 return;
457 }
458
459 dev_info(&pdev->dev, "using legacy interrupts\n");
460 adapter->msix_entries[0].vector = pdev->irq;
461}
462
463static void
464qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
465{
466 if (adapter->flags & QLCNIC_MSIX_ENABLED)
467 pci_disable_msix(adapter->pdev);
468 if (adapter->flags & QLCNIC_MSI_ENABLED)
469 pci_disable_msi(adapter->pdev);
470}
471
472static void
473qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
474{
475 if (adapter->ahw.pci_base0 != NULL)
476 iounmap(adapter->ahw.pci_base0);
477}
478
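/*
 * Allocate the per-function NPAR array and the eswitch array (if not
 * already present) and populate them from the firmware's PCI info;
 * if reading the PCI info fails, the allocations are released again.
 */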
479static int
480qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
481{
482 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
483 int i, ret = 0, err;
484 u8 pfn;
485
486 if (!adapter->npars)
487 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
488 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
489 if (!adapter->npars)
490 return -ENOMEM;
491
492 if (!adapter->eswitch)
493 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
494 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
495 if (!adapter->eswitch) {
496 err = -ENOMEM;
497 goto err_eswitch;
498 }
499
500 ret = qlcnic_get_pci_info(adapter, pci_info);
501 if (!ret) {
502 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
503 pfn = pci_info[i].id;
504 if (pfn > QLCNIC_MAX_PCI_FUNC)
505 return QL_STATUS_INVALID_PARAM;
506 adapter->npars[pfn].active = pci_info[i].active;
507 adapter->npars[pfn].type = pci_info[i].type;
508 adapter->npars[pfn].phy_port = pci_info[i].default_port;
509 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
510 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
511 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
512 }
513
514 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
515 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
516
517 return ret;
518 }
519
520 kfree(adapter->eswitch);
521 adapter->eswitch = NULL;
522err_eswitch:
523 kfree(adapter->npars);
524
525 return ret;
526}
527
528static int
529qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
530{
531 u8 id;
532 u32 ref_count;
533 int i, ret = 1;
534 u32 data = QLCNIC_MGMT_FUNC;
535 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
536
537 /* If other drivers are not in use set their privilege level */
538 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
539 ret = qlcnic_api_lock(adapter);
540 if (ret)
541 goto err_lock;
542 if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
543 goto err_npar;
544
545 if (qlcnic_config_npars) {
546 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
547 id = i;
548 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
549 id == adapter->ahw.pci_func)
550 continue;
551 data |= (qlcnic_config_npars &
552 QLC_DEV_SET_DRV(0xf, id));
553 }
554 } else {
555 data = readl(priv_op);
556 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
557 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
558 adapter->ahw.pci_func));
559 }
560 writel(data, priv_op);
561err_npar:
562 qlcnic_api_unlock(adapter);
563err_lock:
564 return ret;
565}
566
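/*
 * Read the firmware HAL version, derive the PCI function number from
 * the MSI-X table offset and select the operations template: management
 * and privileged functions use qlcnic_ops, non-privileged functions the
 * restricted qlcnic_vf_ops.
 */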
567static u32
568qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
569{
570 void __iomem *msix_base_addr;
571 void __iomem *priv_op;
572 struct qlcnic_info nic_info;
573 u32 func;
574 u32 msix_base;
575 u32 op_mode, priv_level;
576
577 /* Determine FW API version */
578 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
579
580 /* Find PCI function number */
581 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
582 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
583 msix_base = readl(msix_base_addr);
584 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
585 adapter->ahw.pci_func = func;
586
587 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
588 adapter->capabilities = nic_info.capabilities;
589
590 if (adapter->capabilities & BIT_6)
591 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
592 else
593 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
594 }
595
596 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
597 adapter->nic_ops = &qlcnic_ops;
598 return adapter->fw_hal_version;
599 }
600
601 /* Determine function privilege level */
602 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
603 op_mode = readl(priv_op);
604 if (op_mode == QLC_DEV_DRV_DEFAULT)
605 priv_level = QLCNIC_MGMT_FUNC;
606 else
607 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
608
609 switch (priv_level) {
610 case QLCNIC_MGMT_FUNC:
611 adapter->op_mode = QLCNIC_MGMT_FUNC;
612 adapter->nic_ops = &qlcnic_ops;
613 qlcnic_init_pci_info(adapter);
614 /* Set privilege level for other functions */
615 qlcnic_set_function_modes(adapter);
616 dev_info(&adapter->pdev->dev,
617 "HAL Version: %d, Management function\n",
618 adapter->fw_hal_version);
619 break;
620 case QLCNIC_PRIV_FUNC:
621 adapter->op_mode = QLCNIC_PRIV_FUNC;
622 dev_info(&adapter->pdev->dev,
623 "HAL Version: %d, Privileged function\n",
624 adapter->fw_hal_version);
625 adapter->nic_ops = &qlcnic_ops;
626 break;
627 case QLCNIC_NON_PRIV_FUNC:
628 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
629 dev_info(&adapter->pdev->dev,
630 "HAL Version: %d Non Privileged function\n",
631 adapter->fw_hal_version);
632 adapter->nic_ops = &qlcnic_vf_ops;
633 break;
634 default:
635 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
636 priv_level);
637 return 0;
638 }
639 return adapter->fw_hal_version;
640}
641
642static int
643qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
644{
645 void __iomem *mem_ptr0 = NULL;
646 resource_size_t mem_base;
647 unsigned long mem_len, pci_len0 = 0;
648
649 struct pci_dev *pdev = adapter->pdev;
650
651 /* remap phys address */
652 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
653 mem_len = pci_resource_len(pdev, 0);
654
655 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
656
657 mem_ptr0 = pci_ioremap_bar(pdev, 0);
658 if (mem_ptr0 == NULL) {
659 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
660 return -EIO;
661 }
662 pci_len0 = mem_len;
663 } else {
664 return -EIO;
665 }
666
667 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
668
669 adapter->ahw.pci_base0 = mem_ptr0;
670 adapter->ahw.pci_len0 = pci_len0;
671
672 if (!qlcnic_get_driver_mode(adapter)) {
673 iounmap(adapter->ahw.pci_base0);
674 return -EIO;
675 }
676
677 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
678 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
679
680 return 0;
681}
682
683static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
684{
685 struct pci_dev *pdev = adapter->pdev;
686 int i, found = 0;
687
688 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
689 if (qlcnic_boards[i].vendor == pdev->vendor &&
690 qlcnic_boards[i].device == pdev->device &&
691 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
692 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
693 sprintf(name, "%pM: %s" ,
694 adapter->mac_addr,
695 qlcnic_boards[i].short_name);
696 found = 1;
697 break;
698 }
699
700 }
701
702 if (!found)
703 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
704}
705
706static void
707qlcnic_check_options(struct qlcnic_adapter *adapter)
708{
709 u32 fw_major, fw_minor, fw_build;
710 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
711 char serial_num[32];
712 int i, offset, val;
713 int *ptr32;
714 struct pci_dev *pdev = adapter->pdev;
715 struct qlcnic_info nic_info;
716 adapter->driver_mismatch = 0;
717
718 ptr32 = (int *)&serial_num;
719 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
720 for (i = 0; i < 8; i++) {
721 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
722 dev_err(&pdev->dev, "error reading board info\n");
723 adapter->driver_mismatch = 1;
724 return;
725 }
726 ptr32[i] = cpu_to_le32(val);
727 offset += sizeof(u32);
728 }
729
730 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
731 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
732 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
733
734 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
735
736 if (adapter->portnum == 0) {
737 get_brd_name(adapter, brd_name);
738
739 pr_info("%s: %s Board Chip rev 0x%x\n",
740 module_name(THIS_MODULE),
741 brd_name, adapter->ahw.revision_id);
742 }
743
744 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
745 fw_major, fw_minor, fw_build);
746
747 adapter->flags &= ~QLCNIC_LRO_ENABLED;
748
749 if (adapter->ahw.port_type == QLCNIC_XGBE) {
750 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
751 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
752 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
753 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
754 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
755 }
756
757 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
758 adapter->physical_port = nic_info.phys_port;
759 adapter->switch_mode = nic_info.switch_mode;
760 adapter->max_tx_ques = nic_info.max_tx_ques;
761 adapter->max_rx_ques = nic_info.max_rx_ques;
762 adapter->capabilities = nic_info.capabilities;
763 adapter->max_mac_filters = nic_info.max_mac_filters;
764 adapter->max_mtu = nic_info.max_mtu;
765 }
766
767 adapter->msix_supported = !!use_msi_x;
768 adapter->rss_supported = !!use_msi_x;
769
770 adapter->num_txd = MAX_CMD_DESCRIPTORS;
771
772 adapter->max_rds_rings = 2;
773}
774
775static int
776qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
777{
778 int i, err = 0;
779 struct qlcnic_npar_info *npar;
780 struct qlcnic_info nic_info;
781
782 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
783 !adapter->need_fw_reset)
784 return 0;
785
786 if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
787 /* Set the NPAR config data after FW reset */
788 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
789 npar = &adapter->npars[i];
790 if (npar->type != QLCNIC_TYPE_NIC)
791 continue;
792 err = qlcnic_get_nic_info(adapter, &nic_info, i);
793 if (err)
794 goto err_out;
795 nic_info.min_tx_bw = npar->min_bw;
796 nic_info.max_tx_bw = npar->max_bw;
797 err = qlcnic_set_nic_info(adapter, &nic_info);
798 if (err)
799 goto err_out;
800
801 if (npar->enable_pm) {
802 err = qlcnic_config_port_mirroring(adapter,
803 npar->dest_npar, 1, i);
804 if (err)
805 goto err_out;
806
807 }
808 npar->mac_learning = DEFAULT_MAC_LEARN;
809 npar->host_vlan_tag = 0;
810 npar->promisc_mode = 0;
811 npar->discard_tagged = 0;
812 npar->vlan_id = 0;
813 }
814 }
815err_out:
816 return err;
817}
818
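/*
 * Management-function firmware bring-up: check whether this function may
 * start the firmware, load it from flash or from a file (load_fw_file),
 * reinitialize the pegs when needed, wait for the init handshake and
 * then mark the device READY and restore the NPAR configuration.
 */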
819static int
820qlcnic_start_firmware(struct qlcnic_adapter *adapter)
821{
822 int val, err, first_boot;
823
824 err = qlcnic_can_start_firmware(adapter);
825 if (err < 0)
826 return err;
827 else if (!err)
828 goto wait_init;
829
830 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
831 if (first_boot == 0x55555555)
832 /* This is the first boot after power up */
833 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
834
835 if (load_fw_file)
836 qlcnic_request_firmware(adapter);
837 else {
838 if (qlcnic_check_flash_fw_ver(adapter))
839 goto err_out;
840
841 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
842 }
843
844 err = qlcnic_need_fw_reset(adapter);
845 if (err < 0)
846 goto err_out;
847 if (err == 0)
848 goto wait_init;
849
850 if (first_boot != 0x55555555) {
851 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
852 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
853 qlcnic_pinit_from_rom(adapter);
854 msleep(1);
855 }
856
857 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
858 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
859
860 qlcnic_set_port_mode(adapter);
861
862 err = qlcnic_load_firmware(adapter);
863 if (err)
864 goto err_out;
865
866 qlcnic_release_firmware(adapter);
867
868 val = (_QLCNIC_LINUX_MAJOR << 16)
869 | ((_QLCNIC_LINUX_MINOR << 8))
870 | (_QLCNIC_LINUX_SUBVERSION);
871 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
872
873wait_init:
874 /* Handshake with the card before we register the devices. */
875 err = qlcnic_init_firmware(adapter);
876 if (err)
877 goto err_out;
878
879 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
880 qlcnic_idc_debug_info(adapter, 1);
881
882 qlcnic_check_options(adapter);
883 if (qlcnic_reset_npar_config(adapter))
884 goto err_out;
885 qlcnic_dev_set_npar_ready(adapter);
886
887 adapter->need_fw_reset = 0;
888
889 qlcnic_release_firmware(adapter);
890 return 0;
891
892err_out:
893 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
894 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
895 qlcnic_release_firmware(adapter);
896 return err;
897}
898
899static int
900qlcnic_request_irq(struct qlcnic_adapter *adapter)
901{
902 irq_handler_t handler;
903 struct qlcnic_host_sds_ring *sds_ring;
904 int err, ring;
905
906 unsigned long flags = 0;
907 struct net_device *netdev = adapter->netdev;
908 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
909
910 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
911 handler = qlcnic_tmp_intr;
912 if (!QLCNIC_IS_MSI_FAMILY(adapter))
913 flags |= IRQF_SHARED;
914
915 } else {
916 if (adapter->flags & QLCNIC_MSIX_ENABLED)
917 handler = qlcnic_msix_intr;
918 else if (adapter->flags & QLCNIC_MSI_ENABLED)
919 handler = qlcnic_msi_intr;
920 else {
921 flags |= IRQF_SHARED;
922 handler = qlcnic_intr;
923 }
924 }
925 adapter->irq = netdev->irq;
926
927 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
928 sds_ring = &recv_ctx->sds_rings[ring];
929 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
930 err = request_irq(sds_ring->irq, handler,
931 flags, sds_ring->name, sds_ring);
932 if (err)
933 return err;
934 }
935
936 return 0;
937}
938
939static void
940qlcnic_free_irq(struct qlcnic_adapter *adapter)
941{
942 int ring;
943 struct qlcnic_host_sds_ring *sds_ring;
944
945 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
946
947 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
948 sds_ring = &recv_ctx->sds_rings[ring];
949 free_irq(sds_ring->irq, sds_ring);
950 }
951}
952
953static void
954qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
955{
956 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
957 adapter->coal.normal.data.rx_time_us =
958 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
959 adapter->coal.normal.data.rx_packets =
960 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
961 adapter->coal.normal.data.tx_time_us =
962 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
963 adapter->coal.normal.data.tx_packets =
964 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
965}
966
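/*
 * Data-path bring-up: create the firmware context, post Rx buffers,
 * restore the multicast list and MTU, configure RSS, interrupt
 * coalescing and hardware LRO as supported, enable NAPI and request a
 * link event before marking the device up.
 */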
967static int
968__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
969{
970 int ring;
971 struct qlcnic_host_rds_ring *rds_ring;
972
af19b491
AKS
973 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
974 return -EIO;
975
976 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
977 return 0;
978
979 if (qlcnic_fw_create_ctx(adapter))
980 return -EIO;
981
982 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
983 rds_ring = &adapter->recv_ctx.rds_rings[ring];
984 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
985 }
986
987 qlcnic_set_multi(netdev);
988 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
989
990 adapter->ahw.linkup = 0;
991
992 if (adapter->max_sds_rings > 1)
993 qlcnic_config_rss(adapter, 1);
994
995 qlcnic_config_intr_coalesce(adapter);
996
997 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
998 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
999
1000 qlcnic_napi_enable(adapter);
1001
1002 qlcnic_linkevent_request(adapter, 1);
1003
1004 adapter->reset_context = 0;
1005 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1006 return 0;
1007}
1008
1009/* Usage: During resume and firmware recovery module.*/
1010
1011static int
1012qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1013{
1014 int err = 0;
1015
1016 rtnl_lock();
1017 if (netif_running(netdev))
1018 err = __qlcnic_up(adapter, netdev);
1019 rtnl_unlock();
1020
1021 return err;
1022}
1023
1024static void
1025__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1026{
1027 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1028 return;
1029
1030 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1031 return;
1032
1033 smp_mb();
1034 spin_lock(&adapter->tx_clean_lock);
1035 netif_carrier_off(netdev);
1036 netif_tx_disable(netdev);
1037
1038 qlcnic_free_mac_list(adapter);
1039
1040 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1041
1042 qlcnic_napi_disable(adapter);
1043
1044 qlcnic_fw_destroy_ctx(adapter);
1045
1046 qlcnic_reset_rx_buffers_list(adapter);
1047 qlcnic_release_tx_buffers(adapter);
1048 spin_unlock(&adapter->tx_clean_lock);
1049}
1050
1051/* Usage: During suspend and firmware recovery module */
1052
1053static void
1054qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1055{
1056 rtnl_lock();
1057 if (netif_running(netdev))
1058 __qlcnic_down(adapter, netdev);
1059 rtnl_unlock();
1060
1061}
1062
1063static int
1064qlcnic_attach(struct qlcnic_adapter *adapter)
1065{
1066 struct net_device *netdev = adapter->netdev;
1067 struct pci_dev *pdev = adapter->pdev;
1068 int err;
1069
1070 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1071 return 0;
1072
1073 err = qlcnic_napi_add(adapter, netdev);
1074 if (err)
1075 return err;
1076
1077 err = qlcnic_alloc_sw_resources(adapter);
1078 if (err) {
1079 dev_err(&pdev->dev, "Error in setting sw resources\n");
1080 goto err_out_napi_del;
1081 }
1082
1083 err = qlcnic_alloc_hw_resources(adapter);
1084 if (err) {
1085 dev_err(&pdev->dev, "Error in setting hw resources\n");
1086 goto err_out_free_sw;
1087 }
1088
1089 err = qlcnic_request_irq(adapter);
1090 if (err) {
1091 dev_err(&pdev->dev, "failed to setup interrupt\n");
1092 goto err_out_free_hw;
1093 }
1094
1095 qlcnic_init_coalesce_defaults(adapter);
1096
1097 qlcnic_create_sysfs_entries(adapter);
1098
1099 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1100 return 0;
1101
1102err_out_free_hw:
1103 qlcnic_free_hw_resources(adapter);
1104err_out_free_sw:
1105 qlcnic_free_sw_resources(adapter);
1106err_out_napi_del:
1107 qlcnic_napi_del(adapter);
1108 return err;
1109}
1110
1111static void
1112qlcnic_detach(struct qlcnic_adapter *adapter)
1113{
1114 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1115 return;
1116
1117 qlcnic_remove_sysfs_entries(adapter);
1118
1119 qlcnic_free_hw_resources(adapter);
1120 qlcnic_release_rx_buffers(adapter);
1121 qlcnic_free_irq(adapter);
1122 qlcnic_napi_del(adapter);
1123 qlcnic_free_sw_resources(adapter);
1124
1125 adapter->is_up = 0;
1126}
1127
1128void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1129{
1130 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1131 struct qlcnic_host_sds_ring *sds_ring;
1132 int ring;
1133
1134 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1135 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1136 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1137 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1138 qlcnic_disable_int(sds_ring);
1139 }
1140 }
1141
1142 qlcnic_fw_destroy_ctx(adapter);
1143
1144 qlcnic_detach(adapter);
1145
1146 adapter->diag_test = 0;
1147 adapter->max_sds_rings = max_sds_rings;
1148
1149 if (qlcnic_attach(adapter))
1150 goto out;
1151
1152 if (netif_running(netdev))
1153 __qlcnic_up(adapter, netdev);
1154out:
1155 netif_device_attach(netdev);
1156}
1157
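/*
 * Switch the interface into diagnostic mode: tear down the normal setup,
 * re-attach with a single SDS ring, recreate the firmware context and,
 * for the interrupt test, re-enable interrupts on that ring.
 */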
1158int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1159{
1160 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1161 struct qlcnic_host_sds_ring *sds_ring;
1162 struct qlcnic_host_rds_ring *rds_ring;
1163 int ring;
1164 int ret;
1165
1166 netif_device_detach(netdev);
1167
1168 if (netif_running(netdev))
1169 __qlcnic_down(adapter, netdev);
1170
1171 qlcnic_detach(adapter);
1172
1173 adapter->max_sds_rings = 1;
1174 adapter->diag_test = test;
1175
1176 ret = qlcnic_attach(adapter);
1177 if (ret) {
1178 netif_device_attach(netdev);
1179 return ret;
1180 }
1181
1182 ret = qlcnic_fw_create_ctx(adapter);
1183 if (ret) {
1184 qlcnic_detach(adapter);
1185 return ret;
1186 }
1187
1188 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1189 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1190 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1191 }
1192
1193 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1194 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1195 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1196 qlcnic_enable_int(sds_ring);
1197 }
1198 }
1199 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1200
1201 return 0;
1202}
1203
1204/* Reset context in hardware only */
1205static int
1206qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1207{
1208 struct net_device *netdev = adapter->netdev;
1209
1210 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1211 return -EBUSY;
1212
1213 netif_device_detach(netdev);
1214
1215 qlcnic_down(adapter, netdev);
1216
1217 qlcnic_up(adapter, netdev);
1218
1219 netif_device_attach(netdev);
1220
1221 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1222 return 0;
1223}
1224
1225int
1226qlcnic_reset_context(struct qlcnic_adapter *adapter)
1227{
1228 int err = 0;
1229 struct net_device *netdev = adapter->netdev;
1230
1231 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1232 return -EBUSY;
1233
1234 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1235
1236 netif_device_detach(netdev);
1237
1238 if (netif_running(netdev))
1239 __qlcnic_down(adapter, netdev);
1240
1241 qlcnic_detach(adapter);
1242
1243 if (netif_running(netdev)) {
1244 err = qlcnic_attach(adapter);
1245 if (!err)
1246 __qlcnic_up(adapter, netdev);
1247 }
1248
1249 netif_device_attach(netdev);
1250 }
1251
1252 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1253 return err;
1254}
1255
1256static int
1257qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1258 struct net_device *netdev, u8 pci_using_dac)
1259{
1260 int err;
1261 struct pci_dev *pdev = adapter->pdev;
1262
1263 adapter->rx_csum = 1;
1264 adapter->mc_enabled = 0;
1265 adapter->max_mc_count = 38;
1266
1267 netdev->netdev_ops = &qlcnic_netdev_ops;
1268 netdev->watchdog_timeo = 5*HZ;
1269
1270 qlcnic_change_mtu(netdev, netdev->mtu);
1271
1272 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1273
1274 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1275 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
1276 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1277 NETIF_F_IPV6_CSUM);
1278
1279 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1280 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1281 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1282 }
1283
1284 if (pci_using_dac) {
1285 netdev->features |= NETIF_F_HIGHDMA;
1286 netdev->vlan_features |= NETIF_F_HIGHDMA;
1287 }
1288
1289 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1290 netdev->features |= (NETIF_F_HW_VLAN_TX);
1291
1292 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1293 netdev->features |= NETIF_F_LRO;
1294
1295 netdev->irq = adapter->msix_entries[0].vector;
1296
1297 if (qlcnic_read_mac_addr(adapter))
1298 dev_warn(&pdev->dev, "failed to read mac addr\n");
1299
1300 netif_carrier_off(netdev);
1301 netif_stop_queue(netdev);
1302
1303 err = register_netdev(netdev);
1304 if (err) {
1305 dev_err(&pdev->dev, "failed to register net device\n");
1306 return err;
1307 }
1308
1309 return 0;
1310}
1311
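/*
 * Prefer a 64-bit DMA mask (with matching consistent mask) and fall back
 * to 32-bit; pci_using_dac is reported so NETIF_F_HIGHDMA is advertised
 * only when 64-bit addressing succeeded.
 */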
1312static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1313{
1314 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1315 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1316 *pci_using_dac = 1;
1317 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1318 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1319 *pci_using_dac = 0;
1320 else {
1321 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1322 return -EIO;
1323 }
1324
1325 return 0;
1326}
1327
1328static int __devinit
1329qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1330{
1331 struct net_device *netdev = NULL;
1332 struct qlcnic_adapter *adapter = NULL;
1333 int err;
1334 uint8_t revision_id;
1335 uint8_t pci_using_dac;
1336
1337 err = pci_enable_device(pdev);
1338 if (err)
1339 return err;
1340
1341 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1342 err = -ENODEV;
1343 goto err_out_disable_pdev;
1344 }
1345
1346 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1347 if (err)
1348 goto err_out_disable_pdev;
1349
1350 err = pci_request_regions(pdev, qlcnic_driver_name);
1351 if (err)
1352 goto err_out_disable_pdev;
1353
1354 pci_set_master(pdev);
1355 pci_enable_pcie_error_reporting(pdev);
1356
1357 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1358 if (!netdev) {
1359 dev_err(&pdev->dev, "failed to allocate net_device\n");
1360 err = -ENOMEM;
1361 goto err_out_free_res;
1362 }
1363
1364 SET_NETDEV_DEV(netdev, &pdev->dev);
1365
1366 adapter = netdev_priv(netdev);
1367 adapter->netdev = netdev;
1368 adapter->pdev = pdev;
1369 adapter->dev_rst_time = jiffies;
1370
1371 revision_id = pdev->revision;
1372 adapter->ahw.revision_id = revision_id;
1373
1374 rwlock_init(&adapter->ahw.crb_lock);
1375 mutex_init(&adapter->ahw.mem_lock);
1376
1377 spin_lock_init(&adapter->tx_clean_lock);
1378 INIT_LIST_HEAD(&adapter->mac_list);
1379
1380 err = qlcnic_setup_pci_map(adapter);
1381 if (err)
1382 goto err_out_free_netdev;
1383
1384 /* This will be reset for mezz cards */
1385 adapter->portnum = adapter->ahw.pci_func;
1386
1387 err = qlcnic_get_board_info(adapter);
1388 if (err) {
1389 dev_err(&pdev->dev, "Error getting board config info.\n");
1390 goto err_out_iounmap;
1391 }
1392
1393 if (qlcnic_read_mac_addr(adapter))
1394 dev_warn(&pdev->dev, "failed to read mac addr\n");
1395
1396 if (qlcnic_setup_idc_param(adapter))
1397 goto err_out_iounmap;
1398
1399 err = adapter->nic_ops->start_firmware(adapter);
1400 if (err) {
1401 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
1402 goto err_out_decr_ref;
1403 }
1404
1405 qlcnic_clear_stats(adapter);
1406
1407 qlcnic_setup_intr(adapter);
1408
1409 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1410 if (err)
1411 goto err_out_disable_msi;
1412
1413 pci_set_drvdata(pdev, adapter);
1414
1415 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1416
1417 switch (adapter->ahw.port_type) {
1418 case QLCNIC_GBE:
1419 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1420 adapter->netdev->name);
1421 break;
1422 case QLCNIC_XGBE:
1423 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1424 adapter->netdev->name);
1425 break;
1426 }
1427
1428 qlcnic_create_diag_entries(adapter);
1429
1430 return 0;
1431
1432err_out_disable_msi:
1433 qlcnic_teardown_intr(adapter);
1434
1435err_out_decr_ref:
1436 qlcnic_clr_all_drv_state(adapter);
1437
1438err_out_iounmap:
1439 qlcnic_cleanup_pci_map(adapter);
1440
1441err_out_free_netdev:
1442 free_netdev(netdev);
1443
1444err_out_free_res:
1445 pci_release_regions(pdev);
1446
1447err_out_disable_pdev:
1448 pci_set_drvdata(pdev, NULL);
1449 pci_disable_device(pdev);
1450 return err;
1451}
1452
1453static void __devexit qlcnic_remove(struct pci_dev *pdev)
1454{
1455 struct qlcnic_adapter *adapter;
1456 struct net_device *netdev;
1457
1458 adapter = pci_get_drvdata(pdev);
1459 if (adapter == NULL)
1460 return;
1461
1462 netdev = adapter->netdev;
1463
1464 qlcnic_cancel_fw_work(adapter);
1465
1466 unregister_netdev(netdev);
1467
1468 qlcnic_detach(adapter);
1469
1470 if (adapter->npars != NULL)
1471 kfree(adapter->npars);
1472 if (adapter->eswitch != NULL)
1473 kfree(adapter->eswitch);
1474
1475 qlcnic_clr_all_drv_state(adapter);
1476
1477 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1478
1479 qlcnic_teardown_intr(adapter);
1480
1481 qlcnic_remove_diag_entries(adapter);
1482
1483 qlcnic_cleanup_pci_map(adapter);
1484
1485 qlcnic_release_firmware(adapter);
1486
1487 pci_disable_pcie_error_reporting(pdev);
1488 pci_release_regions(pdev);
1489 pci_disable_device(pdev);
1490 pci_set_drvdata(pdev, NULL);
1491
1492 free_netdev(netdev);
1493}
1494static int __qlcnic_shutdown(struct pci_dev *pdev)
1495{
1496 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1497 struct net_device *netdev = adapter->netdev;
1498 int retval;
1499
1500 netif_device_detach(netdev);
1501
1502 qlcnic_cancel_fw_work(adapter);
1503
1504 if (netif_running(netdev))
1505 qlcnic_down(adapter, netdev);
1506
1507 qlcnic_clr_all_drv_state(adapter);
1508
1509 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1510
1511 retval = pci_save_state(pdev);
1512 if (retval)
1513 return retval;
1514
1515 if (qlcnic_wol_supported(adapter)) {
1516 pci_enable_wake(pdev, PCI_D3cold, 1);
1517 pci_enable_wake(pdev, PCI_D3hot, 1);
1518 }
1519
1520 return 0;
1521}
1522
1523static void qlcnic_shutdown(struct pci_dev *pdev)
1524{
1525 if (__qlcnic_shutdown(pdev))
1526 return;
1527
1528 pci_disable_device(pdev);
1529}
1530
1531#ifdef CONFIG_PM
1532static int
1533qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1534{
1535 int retval;
1536
1537 retval = __qlcnic_shutdown(pdev);
1538 if (retval)
1539 return retval;
1540
1541 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1542 return 0;
1543}
1544
1545static int
1546qlcnic_resume(struct pci_dev *pdev)
1547{
1548 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1549 struct net_device *netdev = adapter->netdev;
1550 int err;
1551
1552 err = pci_enable_device(pdev);
1553 if (err)
1554 return err;
1555
1556 pci_set_power_state(pdev, PCI_D0);
1557 pci_set_master(pdev);
1558 pci_restore_state(pdev);
1559
1560 err = adapter->nic_ops->start_firmware(adapter);
1561 if (err) {
1562 dev_err(&pdev->dev, "failed to start firmware\n");
1563 return err;
1564 }
1565
1566 if (netif_running(netdev)) {
1567 err = qlcnic_up(adapter, netdev);
1568 if (err)
1569 goto done;
1570
1571 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1572 }
1573done:
1574 netif_device_attach(netdev);
1575 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1576 return 0;
1577}
1578#endif
1579
1580static int qlcnic_open(struct net_device *netdev)
1581{
1582 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1583 int err;
1584
1585 if (adapter->driver_mismatch)
1586 return -EIO;
1587
1588 err = qlcnic_attach(adapter);
1589 if (err)
1590 return err;
1591
1592 err = __qlcnic_up(adapter, netdev);
1593 if (err)
1594 goto err_out;
1595
1596 netif_start_queue(netdev);
1597
1598 return 0;
1599
1600err_out:
1601 qlcnic_detach(adapter);
1602 return err;
1603}
1604
1605/*
1606 * qlcnic_close - Disables a network interface entry point
1607 */
1608static int qlcnic_close(struct net_device *netdev)
1609{
1610 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1611
1612 __qlcnic_down(adapter, netdev);
1613 return 0;
1614}
1615
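/*
 * Finish the first Tx descriptor: choose the opcode from the L3/L4
 * protocol, record VLAN and checksum-offload flags, and for LSO copy the
 * MAC/IP/TCP headers (plus a synthesized VLAN header when tag insertion
 * is needed) into additional descriptors for the firmware.
 */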
1616static void
1617qlcnic_tso_check(struct net_device *netdev,
1618 struct qlcnic_host_tx_ring *tx_ring,
1619 struct cmd_desc_type0 *first_desc,
1620 struct sk_buff *skb)
1621{
1622 u8 opcode = TX_ETHER_PKT;
1623 __be16 protocol = skb->protocol;
1624 u16 flags = 0, vid = 0;
1625 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1626 struct cmd_desc_type0 *hwdesc;
1627 struct vlan_ethhdr *vh;
1628 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1629 u32 producer = tx_ring->producer;
1630
1631 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1632
1633 vh = (struct vlan_ethhdr *)skb->data;
1634 protocol = vh->h_vlan_encapsulated_proto;
1635 flags = FLAGS_VLAN_TAGGED;
1636
1637 } else if (vlan_tx_tag_present(skb)) {
1638
1639 flags = FLAGS_VLAN_OOB;
1640 vid = vlan_tx_tag_get(skb);
1641 qlcnic_set_tx_vlan_tci(first_desc, vid);
1642 vlan_oob = 1;
1643 }
1644
1645 if (*(skb->data) & BIT_0) {
1646 flags |= BIT_0;
1647 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1648 }
1649
1650 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1651 skb_shinfo(skb)->gso_size > 0) {
1652
1653 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1654
1655 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1656 first_desc->total_hdr_length = hdr_len;
1657 if (vlan_oob) {
1658 first_desc->total_hdr_length += VLAN_HLEN;
1659 first_desc->tcp_hdr_offset = VLAN_HLEN;
1660 first_desc->ip_hdr_offset = VLAN_HLEN;
1661 /* Only in case of TSO on vlan device */
1662 flags |= FLAGS_VLAN_TAGGED;
1663 }
1664
1665 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1666 TX_TCP_LSO6 : TX_TCP_LSO;
1667 tso = 1;
1668
1669 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1670 u8 l4proto;
1671
1672 if (protocol == cpu_to_be16(ETH_P_IP)) {
1673 l4proto = ip_hdr(skb)->protocol;
1674
1675 if (l4proto == IPPROTO_TCP)
1676 opcode = TX_TCP_PKT;
1677 else if (l4proto == IPPROTO_UDP)
1678 opcode = TX_UDP_PKT;
1679 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1680 l4proto = ipv6_hdr(skb)->nexthdr;
1681
1682 if (l4proto == IPPROTO_TCP)
1683 opcode = TX_TCPV6_PKT;
1684 else if (l4proto == IPPROTO_UDP)
1685 opcode = TX_UDPV6_PKT;
1686 }
1687 }
1688
1689 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1690 first_desc->ip_hdr_offset += skb_network_offset(skb);
1691 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1692
1693 if (!tso)
1694 return;
1695
1696 /* For LSO, we need to copy the MAC/IP/TCP headers into
1697 * the descriptor ring
1698 */
1699 copied = 0;
1700 offset = 2;
1701
1702 if (vlan_oob) {
1703 /* Create a TSO vlan header template for firmware */
1704
1705 hwdesc = &tx_ring->desc_head[producer];
1706 tx_ring->cmd_buf_arr[producer].skb = NULL;
1707
1708 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1709 hdr_len + VLAN_HLEN);
1710
1711 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1712 skb_copy_from_linear_data(skb, vh, 12);
1713 vh->h_vlan_proto = htons(ETH_P_8021Q);
1714 vh->h_vlan_TCI = htons(vid);
1715 skb_copy_from_linear_data_offset(skb, 12,
1716 (char *)vh + 16, copy_len - 16);
1717
1718 copied = copy_len - VLAN_HLEN;
1719 offset = 0;
1720
1721 producer = get_next_index(producer, tx_ring->num_desc);
1722 }
1723
1724 while (copied < hdr_len) {
1725
1726 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1727 (hdr_len - copied));
1728
1729 hwdesc = &tx_ring->desc_head[producer];
1730 tx_ring->cmd_buf_arr[producer].skb = NULL;
1731
1732 skb_copy_from_linear_data_offset(skb, copied,
1733 (char *)hwdesc + offset, copy_len);
1734
1735 copied += copy_len;
1736 offset = 0;
1737
1738 producer = get_next_index(producer, tx_ring->num_desc);
1739 }
1740
1741 tx_ring->producer = producer;
1742 barrier();
1743 adapter->stats.lso_frames++;
1744}
1745
1746static int
1747qlcnic_map_tx_skb(struct pci_dev *pdev,
1748 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1749{
1750 struct qlcnic_skb_frag *nf;
1751 struct skb_frag_struct *frag;
1752 int i, nr_frags;
1753 dma_addr_t map;
1754
1755 nr_frags = skb_shinfo(skb)->nr_frags;
1756 nf = &pbuf->frag_array[0];
1757
1758 map = pci_map_single(pdev, skb->data,
1759 skb_headlen(skb), PCI_DMA_TODEVICE);
1760 if (pci_dma_mapping_error(pdev, map))
1761 goto out_err;
1762
1763 nf->dma = map;
1764 nf->length = skb_headlen(skb);
1765
1766 for (i = 0; i < nr_frags; i++) {
1767 frag = &skb_shinfo(skb)->frags[i];
1768 nf = &pbuf->frag_array[i+1];
1769
1770 map = pci_map_page(pdev, frag->page, frag->page_offset,
1771 frag->size, PCI_DMA_TODEVICE);
1772 if (pci_dma_mapping_error(pdev, map))
1773 goto unwind;
1774
1775 nf->dma = map;
1776 nf->length = frag->size;
1777 }
1778
1779 return 0;
1780
1781unwind:
1782 while (--i >= 0) {
1783 nf = &pbuf->frag_array[i+1];
1784 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1785 }
1786
1787 nf = &pbuf->frag_array[0];
1788 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1789
1790out_err:
1791 return -ENOMEM;
1792}
1793
1794static inline void
1795qlcnic_clear_cmddesc(u64 *desc)
1796{
1797 desc[0] = 0ULL;
1798 desc[2] = 0ULL;
1799}
1800
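/*
 * Main transmit path: stop the queue when descriptors run low, DMA-map
 * the skb and its fragments, spread the buffer addresses across command
 * descriptors (four buffers per descriptor), apply the TSO/checksum
 * fixups and ring the producer doorbell.
 */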
1801netdev_tx_t
1802qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1803{
1804 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1805 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1806 struct qlcnic_cmd_buffer *pbuf;
1807 struct qlcnic_skb_frag *buffrag;
1808 struct cmd_desc_type0 *hwdesc, *first_desc;
1809 struct pci_dev *pdev;
1810 int i, k;
1811
1812 u32 producer;
1813 int frag_count, no_of_desc;
1814 u32 num_txd = tx_ring->num_desc;
1815
1816 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1817 netif_stop_queue(netdev);
1818 return NETDEV_TX_BUSY;
1819 }
1820
1821 frag_count = skb_shinfo(skb)->nr_frags + 1;
1822
1823 /* 4 fragments per cmd des */
1824 no_of_desc = (frag_count + 3) >> 2;
1825
1826 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1827 netif_stop_queue(netdev);
1828 smp_mb();
1829 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1830 netif_start_queue(netdev);
1831 else {
1832 adapter->stats.xmit_off++;
1833 return NETDEV_TX_BUSY;
1834 }
1835 }
1836
1837 producer = tx_ring->producer;
1838 pbuf = &tx_ring->cmd_buf_arr[producer];
1839
1840 pdev = adapter->pdev;
1841
1842 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1843 adapter->stats.tx_dma_map_error++;
1844 goto drop_packet;
1845 }
1846
1847 pbuf->skb = skb;
1848 pbuf->frag_count = frag_count;
1849
1850 first_desc = hwdesc = &tx_ring->desc_head[producer];
1851 qlcnic_clear_cmddesc((u64 *)hwdesc);
1852
1853 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1854 qlcnic_set_tx_port(first_desc, adapter->portnum);
1855
1856 for (i = 0; i < frag_count; i++) {
1857
1858 k = i % 4;
1859
1860 if ((k == 0) && (i > 0)) {
1861 /* move to next desc.*/
1862 producer = get_next_index(producer, num_txd);
1863 hwdesc = &tx_ring->desc_head[producer];
1864 qlcnic_clear_cmddesc((u64 *)hwdesc);
1865 tx_ring->cmd_buf_arr[producer].skb = NULL;
1866 }
1867
1868 buffrag = &pbuf->frag_array[i];
1869
1870 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1871 switch (k) {
1872 case 0:
1873 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1874 break;
1875 case 1:
1876 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1877 break;
1878 case 2:
1879 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1880 break;
1881 case 3:
1882 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1883 break;
1884 }
1885 }
1886
1887 tx_ring->producer = get_next_index(producer, num_txd);
1888
1889 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1890
1891 qlcnic_update_cmd_producer(adapter, tx_ring);
1892
1893 adapter->stats.txbytes += skb->len;
1894 adapter->stats.xmitcalled++;
1895
1896 return NETDEV_TX_OK;
1897
1898drop_packet:
1899 adapter->stats.txdropped++;
1900 dev_kfree_skb_any(skb);
1901 return NETDEV_TX_OK;
1902}
1903
1904static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1905{
1906 struct net_device *netdev = adapter->netdev;
1907 u32 temp, temp_state, temp_val;
1908 int rv = 0;
1909
1910 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1911
1912 temp_state = qlcnic_get_temp_state(temp);
1913 temp_val = qlcnic_get_temp_val(temp);
1914
1915 if (temp_state == QLCNIC_TEMP_PANIC) {
1916 dev_err(&netdev->dev,
1917 "Device temperature %d degrees C exceeds"
1918 " maximum allowed. Hardware has been shut down.\n",
1919 temp_val);
1920 rv = 1;
1921 } else if (temp_state == QLCNIC_TEMP_WARN) {
1922 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1923 dev_err(&netdev->dev,
1924 "Device temperature %d degrees C "
1925 "exceeds operating range."
1926 " Immediate action needed.\n",
1927 temp_val);
1928 }
1929 } else {
1930 if (adapter->temp == QLCNIC_TEMP_WARN) {
1931 dev_info(&netdev->dev,
1932 "Device temperature is now %d degrees C"
1933 " in normal range.\n", temp_val);
1934 }
1935 }
1936 adapter->temp = temp_state;
1937 return rv;
1938}
1939
1940void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1941{
1942 struct net_device *netdev = adapter->netdev;
1943
1944 if (adapter->ahw.linkup && !linkup) {
1945 dev_info(&netdev->dev, "NIC Link is down\n");
1946 adapter->ahw.linkup = 0;
1947 if (netif_running(netdev)) {
1948 netif_carrier_off(netdev);
1949 netif_stop_queue(netdev);
1950 }
1951 } else if (!adapter->ahw.linkup && linkup) {
1952 dev_info(&netdev->dev, "NIC Link is up\n");
1953 adapter->ahw.linkup = 1;
1954 if (netif_running(netdev)) {
1955 netif_carrier_on(netdev);
1956 netif_wake_queue(netdev);
1957 }
1958 }
1959}
1960
1961static void qlcnic_tx_timeout(struct net_device *netdev)
1962{
1963 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1964
1965 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1966 return;
1967
1968 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
1969
1970 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
1971 adapter->need_fw_reset = 1;
1972 else
1973 adapter->reset_context = 1;
af19b491
AKS
1974}
1975
1976static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1977{
1978 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1979 struct net_device_stats *stats = &netdev->stats;
1980
1981 memset(stats, 0, sizeof(*stats));
1982
1983 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1984 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 1985 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
1986 stats->tx_bytes = adapter->stats.txbytes;
1987 stats->rx_dropped = adapter->stats.rxdropped;
1988 stats->tx_dropped = adapter->stats.txdropped;
1989
1990 return stats;
1991}
1992
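/*
 * Acknowledge a legacy (INTx) interrupt. Returns IRQ_NONE if the
 * interrupt does not belong to this function; otherwise the target
 * status register is cleared and the write is flushed with two reads
 * of the interrupt vector register.
 */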
7eb9855d 1993static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 1994{
af19b491
AKS
1995 u32 status;
1996
1997 status = readl(adapter->isr_int_vec);
1998
1999 if (!(status & adapter->int_vec_bit))
2000 return IRQ_NONE;
2001
2002 /* check interrupt state machine, to be sure */
2003 status = readl(adapter->crb_int_state_reg);
2004 if (!ISR_LEGACY_INT_TRIGGERED(status))
2005 return IRQ_NONE;
2006
2007 writel(0xffffffff, adapter->tgt_status_reg);
2008 /* read twice to ensure write is flushed */
2009 readl(adapter->isr_int_vec);
2010 readl(adapter->isr_int_vec);
2011
7eb9855d
AKS
2012 return IRQ_HANDLED;
2013}
2014
2015static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2016{
2017 struct qlcnic_host_sds_ring *sds_ring = data;
2018 struct qlcnic_adapter *adapter = sds_ring->adapter;
2019
2020 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2021 goto done;
2022 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2023 writel(0xffffffff, adapter->tgt_status_reg);
2024 goto done;
2025 }
2026
2027 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2028 return IRQ_NONE;
2029
2030done:
2031 adapter->diag_cnt++;
2032 qlcnic_enable_int(sds_ring);
2033 return IRQ_HANDLED;
2034}
2035
2036static irqreturn_t qlcnic_intr(int irq, void *data)
2037{
2038 struct qlcnic_host_sds_ring *sds_ring = data;
2039 struct qlcnic_adapter *adapter = sds_ring->adapter;
2040
2041 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2042 return IRQ_NONE;
2043
af19b491
AKS
2044 napi_schedule(&sds_ring->napi);
2045
2046 return IRQ_HANDLED;
2047}
2048
2049static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2050{
2051 struct qlcnic_host_sds_ring *sds_ring = data;
2052 struct qlcnic_adapter *adapter = sds_ring->adapter;
2053
2054 /* clear interrupt */
2055 writel(0xffffffff, adapter->tgt_status_reg);
2056
2057 napi_schedule(&sds_ring->napi);
2058 return IRQ_HANDLED;
2059}
2060
2061static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2062{
2063 struct qlcnic_host_sds_ring *sds_ring = data;
2064
2065 napi_schedule(&sds_ring->napi);
2066 return IRQ_HANDLED;
2067}
2068
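/*
 * Reclaim Tx (command ring) descriptors completed by the firmware:
 * unmap the DMA buffers, free the skbs and wake the transmit queue once
 * enough descriptors are available again. Returns 1 when the software
 * consumer has caught up with the hardware consumer (or the clean lock
 * could not be taken), 0 if more work remains.
 */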
2069static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2070{
2071 u32 sw_consumer, hw_consumer;
2072 int count = 0, i;
2073 struct qlcnic_cmd_buffer *buffer;
2074 struct pci_dev *pdev = adapter->pdev;
2075 struct net_device *netdev = adapter->netdev;
2076 struct qlcnic_skb_frag *frag;
2077 int done;
2078 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2079
2080 if (!spin_trylock(&adapter->tx_clean_lock))
2081 return 1;
2082
2083 sw_consumer = tx_ring->sw_consumer;
2084 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2085
2086 while (sw_consumer != hw_consumer) {
2087 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2088 if (buffer->skb) {
2089 frag = &buffer->frag_array[0];
2090 pci_unmap_single(pdev, frag->dma, frag->length,
2091 PCI_DMA_TODEVICE);
2092 frag->dma = 0ULL;
2093 for (i = 1; i < buffer->frag_count; i++) {
2094 frag++;
2095 pci_unmap_page(pdev, frag->dma, frag->length,
2096 PCI_DMA_TODEVICE);
2097 frag->dma = 0ULL;
2098 }
2099
2100 adapter->stats.xmitfinished++;
2101 dev_kfree_skb_any(buffer->skb);
2102 buffer->skb = NULL;
2103 }
2104
2105 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2106 if (++count >= MAX_STATUS_HANDLE)
2107 break;
2108 }
2109
2110 if (count && netif_running(netdev)) {
2111 tx_ring->sw_consumer = sw_consumer;
2112
2113 smp_mb();
2114
2115 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2116 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2117 netif_wake_queue(netdev);
8bfe8b91 2118 adapter->stats.xmit_on++;
af19b491 2119 }
af19b491 2120 }
ef71ff83 2121 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2122 }
2123 /*
2124 * If everything is freed up to the consumer, check whether the ring is
2125 * full. If it is, check whether more needs to be freed and schedule the
2126 * callback again.
2127 *
2128 * This matters when two CPUs are involved: one could be freeing entries
2129 * while the other fills them. If the ring is full when we get out of
2130 * here and the card has already interrupted the host, the host can miss
2131 * the interrupt.
2132 *
2133 * There is still a possible race condition in which the host could miss
2134 * an interrupt. The card has to take care of that case.
2135 */
2136 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2137 done = (sw_consumer == hw_consumer);
2138 spin_unlock(&adapter->tx_clean_lock);
2139
2140 return done;
2141}
2142
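/*
 * NAPI poll handler for the default ring: reclaim Tx completions,
 * process up to 'budget' received packets and re-enable the interrupt
 * once both Tx and Rx work are done.
 */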
2143static int qlcnic_poll(struct napi_struct *napi, int budget)
2144{
2145 struct qlcnic_host_sds_ring *sds_ring =
2146 container_of(napi, struct qlcnic_host_sds_ring, napi);
2147
2148 struct qlcnic_adapter *adapter = sds_ring->adapter;
2149
2150 int tx_complete;
2151 int work_done;
2152
2153 tx_complete = qlcnic_process_cmd_ring(adapter);
2154
2155 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2156
2157 if ((work_done < budget) && tx_complete) {
2158 napi_complete(&sds_ring->napi);
2159 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2160 qlcnic_enable_int(sds_ring);
2161 }
2162
2163 return work_done;
2164}
2165
8f891387 2166static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2167{
2168 struct qlcnic_host_sds_ring *sds_ring =
2169 container_of(napi, struct qlcnic_host_sds_ring, napi);
2170
2171 struct qlcnic_adapter *adapter = sds_ring->adapter;
2172 int work_done;
2173
2174 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2175
2176 if (work_done < budget) {
2177 napi_complete(&sds_ring->napi);
2178 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2179 qlcnic_enable_int(sds_ring);
2180 }
2181
2182 return work_done;
2183}
2184
af19b491
AKS
2185#ifdef CONFIG_NET_POLL_CONTROLLER
2186static void qlcnic_poll_controller(struct net_device *netdev)
2187{
2188 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2189 disable_irq(adapter->irq);
2190 qlcnic_intr(adapter->irq, adapter);
2191 enable_irq(adapter->irq);
2192}
2193#endif
2194
6df900e9
SC
2195static void
2196qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2197{
2198 u32 val;
2199
2200 val = adapter->portnum & 0xf;
2201 val |= encoding << 7;
2202 val |= (jiffies - adapter->dev_rst_time) << 8;
2203
2204 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2205 adapter->dev_rst_time = jiffies;
2206}
2207
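/*
 * Acknowledge a NEED_RESET or NEED_QUISCENT request by setting this
 * function's ready bit in the shared DRV_STATE register, protected by
 * the IDC api lock.
 */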
ade91f8e
AKS
2208static int
2209qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2210{
2211 u32 val;
2212
2213 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2214 state != QLCNIC_DEV_NEED_QUISCENT);
2215
2216 if (qlcnic_api_lock(adapter))
ade91f8e 2217 return -EIO;
af19b491
AKS
2218
2219 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2220
2221 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2222 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2223 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2224 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2225
2226 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2227
2228 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2229
2230 return 0;
af19b491
AKS
2231}
2232
1b95a839
AKS
2233static int
2234qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2235{
2236 u32 val;
2237
2238 if (qlcnic_api_lock(adapter))
2239 return -EBUSY;
2240
2241 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2242 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2243 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2244
2245 qlcnic_api_unlock(adapter);
2246
2247 return 0;
2248}
2249
af19b491
AKS
2250static void
2251qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
2252{
2253 u32 val;
2254
2255 if (qlcnic_api_lock(adapter))
2256 goto err;
2257
2258 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
6d2a4724 2259 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
af19b491
AKS
2260 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2261
2262 if (!(val & 0x11111111))
2263 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2264
2265 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2266 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2267 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2268
2269 qlcnic_api_unlock(adapter);
2270err:
2271 adapter->fw_fail_cnt = 0;
2272 clear_bit(__QLCNIC_START_FW, &adapter->state);
2273 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2274}
2275
f73dfc50 2276/* Grab api lock, before checking state */
af19b491
AKS
2277static int
2278qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2279{
2280 int act, state;
2281
2282 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2283 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2284
2285 if (((state & 0x11111111) == (act & 0x11111111)) ||
2286 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2287 return 0;
2288 else
2289 return 1;
2290}
2291
96f8118c
SC
2292static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2293{
2294 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2295
2296 if (val != QLCNIC_DRV_IDC_VER) {
2297 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2298 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2299 }
2300
2301 return 0;
2302}
2303
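/*
 * IDC handshake used before firmware load: register this function in the
 * device reference count and, based on the device state, decide whether
 * this driver instance should load the firmware itself (returns 1),
 * attach to firmware that is already running (returns 0) or give up
 * (returns -1, e.g. on a FAILED device or an initialization timeout).
 */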
af19b491
AKS
2304static int
2305qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2306{
2307 u32 val, prev_state;
aa5e18c0 2308 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2309 u8 portnum = adapter->portnum;
96f8118c 2310 u8 ret;
af19b491 2311
f73dfc50
AKS
2312 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2313 return 1;
2314
af19b491
AKS
2315 if (qlcnic_api_lock(adapter))
2316 return -1;
2317
2318 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
6d2a4724
AKS
2319 if (!(val & (1 << (portnum * 4)))) {
2320 QLC_DEV_SET_REF_CNT(val, portnum);
af19b491 2321 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
af19b491
AKS
2322 }
2323
2324 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2325 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2326
2327 switch (prev_state) {
2328 case QLCNIC_DEV_COLD:
bbd8c6a4 2329 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2330 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2331 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2332 qlcnic_api_unlock(adapter);
2333 return 1;
2334
2335 case QLCNIC_DEV_READY:
96f8118c 2336 ret = qlcnic_check_idc_ver(adapter);
af19b491 2337 qlcnic_api_unlock(adapter);
96f8118c 2338 return ret;
af19b491
AKS
2339
2340 case QLCNIC_DEV_NEED_RESET:
2341 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2342 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2343 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2344 break;
2345
2346 case QLCNIC_DEV_NEED_QUISCENT:
2347 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2348 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2349 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2350 break;
2351
2352 case QLCNIC_DEV_FAILED:
a7fc948f 2353 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2354 qlcnic_api_unlock(adapter);
2355 return -1;
bbd8c6a4
AKS
2356
2357 case QLCNIC_DEV_INITIALIZING:
2358 case QLCNIC_DEV_QUISCENT:
2359 break;
af19b491
AKS
2360 }
2361
2362 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2363
2364 do {
af19b491 2365 msleep(1000);
a5e463d0
SC
2366 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2367
2368 if (prev_state == QLCNIC_DEV_QUISCENT)
2369 continue;
2370 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2371
65b5b420
AKS
2372 if (!dev_init_timeo) {
2373 dev_err(&adapter->pdev->dev,
2374 "Waiting for device to initialize timeout\n");
af19b491 2375 return -1;
65b5b420 2376 }
af19b491
AKS
2377
2378 if (qlcnic_api_lock(adapter))
2379 return -1;
2380
2381 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2382 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2383 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2384
96f8118c 2385 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2386 qlcnic_api_unlock(adapter);
2387
96f8118c 2388 return ret;
af19b491
AKS
2389}
2390
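/*
 * Delayed work that drives the reset/quiesce state machine: wait for all
 * functions to acknowledge the request (or time out), move the device
 * through QUISCENT/INITIALIZING as required and restart the firmware
 * before scheduling qlcnic_attach_work().
 */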
2391static void
2392qlcnic_fwinit_work(struct work_struct *work)
2393{
2394 struct qlcnic_adapter *adapter = container_of(work,
2395 struct qlcnic_adapter, fw_work.work);
9f26f547 2396 u32 dev_state = 0xf, npar_state;
af19b491 2397
f73dfc50
AKS
2398 if (qlcnic_api_lock(adapter))
2399 goto err_ret;
af19b491 2400
a5e463d0
SC
2401 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2402 if (dev_state == QLCNIC_DEV_QUISCENT) {
2403 qlcnic_api_unlock(adapter);
2404 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2405 FW_POLL_DELAY * 2);
2406 return;
2407 }
2408
9f26f547
AC
2409 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2410 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2411 if (npar_state == QLCNIC_DEV_NPAR_RDY) {
2412 qlcnic_api_unlock(adapter);
2413 goto wait_npar;
2414 } else {
2415 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2416 FW_POLL_DELAY);
2417 qlcnic_api_unlock(adapter);
2418 return;
2419 }
2420 }
2421
f73dfc50
AKS
2422 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2423 dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2424 adapter->reset_ack_timeo);
2425 goto skip_ack_check;
2426 }
2427
2428 if (!qlcnic_check_drv_state(adapter)) {
2429skip_ack_check:
2430 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0
SC
2431
2432 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2433 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2434 QLCNIC_DEV_QUISCENT);
2435 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2436 FW_POLL_DELAY * 2);
2437 QLCDB(adapter, DRV, "Quiescing the driver\n");
6df900e9
SC
2438 qlcnic_idc_debug_info(adapter, 0);
2439
a5e463d0
SC
2440 qlcnic_api_unlock(adapter);
2441 return;
2442 }
2443
f73dfc50
AKS
2444 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2445 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2446 QLCNIC_DEV_INITIALIZING);
2447 set_bit(__QLCNIC_START_FW, &adapter->state);
2448 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2449 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2450 }
2451
f73dfc50
AKS
2452 qlcnic_api_unlock(adapter);
2453
9f26f547 2454 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491
AKS
2455 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2456 return;
2457 }
af19b491
AKS
2458 goto err_ret;
2459 }
2460
f73dfc50 2461 qlcnic_api_unlock(adapter);
aa5e18c0 2462
9f26f547 2463wait_npar:
af19b491 2464 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2465 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2466
af19b491 2467 switch (dev_state) {
a5e463d0
SC
2468 case QLCNIC_DEV_QUISCENT:
2469 case QLCNIC_DEV_NEED_QUISCENT:
f73dfc50
AKS
2470 case QLCNIC_DEV_NEED_RESET:
2471 qlcnic_schedule_work(adapter,
2472 qlcnic_fwinit_work, FW_POLL_DELAY);
2473 return;
af19b491
AKS
2474 case QLCNIC_DEV_FAILED:
2475 break;
2476
2477 default:
9f26f547 2478 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50
AKS
2479 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2480 return;
2481 }
af19b491
AKS
2482 }
2483
2484err_ret:
f73dfc50
AKS
2485 dev_err(&adapter->pdev->dev, "Fwinit work failed; state=%u "
2486 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2487 netif_device_attach(adapter->netdev);
af19b491
AKS
2488 qlcnic_clr_all_drv_state(adapter);
2489}
2490
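/*
 * Delayed work that takes the interface down when firmware recovery is
 * needed: bring the device down, acknowledge the pending reset/quiescent
 * state via qlcnic_set_drv_state() and hand over to qlcnic_fwinit_work().
 */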
2491static void
2492qlcnic_detach_work(struct work_struct *work)
2493{
2494 struct qlcnic_adapter *adapter = container_of(work,
2495 struct qlcnic_adapter, fw_work.work);
2496 struct net_device *netdev = adapter->netdev;
2497 u32 status;
2498
2499 netif_device_detach(netdev);
2500
2501 qlcnic_down(adapter, netdev);
2502
af19b491
AKS
2503 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2504
2505 if (status & QLCNIC_RCODE_FATAL_ERROR)
2506 goto err_ret;
2507
2508 if (adapter->temp == QLCNIC_TEMP_PANIC)
2509 goto err_ret;
2510
ade91f8e
AKS
2511 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2512 goto err_ret;
af19b491
AKS
2513
2514 adapter->fw_wait_cnt = 0;
2515
2516 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2517
2518 return;
2519
2520err_ret:
65b5b420
AKS
2521 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2522 status, adapter->temp);
34ce3626 2523 netif_device_attach(netdev);
af19b491
AKS
2524 qlcnic_clr_all_drv_state(adapter);
2525
2526}
2527
f73dfc50 2528/*Transit to RESET state from READY state only */
af19b491
AKS
2529static void
2530qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2531{
2532 u32 state;
2533
cea8975e 2534 adapter->need_fw_reset = 1;
af19b491
AKS
2535 if (qlcnic_api_lock(adapter))
2536 return;
2537
2538 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2539
f73dfc50 2540 if (state == QLCNIC_DEV_READY) {
af19b491 2541 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2542 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2543 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2544 }
2545
2546 qlcnic_api_unlock(adapter);
2547}
2548
9f26f547
AC
2549 /* Transition to NPAR READY state from NPAR NOT READY state */
2550static void
2551qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2552{
2553 u32 state;
2554
cea8975e
AC
2555 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
2556 adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
2557 return;
9f26f547
AC
2558 if (qlcnic_api_lock(adapter))
2559 return;
2560
2561 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2562
2563 if (state != QLCNIC_DEV_NPAR_RDY) {
2564 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
2565 QLCNIC_DEV_NPAR_RDY);
2566 QLCDB(adapter, DRV, "NPAR READY state set\n");
2567 }
2568
2569 qlcnic_api_unlock(adapter);
2570}
2571
af19b491
AKS
2572static void
2573qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2574 work_func_t func, int delay)
2575{
451724c8
SC
2576 if (test_bit(__QLCNIC_AER, &adapter->state))
2577 return;
2578
af19b491
AKS
2579 INIT_DELAYED_WORK(&adapter->fw_work, func);
2580 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2581}
2582
2583static void
2584qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2585{
2586 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2587 msleep(10);
2588
2589 cancel_delayed_work_sync(&adapter->fw_work);
2590}
2591
2592static void
2593qlcnic_attach_work(struct work_struct *work)
2594{
2595 struct qlcnic_adapter *adapter = container_of(work,
2596 struct qlcnic_adapter, fw_work.work);
2597 struct net_device *netdev = adapter->netdev;
af19b491
AKS
2598
2599 if (netif_running(netdev)) {
52486a3a 2600 if (qlcnic_up(adapter, netdev))
af19b491 2601 goto done;
af19b491
AKS
2602
2603 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2604 }
2605
af19b491 2606done:
34ce3626 2607 netif_device_attach(netdev);
af19b491
AKS
2608 adapter->fw_fail_cnt = 0;
2609 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2610
2611 if (!qlcnic_clr_drv_state(adapter))
2612 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2613 FW_POLL_DELAY);
af19b491
AKS
2614}
2615
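/*
 * Periodic health check run from the fw_poll work: watch the device
 * temperature and the firmware heartbeat counter, request a firmware
 * reset when a hang is detected and, if auto recovery is enabled,
 * schedule qlcnic_detach_work(). Returns 1 when the device must be
 * detached.
 */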
2616static int
2617qlcnic_check_health(struct qlcnic_adapter *adapter)
2618{
2619 u32 state = 0, heartbit;
2620 struct net_device *netdev = adapter->netdev;
2621
2622 if (qlcnic_check_temp(adapter))
2623 goto detach;
2624
2372a5f1 2625 if (adapter->need_fw_reset)
af19b491 2626 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2627
2628 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2629 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2630 adapter->need_fw_reset = 1;
2631
2632 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2633 if (heartbit != adapter->heartbit) {
2634 adapter->heartbit = heartbit;
2635 adapter->fw_fail_cnt = 0;
2636 if (adapter->need_fw_reset)
2637 goto detach;
68bf1c68 2638
0df170b6
AKS
2639 if (adapter->reset_context &&
2640 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
68bf1c68
AKS
2641 qlcnic_reset_hw_context(adapter);
2642 adapter->netdev->trans_start = jiffies;
2643 }
2644
af19b491
AKS
2645 return 0;
2646 }
2647
2648 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2649 return 0;
2650
2651 qlcnic_dev_request_reset(adapter);
2652
0df170b6
AKS
2653 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
2654 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
2655
2656 dev_info(&netdev->dev, "firmware hang detected\n");
2657
2658detach:
2659 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2660 QLCNIC_DEV_NEED_RESET;
2661
2662 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
65b5b420
AKS
2663 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2664
af19b491 2665 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
2666 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2667 }
af19b491
AKS
2668
2669 return 1;
2670}
2671
2672static void
2673qlcnic_fw_poll_work(struct work_struct *work)
2674{
2675 struct qlcnic_adapter *adapter = container_of(work,
2676 struct qlcnic_adapter, fw_work.work);
2677
2678 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2679 goto reschedule;
2680
2681
2682 if (qlcnic_check_health(adapter))
2683 return;
2684
2685reschedule:
2686 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2687}
2688
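/*
 * A function is the "first" one on the device if every lower-numbered
 * function is in D3cold; that function is responsible for restarting the
 * firmware during AER recovery.
 */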
451724c8
SC
2689static int qlcnic_is_first_func(struct pci_dev *pdev)
2690{
2691 struct pci_dev *oth_pdev;
2692 int val = pdev->devfn;
2693
2694 while (val-- > 0) {
2695 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2696 (pdev->bus), pdev->bus->number,
2697 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
2698
2699 if (oth_pdev && (oth_pdev->current_state != PCI_D3cold))
2700 return 0;
2701 }
2702 return 1;
2703}
2704
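/*
 * Recover a PCI function after an AER slot reset: re-enable the device,
 * restart the firmware from the first active function, set up interrupts
 * again and, if the interface was running, re-attach and bring it up.
 */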
2705static int qlcnic_attach_func(struct pci_dev *pdev)
2706{
2707 int err, first_func;
2708 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2709 struct net_device *netdev = adapter->netdev;
2710
2711 pdev->error_state = pci_channel_io_normal;
2712
2713 err = pci_enable_device(pdev);
2714 if (err)
2715 return err;
2716
2717 pci_set_power_state(pdev, PCI_D0);
2718 pci_set_master(pdev);
2719 pci_restore_state(pdev);
2720
2721 first_func = qlcnic_is_first_func(pdev);
2722
2723 if (qlcnic_api_lock(adapter))
2724 return -EINVAL;
2725
2726 if (first_func) {
2727 adapter->need_fw_reset = 1;
2728 set_bit(__QLCNIC_START_FW, &adapter->state);
2729 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2730 QLCDB(adapter, DRV, "Restarting fw\n");
2731 }
2732 qlcnic_api_unlock(adapter);
2733
2734 err = adapter->nic_ops->start_firmware(adapter);
2735 if (err)
2736 return err;
2737
2738 qlcnic_clr_drv_state(adapter);
2739 qlcnic_setup_intr(adapter);
2740
2741 if (netif_running(netdev)) {
2742 err = qlcnic_attach(adapter);
2743 if (err) {
2744 qlcnic_clr_all_drv_state(adapter);
2745 clear_bit(__QLCNIC_AER, &adapter->state);
2746 netif_device_attach(netdev);
2747 return err;
2748 }
2749
2750 err = qlcnic_up(adapter, netdev);
2751 if (err)
2752 goto done;
2753
2754 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2755 }
2756 done:
2757 netif_device_attach(netdev);
2758 return err;
2759}
2760
2761static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2762 pci_channel_state_t state)
2763{
2764 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2765 struct net_device *netdev = adapter->netdev;
2766
2767 if (state == pci_channel_io_perm_failure)
2768 return PCI_ERS_RESULT_DISCONNECT;
2769
2770 if (state == pci_channel_io_normal)
2771 return PCI_ERS_RESULT_RECOVERED;
2772
2773 set_bit(__QLCNIC_AER, &adapter->state);
2774 netif_device_detach(netdev);
2775
2776 cancel_delayed_work_sync(&adapter->fw_work);
2777
2778 if (netif_running(netdev))
2779 qlcnic_down(adapter, netdev);
2780
2781 qlcnic_detach(adapter);
2782 qlcnic_teardown_intr(adapter);
2783
2784 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2785
2786 pci_save_state(pdev);
2787 pci_disable_device(pdev);
2788
2789 return PCI_ERS_RESULT_NEED_RESET;
2790}
2791
2792static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
2793{
2794 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
2795 PCI_ERS_RESULT_RECOVERED;
2796}
2797
2798static void qlcnic_io_resume(struct pci_dev *pdev)
2799{
2800 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2801
2802 pci_cleanup_aer_uncorrect_error_status(pdev);
2803
2804 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
2805 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
2806 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2807 FW_POLL_DELAY);
2808}
2809
2810
87eb743b
AC
2811static int
2812qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2813{
2814 int err;
2815
2816 err = qlcnic_can_start_firmware(adapter);
2817 if (err)
2818 return err;
2819
2820 qlcnic_check_options(adapter);
2821
2822 adapter->need_fw_reset = 0;
2823
2824 return err;
2825}
2826
2827static int
2828qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
2829{
2830 return -EOPNOTSUPP;
2831}
2832
2833static int
2834qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
2835{
2836 return -EOPNOTSUPP;
2837}
2838
2839static int
2840qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter)
2841{
2842 return -EOPNOTSUPP;
2843}
2844
2845static void
2846qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter)
2847{
2848 return;
2849}
2850
af19b491
AKS
2851static ssize_t
2852qlcnic_store_bridged_mode(struct device *dev,
2853 struct device_attribute *attr, const char *buf, size_t len)
2854{
2855 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2856 unsigned long new;
2857 int ret = -EINVAL;
2858
2859 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2860 goto err_out;
2861
8a15ad1f 2862 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
2863 goto err_out;
2864
2865 if (strict_strtoul(buf, 2, &new))
2866 goto err_out;
2867
2e9d722d 2868 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
2869 ret = len;
2870
2871err_out:
2872 return ret;
2873}
2874
2875static ssize_t
2876qlcnic_show_bridged_mode(struct device *dev,
2877 struct device_attribute *attr, char *buf)
2878{
2879 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2880 int bridged_mode = 0;
2881
2882 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2883 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2884
2885 return sprintf(buf, "%d\n", bridged_mode);
2886}
2887
2888static struct device_attribute dev_attr_bridged_mode = {
2889 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2890 .show = qlcnic_show_bridged_mode,
2891 .store = qlcnic_store_bridged_mode,
2892};
2893
2894static ssize_t
2895qlcnic_store_diag_mode(struct device *dev,
2896 struct device_attribute *attr, const char *buf, size_t len)
2897{
2898 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2899 unsigned long new;
2900
2901 if (strict_strtoul(buf, 2, &new))
2902 return -EINVAL;
2903
2904 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2905 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2906
2907 return len;
2908}
2909
2910static ssize_t
2911qlcnic_show_diag_mode(struct device *dev,
2912 struct device_attribute *attr, char *buf)
2913{
2914 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2915
2916 return sprintf(buf, "%d\n",
2917 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2918}
2919
2920static struct device_attribute dev_attr_diag_mode = {
2921 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2922 .show = qlcnic_show_diag_mode,
2923 .store = qlcnic_store_diag_mode,
2924};
2925
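/*
 * The "crb" sysfs attribute exposes raw register access: 4-byte accesses
 * to CRB space and 8-byte accesses to the CAM/QM window. Access is only
 * allowed in diag mode and must be naturally aligned.
 */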
2926static int
2927qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2928 loff_t offset, size_t size)
2929{
897e8c7c
DP
2930 size_t crb_size = 4;
2931
af19b491
AKS
2932 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2933 return -EIO;
2934
897e8c7c
DP
2935 if (offset < QLCNIC_PCI_CRBSPACE) {
2936 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
2937 QLCNIC_PCI_CAMQM_END))
2938 crb_size = 8;
2939 else
2940 return -EINVAL;
2941 }
af19b491 2942
897e8c7c
DP
2943 if ((size != crb_size) || (offset & (crb_size-1)))
2944 return -EINVAL;
af19b491
AKS
2945
2946 return 0;
2947}
2948
2949static ssize_t
2c3c8bea
CW
2950qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2951 struct bin_attribute *attr,
af19b491
AKS
2952 char *buf, loff_t offset, size_t size)
2953{
2954 struct device *dev = container_of(kobj, struct device, kobj);
2955 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2956 u32 data;
897e8c7c 2957 u64 qmdata;
af19b491
AKS
2958 int ret;
2959
2960 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2961 if (ret != 0)
2962 return ret;
2963
897e8c7c
DP
2964 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2965 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
2966 memcpy(buf, &qmdata, size);
2967 } else {
2968 data = QLCRD32(adapter, offset);
2969 memcpy(buf, &data, size);
2970 }
af19b491
AKS
2971 return size;
2972}
2973
2974static ssize_t
2c3c8bea
CW
2975qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2976 struct bin_attribute *attr,
af19b491
AKS
2977 char *buf, loff_t offset, size_t size)
2978{
2979 struct device *dev = container_of(kobj, struct device, kobj);
2980 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2981 u32 data;
897e8c7c 2982 u64 qmdata;
af19b491
AKS
2983 int ret;
2984
2985 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2986 if (ret != 0)
2987 return ret;
2988
897e8c7c
DP
2989 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2990 memcpy(&qmdata, buf, size);
2991 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
2992 } else {
2993 memcpy(&data, buf, size);
2994 QLCWR32(adapter, offset, data);
2995 }
af19b491
AKS
2996 return size;
2997}
2998
2999static int
3000qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3001 loff_t offset, size_t size)
3002{
3003 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3004 return -EIO;
3005
3006 if ((size != 8) || (offset & 0x7))
3007 return -EIO;
3008
3009 return 0;
3010}
3011
3012static ssize_t
2c3c8bea
CW
3013qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3014 struct bin_attribute *attr,
af19b491
AKS
3015 char *buf, loff_t offset, size_t size)
3016{
3017 struct device *dev = container_of(kobj, struct device, kobj);
3018 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3019 u64 data;
3020 int ret;
3021
3022 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3023 if (ret != 0)
3024 return ret;
3025
3026 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3027 return -EIO;
3028
3029 memcpy(buf, &data, size);
3030
3031 return size;
3032}
3033
3034static ssize_t
2c3c8bea
CW
3035qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3036 struct bin_attribute *attr,
af19b491
AKS
3037 char *buf, loff_t offset, size_t size)
3038{
3039 struct device *dev = container_of(kobj, struct device, kobj);
3040 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3041 u64 data;
3042 int ret;
3043
3044 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3045 if (ret != 0)
3046 return ret;
3047
3048 memcpy(&data, buf, size);
3049
3050 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3051 return -EIO;
3052
3053 return size;
3054}
3055
3056
3057static struct bin_attribute bin_attr_crb = {
3058 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3059 .size = 0,
3060 .read = qlcnic_sysfs_read_crb,
3061 .write = qlcnic_sysfs_write_crb,
3062};
3063
3064static struct bin_attribute bin_attr_mem = {
3065 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3066 .size = 0,
3067 .read = qlcnic_sysfs_read_mem,
3068 .write = qlcnic_sysfs_write_mem,
3069};
3070
cea8975e 3071static int
346fe763
RB
3072validate_pm_config(struct qlcnic_adapter *adapter,
3073 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3074{
3075
3076 u8 src_pci_func, s_esw_id, d_esw_id;
3077 u8 dest_pci_func;
3078 int i;
3079
3080 for (i = 0; i < count; i++) {
3081 src_pci_func = pm_cfg[i].pci_func;
3082 dest_pci_func = pm_cfg[i].dest_npar;
3083 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3084 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3085 return QL_STATUS_INVALID_PARAM;
3086
3087 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3088 return QL_STATUS_INVALID_PARAM;
3089
3090 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3091 return QL_STATUS_INVALID_PARAM;
3092
3093 if (!IS_VALID_MODE(pm_cfg[i].action))
3094 return QL_STATUS_INVALID_PARAM;
3095
3096 s_esw_id = adapter->npars[src_pci_func].phy_port;
3097 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3098
3099 if (s_esw_id != d_esw_id)
3100 return QL_STATUS_INVALID_PARAM;
3101
3102 }
3103 return 0;
3104
3105}
3106
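/*
 * The "pm_config" attribute takes an array of qlcnic_pm_func_cfg entries
 * and configures port mirroring between NIC partitions; source and
 * destination functions must sit on the same eSwitch.
 */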
3107static ssize_t
3108qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3109 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3110{
3111 struct device *dev = container_of(kobj, struct device, kobj);
3112 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3113 struct qlcnic_pm_func_cfg *pm_cfg;
3114 u32 id, action, pci_func;
3115 int count, rem, i, ret;
3116
3117 count = size / sizeof(struct qlcnic_pm_func_cfg);
3118 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3119 if (rem)
3120 return QL_STATUS_INVALID_PARAM;
3121
3122 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3123
3124 ret = validate_pm_config(adapter, pm_cfg, count);
3125 if (ret)
3126 return ret;
3127 for (i = 0; i < count; i++) {
3128 pci_func = pm_cfg[i].pci_func;
3129 action = pm_cfg[i].action;
3130 id = adapter->npars[pci_func].phy_port;
3131 ret = qlcnic_config_port_mirroring(adapter, id,
3132 action, pci_func);
3133 if (ret)
3134 return ret;
3135 }
3136
3137 for (i = 0; i < count; i++) {
3138 pci_func = pm_cfg[i].pci_func;
3139 id = adapter->npars[pci_func].phy_port;
3140 adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
3141 adapter->npars[pci_func].dest_npar = id;
3142 }
3143 return size;
3144}
3145
3146static ssize_t
3147qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3148 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3149{
3150 struct device *dev = container_of(kobj, struct device, kobj);
3151 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3152 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3153 int i;
3154
3155 if (size != sizeof(pm_cfg))
3156 return QL_STATUS_INVALID_PARAM;
3157
3158 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3159 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3160 continue;
3161 pm_cfg[i].action = adapter->npars[i].enable_pm;
3162 pm_cfg[i].dest_npar = 0;
3163 pm_cfg[i].pci_func = i;
3164 }
3165 memcpy(buf, &pm_cfg, size);
3166
3167 return size;
3168}
3169
cea8975e 3170static int
346fe763
RB
3171validate_esw_config(struct qlcnic_adapter *adapter,
3172 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3173{
3174 u8 pci_func;
3175 int i;
3176
3177 for (i = 0; i < count; i++) {
3178 pci_func = esw_cfg[i].pci_func;
3179 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3180 return QL_STATUS_INVALID_PARAM;
3181
3182 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3183 return QL_STATUS_INVALID_PARAM;
3184
3185 if (esw_cfg[i].host_vlan_tag == 1)
3186 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3187 return QL_STATUS_INVALID_PARAM;
3188
3189 if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
3190 || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
3191 || !IS_VALID_MODE(esw_cfg[i].mac_learning)
3192 || !IS_VALID_MODE(esw_cfg[i].discard_tagged))
3193 return QL_STATUS_INVALID_PARAM;
3194 }
3195
3196 return 0;
3197}
3198
3199static ssize_t
3200qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3201 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3202{
3203 struct device *dev = container_of(kobj, struct device, kobj);
3204 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3205 struct qlcnic_esw_func_cfg *esw_cfg;
346fe763 3206 int count, rem, i, ret;
cea8975e 3207 u8 id, pci_func;
346fe763
RB
3208
3209 count = size / sizeof(struct qlcnic_esw_func_cfg);
3210 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3211 if (rem)
3212 return QL_STATUS_INVALID_PARAM;
3213
3214 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3215 ret = validate_esw_config(adapter, esw_cfg, count);
3216 if (ret)
3217 return ret;
3218
3219 for (i = 0; i < count; i++) {
3220 pci_func = esw_cfg[i].pci_func;
3221 id = adapter->npars[pci_func].phy_port;
cea8975e
AC
3222 ret = qlcnic_config_switch_port(adapter, id,
3223 esw_cfg[i].host_vlan_tag,
3224 esw_cfg[i].discard_tagged,
3225 esw_cfg[i].promisc_mode,
3226 esw_cfg[i].mac_learning,
3227 esw_cfg[i].pci_func,
3228 esw_cfg[i].vlan_id);
346fe763
RB
3229 if (ret)
3230 return ret;
3231 }
3232
3233 for (i = 0; i < count; i++) {
3234 pci_func = esw_cfg[i].pci_func;
3235 adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
3236 adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning;
3237 adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
3238 adapter->npars[pci_func].discard_tagged =
3239 esw_cfg[i].discard_tagged;
3240 adapter->npars[pci_func].host_vlan_tag =
3241 esw_cfg[i].host_vlan_tag;
3242 }
3243
3244 return size;
3245}
3246
3247static ssize_t
3248qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3249 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3250{
3251 struct device *dev = container_of(kobj, struct device, kobj);
3252 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3253 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3254 int i;
3255
3256 if (size != sizeof(esw_cfg))
3257 return QL_STATUS_INVALID_PARAM;
3258
3259 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3260 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3261 continue;
3262
3263 esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
3264 esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
3265 esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
3266 esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
3267 esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
3268 }
3269 memcpy(buf, &esw_cfg, size);
3270
3271 return size;
3272}
3273
cea8975e 3274static int
346fe763
RB
3275validate_npar_config(struct qlcnic_adapter *adapter,
3276 struct qlcnic_npar_func_cfg *np_cfg, int count)
3277{
3278 u8 pci_func, i;
3279
3280 for (i = 0; i < count; i++) {
3281 pci_func = np_cfg[i].pci_func;
3282 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3283 return QL_STATUS_INVALID_PARAM;
3284
3285 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3286 return QL_STATUS_INVALID_PARAM;
3287
3288 if (!IS_VALID_BW(np_cfg[i].min_bw)
3289 || !IS_VALID_BW(np_cfg[i].max_bw)
3290 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3291 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3292 return QL_STATUS_INVALID_PARAM;
3293 }
3294 return 0;
3295}
3296
3297static ssize_t
3298qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3299 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3300{
3301 struct device *dev = container_of(kobj, struct device, kobj);
3302 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3303 struct qlcnic_info nic_info;
3304 struct qlcnic_npar_func_cfg *np_cfg;
3305 int i, count, rem, ret;
3306 u8 pci_func;
3307
3308 count = size / sizeof(struct qlcnic_npar_func_cfg);
3309 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3310 if (rem)
3311 return QL_STATUS_INVALID_PARAM;
3312
3313 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3314 ret = validate_npar_config(adapter, np_cfg, count);
3315 if (ret)
3316 return ret;
3317
3318 for (i = 0; i < count ; i++) {
3319 pci_func = np_cfg[i].pci_func;
3320 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3321 if (ret)
3322 return ret;
3323 nic_info.pci_func = pci_func;
3324 nic_info.min_tx_bw = np_cfg[i].min_bw;
3325 nic_info.max_tx_bw = np_cfg[i].max_bw;
3326 ret = qlcnic_set_nic_info(adapter, &nic_info);
3327 if (ret)
3328 return ret;
cea8975e
AC
3329 adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
3330 adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
346fe763
RB
3331 }
3332
3333 return size;
3334
3335}
3336static ssize_t
3337qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3338 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3339{
3340 struct device *dev = container_of(kobj, struct device, kobj);
3341 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3342 struct qlcnic_info nic_info;
3343 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3344 int i, ret;
3345
3346 if (size != sizeof(np_cfg))
3347 return QL_STATUS_INVALID_PARAM;
3348
3349 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3350 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3351 continue;
3352 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3353 if (ret)
3354 return ret;
3355
3356 np_cfg[i].pci_func = i;
3357 np_cfg[i].op_mode = nic_info.op_mode;
3358 np_cfg[i].port_num = nic_info.phys_port;
3359 np_cfg[i].fw_capab = nic_info.capabilities;
3360 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3361 np_cfg[i].max_bw = nic_info.max_tx_bw;
3362 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3363 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3364 }
3365 memcpy(buf, &np_cfg, size);
3366 return size;
3367}
3368
3369static ssize_t
3370qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3371 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3372{
3373 struct device *dev = container_of(kobj, struct device, kobj);
3374 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3375 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
3376 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
3377 int i, ret;
3378
3379 if (size != sizeof(pci_cfg))
3380 return QL_STATUS_INVALID_PARAM;
3381
3382 ret = qlcnic_get_pci_info(adapter, pci_info);
3383 if (ret)
3384 return ret;
3385
3386 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3387 pci_cfg[i].pci_func = pci_info[i].id;
3388 pci_cfg[i].func_type = pci_info[i].type;
3389 pci_cfg[i].port_num = pci_info[i].default_port;
3390 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3391 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3392 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3393 }
3394 memcpy(buf, &pci_cfg, size);
3395 return size;
3396
3397}
3398static struct bin_attribute bin_attr_npar_config = {
3399 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3400 .size = 0,
3401 .read = qlcnic_sysfs_read_npar_config,
3402 .write = qlcnic_sysfs_write_npar_config,
3403};
3404
3405static struct bin_attribute bin_attr_pci_config = {
3406 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3407 .size = 0,
3408 .read = qlcnic_sysfs_read_pci_config,
3409 .write = NULL,
3410};
3411
3412static struct bin_attribute bin_attr_esw_config = {
3413 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3414 .size = 0,
3415 .read = qlcnic_sysfs_read_esw_config,
3416 .write = qlcnic_sysfs_write_esw_config,
3417};
3418
3419static struct bin_attribute bin_attr_pm_config = {
3420 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3421 .size = 0,
3422 .read = qlcnic_sysfs_read_pm_config,
3423 .write = qlcnic_sysfs_write_pm_config,
3424};
3425
af19b491
AKS
3426static void
3427qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3428{
3429 struct device *dev = &adapter->pdev->dev;
3430
3431 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3432 if (device_create_file(dev, &dev_attr_bridged_mode))
3433 dev_warn(dev,
3434 "failed to create bridged_mode sysfs entry\n");
3435}
3436
3437static void
3438qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3439{
3440 struct device *dev = &adapter->pdev->dev;
3441
3442 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3443 device_remove_file(dev, &dev_attr_bridged_mode);
3444}
3445
3446static void
3447qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3448{
3449 struct device *dev = &adapter->pdev->dev;
3450
132ff00a
AC
3451 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3452 return;
af19b491
AKS
3453 if (device_create_file(dev, &dev_attr_diag_mode))
3454 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3455 if (device_create_bin_file(dev, &bin_attr_crb))
3456 dev_info(dev, "failed to create crb sysfs entry\n");
3457 if (device_create_bin_file(dev, &bin_attr_mem))
3458 dev_info(dev, "failed to create mem sysfs entry\n");
346fe763
RB
3459 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3460 adapter->op_mode != QLCNIC_MGMT_FUNC)
3461 return;
3462 if (device_create_bin_file(dev, &bin_attr_pci_config))
3463 dev_info(dev, "failed to create pci config sysfs entry\n");
3464 if (device_create_bin_file(dev, &bin_attr_npar_config))
3465 dev_info(dev, "failed to create npar config sysfs entry\n");
3466 if (device_create_bin_file(dev, &bin_attr_esw_config))
3467 dev_info(dev, "failed to create esw config sysfs entry\n");
3468 if (device_create_bin_file(dev, &bin_attr_pm_config))
3469 dev_info(dev, "failed to create pm config sysfs entry\n");
3470
af19b491
AKS
3471}
3472
af19b491
AKS
3473static void
3474qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3475{
3476 struct device *dev = &adapter->pdev->dev;
3477
132ff00a
AC
3478 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3479 return;
af19b491
AKS
3480 device_remove_file(dev, &dev_attr_diag_mode);
3481 device_remove_bin_file(dev, &bin_attr_crb);
3482 device_remove_bin_file(dev, &bin_attr_mem);
346fe763
RB
3483 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3484 adapter->op_mode != QLCNIC_MGMT_FUNC)
3485 return;
3486 device_remove_bin_file(dev, &bin_attr_pci_config);
3487 device_remove_bin_file(dev, &bin_attr_npar_config);
3488 device_remove_bin_file(dev, &bin_attr_esw_config);
3489 device_remove_bin_file(dev, &bin_attr_pm_config);
af19b491
AKS
3490}
3491
3492#ifdef CONFIG_INET
3493
3494#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
3495
af19b491
AKS
3496static void
3497qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3498{
3499 struct in_device *indev;
3500 struct qlcnic_adapter *adapter = netdev_priv(dev);
3501
af19b491
AKS
3502 indev = in_dev_get(dev);
3503 if (!indev)
3504 return;
3505
3506 for_ifa(indev) {
3507 switch (event) {
3508 case NETDEV_UP:
3509 qlcnic_config_ipaddr(adapter,
3510 ifa->ifa_address, QLCNIC_IP_UP);
3511 break;
3512 case NETDEV_DOWN:
3513 qlcnic_config_ipaddr(adapter,
3514 ifa->ifa_address, QLCNIC_IP_DOWN);
3515 break;
3516 default:
3517 break;
3518 }
3519 } endfor_ifa(indev);
3520
3521 in_dev_put(indev);
af19b491
AKS
3522}
3523
3524static int qlcnic_netdev_event(struct notifier_block *this,
3525 unsigned long event, void *ptr)
3526{
3527 struct qlcnic_adapter *adapter;
3528 struct net_device *dev = (struct net_device *)ptr;
3529
3530recheck:
3531 if (dev == NULL)
3532 goto done;
3533
3534 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3535 dev = vlan_dev_real_dev(dev);
3536 goto recheck;
3537 }
3538
3539 if (!is_qlcnic_netdev(dev))
3540 goto done;
3541
3542 adapter = netdev_priv(dev);
3543
3544 if (!adapter)
3545 goto done;
3546
8a15ad1f 3547 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3548 goto done;
3549
3550 qlcnic_config_indev_addr(dev, event);
3551done:
3552 return NOTIFY_DONE;
3553}
3554
3555static int
3556qlcnic_inetaddr_event(struct notifier_block *this,
3557 unsigned long event, void *ptr)
3558{
3559 struct qlcnic_adapter *adapter;
3560 struct net_device *dev;
3561
3562 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3563
3564 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3565
3566recheck:
3567 if (dev == NULL || !netif_running(dev))
3568 goto done;
3569
3570 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3571 dev = vlan_dev_real_dev(dev);
3572 goto recheck;
3573 }
3574
3575 if (!is_qlcnic_netdev(dev))
3576 goto done;
3577
3578 adapter = netdev_priv(dev);
3579
251a84c9 3580 if (!adapter)
af19b491
AKS
3581 goto done;
3582
8a15ad1f 3583 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3584 goto done;
3585
3586 switch (event) {
3587 case NETDEV_UP:
3588 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
3589 break;
3590 case NETDEV_DOWN:
3591 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
3592 break;
3593 default:
3594 break;
3595 }
3596
3597done:
3598 return NOTIFY_DONE;
3599}
3600
3601static struct notifier_block qlcnic_netdev_cb = {
3602 .notifier_call = qlcnic_netdev_event,
3603};
3604
3605static struct notifier_block qlcnic_inetaddr_cb = {
3606 .notifier_call = qlcnic_inetaddr_event,
3607};
3608#else
3609static void
3610qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3611{ }
3612#endif
451724c8
SC
3613static struct pci_error_handlers qlcnic_err_handler = {
3614 .error_detected = qlcnic_io_error_detected,
3615 .slot_reset = qlcnic_io_slot_reset,
3616 .resume = qlcnic_io_resume,
3617};
af19b491
AKS
3618
3619static struct pci_driver qlcnic_driver = {
3620 .name = qlcnic_driver_name,
3621 .id_table = qlcnic_pci_tbl,
3622 .probe = qlcnic_probe,
3623 .remove = __devexit_p(qlcnic_remove),
3624#ifdef CONFIG_PM
3625 .suspend = qlcnic_suspend,
3626 .resume = qlcnic_resume,
3627#endif
451724c8
SC
3628 .shutdown = qlcnic_shutdown,
3629 .err_handler = &qlcnic_err_handler
3630
af19b491
AKS
3631};
3632
3633static int __init qlcnic_init_module(void)
3634{
0cf3a14c 3635 int ret;
af19b491
AKS
3636
3637 printk(KERN_INFO "%s\n", qlcnic_driver_string);
3638
3639#ifdef CONFIG_INET
3640 register_netdevice_notifier(&qlcnic_netdev_cb);
3641 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3642#endif
3643
0cf3a14c
AKS
3644 ret = pci_register_driver(&qlcnic_driver);
3645 if (ret) {
3646#ifdef CONFIG_INET
3647 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3648 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3649#endif
3650 }
af19b491 3651
0cf3a14c 3652 return ret;
af19b491
AKS
3653}
3654
3655module_init(qlcnic_init_module);
3656
3657static void __exit qlcnic_exit_module(void)
3658{
3659
3660 pci_unregister_driver(&qlcnic_driver);
3661
3662#ifdef CONFIG_INET
3663 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3664 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3665#endif
3666}
3667
3668module_exit(qlcnic_exit_module);