drivers/net/qlcnic/qlcnic_main.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
 25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37
38MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
39MODULE_LICENSE("GPL");
40MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
41MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
42
43char qlcnic_driver_name[] = "qlcnic";
44static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
45 QLCNIC_LINUX_VERSIONID;
46
47static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
48
49/* Default to restricted 1G auto-neg mode */
50static int wol_port_mode = 5;
51
52static int use_msi = 1;
53module_param(use_msi, int, 0644);
 54MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
55
56static int use_msi_x = 1;
57module_param(use_msi_x, int, 0644);
 58MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
59
60static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
61module_param(auto_fw_reset, int, 0644);
 62MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
63
64static int load_fw_file;
65module_param(load_fw_file, int, 0644);
 66MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
67
68static int qlcnic_config_npars;
69module_param(qlcnic_config_npars, int, 0644);
 70MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
71
72static int __devinit qlcnic_probe(struct pci_dev *pdev,
73 const struct pci_device_id *ent);
74static void __devexit qlcnic_remove(struct pci_dev *pdev);
75static int qlcnic_open(struct net_device *netdev);
76static int qlcnic_close(struct net_device *netdev);
77static void qlcnic_tx_timeout(struct net_device *netdev);
78static void qlcnic_tx_timeout_task(struct work_struct *work);
79static void qlcnic_attach_work(struct work_struct *work);
80static void qlcnic_fwinit_work(struct work_struct *work);
81static void qlcnic_fw_poll_work(struct work_struct *work);
82static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
83 work_func_t func, int delay);
84static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
85static int qlcnic_poll(struct napi_struct *napi, int budget);
86#ifdef CONFIG_NET_POLL_CONTROLLER
87static void qlcnic_poll_controller(struct net_device *netdev);
88#endif
89
90static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
91static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
92static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
94
 95static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
96static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
97static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
98
 99static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
100static irqreturn_t qlcnic_intr(int irq, void *data);
101static irqreturn_t qlcnic_msi_intr(int irq, void *data);
102static irqreturn_t qlcnic_msix_intr(int irq, void *data);
103
104static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
105static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
106static int qlcnic_start_firmware(struct qlcnic_adapter *);
107
108static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
109static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *);
110static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *);
111static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
112static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
113static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
114/* PCI Device ID Table */
115#define ENTRY(device) \
116 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
117 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
118
119#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
120
121static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
122 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
123 {0,}
124};
125
126MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
127
128
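/*
 * Ring the Tx doorbell with the new producer index.  If the ring has
 * dropped to TX_STOP_THRESH free descriptors or fewer, stop the netdev
 * queue so the stack pauses until completions free up space.
 */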
129void
130qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
131 struct qlcnic_host_tx_ring *tx_ring)
132{
133 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
134
135 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
136 netif_stop_queue(adapter->netdev);
137 smp_mb();
138 adapter->stats.xmit_off++;
139 }
140}
141
142static const u32 msi_tgt_status[8] = {
143 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
144 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
145 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
146 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
147};
148
149static const
150struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
151
152static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
153{
154 writel(0, sds_ring->crb_intr_mask);
155}
156
157static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
158{
159 struct qlcnic_adapter *adapter = sds_ring->adapter;
160
161 writel(0x1, sds_ring->crb_intr_mask);
162
163 if (!QLCNIC_IS_MSI_FAMILY(adapter))
164 writel(0xfbff, adapter->tgt_mask_reg);
165}
166
167static int
168qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
169{
170 int size = sizeof(struct qlcnic_host_sds_ring) * count;
171
172 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
173
174 return (recv_ctx->sds_rings == NULL);
175}
176
177static void
178qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
179{
180 if (recv_ctx->sds_rings != NULL)
181 kfree(recv_ctx->sds_rings);
182
183 recv_ctx->sds_rings = NULL;
184}
185
186static int
187qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
188{
189 int ring;
190 struct qlcnic_host_sds_ring *sds_ring;
191 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
192
193 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
194 return -ENOMEM;
195
196 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
197 sds_ring = &recv_ctx->sds_rings[ring];
198 netif_napi_add(netdev, &sds_ring->napi,
199 qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
200 }
201
202 return 0;
203}
204
205static void
206qlcnic_napi_del(struct qlcnic_adapter *adapter)
207{
208 int ring;
209 struct qlcnic_host_sds_ring *sds_ring;
210 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
211
212 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
213 sds_ring = &recv_ctx->sds_rings[ring];
214 netif_napi_del(&sds_ring->napi);
215 }
216
217 qlcnic_free_sds_rings(&adapter->recv_ctx);
218}
219
220static void
221qlcnic_napi_enable(struct qlcnic_adapter *adapter)
222{
223 int ring;
224 struct qlcnic_host_sds_ring *sds_ring;
225 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
226
227 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
228 return;
229
230 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
231 sds_ring = &recv_ctx->sds_rings[ring];
232 napi_enable(&sds_ring->napi);
233 qlcnic_enable_int(sds_ring);
234 }
235}
236
237static void
238qlcnic_napi_disable(struct qlcnic_adapter *adapter)
239{
240 int ring;
241 struct qlcnic_host_sds_ring *sds_ring;
242 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
243
244 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
245 return;
246
247 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
248 sds_ring = &recv_ctx->sds_rings[ring];
249 qlcnic_disable_int(sds_ring);
250 napi_synchronize(&sds_ring->napi);
251 napi_disable(&sds_ring->napi);
252 }
253}
254
255static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
256{
257 memset(&adapter->stats, 0, sizeof(adapter->stats));
258}
259
260static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
261{
262 u32 val, data;
263
264 val = adapter->ahw.board_type;
265 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
266 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
267 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
268 data = QLCNIC_PORT_MODE_802_3_AP;
269 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
270 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
271 data = QLCNIC_PORT_MODE_XG;
272 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
273 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
274 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
275 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
276 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
277 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
278 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
279 } else {
280 data = QLCNIC_PORT_MODE_AUTO_NEG;
281 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
282 }
283
284 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
286 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
287 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
288 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
289 }
290 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
291 }
292}
293
294static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
295{
296 u32 control;
297 int pos;
298
299 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
300 if (pos) {
301 pci_read_config_dword(pdev, pos, &control);
302 if (enable)
303 control |= PCI_MSIX_FLAGS_ENABLE;
304 else
305 control = 0;
306 pci_write_config_dword(pdev, pos, control);
307 }
308}
309
310static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
311{
312 int i;
313
314 for (i = 0; i < count; i++)
315 adapter->msix_entries[i].entry = i;
316}
317
318static int
319qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
320{
321 u8 mac_addr[ETH_ALEN];
322 struct net_device *netdev = adapter->netdev;
323 struct pci_dev *pdev = adapter->pdev;
324
325 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
326 return -EIO;
327
328 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
329 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
330 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
331
332 /* set station address */
333
334 if (!is_valid_ether_addr(netdev->perm_addr))
335 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
336 netdev->dev_addr);
337
338 return 0;
339}
340
341static int qlcnic_set_mac(struct net_device *netdev, void *p)
342{
343 struct qlcnic_adapter *adapter = netdev_priv(netdev);
344 struct sockaddr *addr = p;
345
346 if (!is_valid_ether_addr(addr->sa_data))
347 return -EINVAL;
348
349 if (netif_running(netdev)) {
350 netif_device_detach(netdev);
351 qlcnic_napi_disable(adapter);
352 }
353
354 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
355 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
356 qlcnic_set_multi(adapter->netdev);
357
358 if (netif_running(netdev)) {
359 netif_device_attach(netdev);
360 qlcnic_napi_enable(adapter);
361 }
362 return 0;
363}
364
365static const struct net_device_ops qlcnic_netdev_ops = {
366 .ndo_open = qlcnic_open,
367 .ndo_stop = qlcnic_close,
368 .ndo_start_xmit = qlcnic_xmit_frame,
369 .ndo_get_stats = qlcnic_get_stats,
370 .ndo_validate_addr = eth_validate_addr,
371 .ndo_set_multicast_list = qlcnic_set_multi,
372 .ndo_set_mac_address = qlcnic_set_mac,
373 .ndo_change_mtu = qlcnic_change_mtu,
374 .ndo_tx_timeout = qlcnic_tx_timeout,
375#ifdef CONFIG_NET_POLL_CONTROLLER
376 .ndo_poll_controller = qlcnic_poll_controller,
377#endif
378};
379
380static struct qlcnic_nic_template qlcnic_ops = {
381 .get_mac_addr = qlcnic_get_mac_addr,
382 .config_bridged_mode = qlcnic_config_bridged_mode,
383 .config_led = qlcnic_config_led,
384 .set_ilb_mode = qlcnic_set_ilb_mode,
385 .clear_ilb_mode = qlcnic_clear_ilb_mode,
386 .start_firmware = qlcnic_start_firmware
387};
388
389static struct qlcnic_nic_template qlcnic_pf_ops = {
390 .get_mac_addr = qlcnic_get_mac_address,
391 .config_bridged_mode = qlcnic_config_bridged_mode,
392 .config_led = qlcnic_config_led,
393 .set_ilb_mode = qlcnic_set_ilb_mode,
394 .clear_ilb_mode = qlcnic_clear_ilb_mode,
395 .start_firmware = qlcnic_start_firmware
396};
397
398static struct qlcnic_nic_template qlcnic_vf_ops = {
399 .get_mac_addr = qlcnic_get_mac_address,
400 .config_bridged_mode = qlcnicvf_config_bridged_mode,
401 .config_led = qlcnicvf_config_led,
402 .set_ilb_mode = qlcnicvf_set_ilb_mode,
403 .clear_ilb_mode = qlcnicvf_clear_ilb_mode,
404 .start_firmware = qlcnicvf_start_firmware
405};
406
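/*
 * Pick the interrupt mode: try MSI-X first (scaling max_sds_rings to the
 * vector count when RSS is supported), fall back to MSI, and finally to
 * shared legacy INTx using the per-function target status/mask registers.
 */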
407static void
408qlcnic_setup_intr(struct qlcnic_adapter *adapter)
409{
410 const struct qlcnic_legacy_intr_set *legacy_intrp;
411 struct pci_dev *pdev = adapter->pdev;
412 int err, num_msix;
413
414 if (adapter->rss_supported) {
415 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
416 MSIX_ENTRIES_PER_ADAPTER : 2;
417 } else
418 num_msix = 1;
419
420 adapter->max_sds_rings = 1;
421
422 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
423
424 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
425
426 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
427 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
428 legacy_intrp->tgt_status_reg);
429 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
430 legacy_intrp->tgt_mask_reg);
431 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
432
433 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
434 ISR_INT_STATE_REG);
435
436 qlcnic_set_msix_bit(pdev, 0);
437
438 if (adapter->msix_supported) {
439
440 qlcnic_init_msix_entries(adapter, num_msix);
441 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
442 if (err == 0) {
443 adapter->flags |= QLCNIC_MSIX_ENABLED;
444 qlcnic_set_msix_bit(pdev, 1);
445
446 if (adapter->rss_supported)
447 adapter->max_sds_rings = num_msix;
448
449 dev_info(&pdev->dev, "using msi-x interrupts\n");
450 return;
451 }
452
453 if (err > 0)
454 pci_disable_msix(pdev);
455
456 /* fall through for msi */
457 }
458
459 if (use_msi && !pci_enable_msi(pdev)) {
460 adapter->flags |= QLCNIC_MSI_ENABLED;
461 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
462 msi_tgt_status[adapter->ahw.pci_func]);
463 dev_info(&pdev->dev, "using msi interrupts\n");
464 adapter->msix_entries[0].vector = pdev->irq;
465 return;
466 }
467
468 dev_info(&pdev->dev, "using legacy interrupts\n");
469 adapter->msix_entries[0].vector = pdev->irq;
470}
471
472static void
473qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
474{
475 if (adapter->flags & QLCNIC_MSIX_ENABLED)
476 pci_disable_msix(adapter->pdev);
477 if (adapter->flags & QLCNIC_MSI_ENABLED)
478 pci_disable_msi(adapter->pdev);
479}
480
481static void
482qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
483{
484 if (adapter->ahw.pci_base0 != NULL)
485 iounmap(adapter->ahw.pci_base0);
486}
487
488static int
489qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
490{
491 u8 id;
492 u32 ref_count;
493 int i, ret = 1;
494 u32 data = QLCNIC_MGMT_FUNC;
495 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
496
497 /* If other drivers are not in use set their privilege level */
498 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
499 ret = qlcnic_api_lock(adapter);
500 if (ret)
501 goto err_lock;
502 if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
503 goto err_npar;
504
505 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
506 id = adapter->npars[i].id;
507 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
508 id == adapter->ahw.pci_func)
509 continue;
510 data |= (qlcnic_config_npars & QLC_DEV_SET_DRV(0xf, id));
511 }
512 writel(data, priv_op);
513
514err_npar:
515 qlcnic_api_unlock(adapter);
516err_lock:
517 return ret;
518}
519
520static u8
521qlcnic_set_mgmt_driver(struct qlcnic_adapter *adapter)
522{
523 u8 i, ret = 0;
524
525 if (qlcnic_get_pci_info(adapter))
526 return ret;
527 /* Set the eswitch */
528 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
529 if (!qlcnic_get_eswitch_capabilities(adapter, i,
530 &adapter->eswitch[i])) {
531 ret++;
532 qlcnic_toggle_eswitch(adapter, i, ret);
533 }
534 }
535 return ret;
536}
537
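/*
 * Read the firmware HAL version and this function's privilege level from
 * CRB space, then select the matching nic_ops template: management,
 * privileged, or non-privileged (VF).  Returns the HAL version, or 0 on
 * failure.
 */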
538static u32
539qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
540{
541 void __iomem *msix_base_addr;
542 void __iomem *priv_op;
543 u32 func;
544 u32 msix_base;
545 u32 op_mode, priv_level;
546
547 /* Determine FW API version */
548 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
549 if (adapter->fw_hal_version == ~0) {
550 adapter->nic_ops = &qlcnic_ops;
551 adapter->fw_hal_version = QLCNIC_FW_BASE;
552 adapter->ahw.pci_func = PCI_FUNC(adapter->pdev->devfn);
553 dev_info(&adapter->pdev->dev,
554 "FW does not support nic partion\n");
555 return adapter->fw_hal_version;
556 }
557
558 /* Find PCI function number */
559 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
560 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
561 msix_base = readl(msix_base_addr);
562 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
563 adapter->ahw.pci_func = func;
564
565 /* Determine function privilege level */
566 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
567 op_mode = readl(priv_op);
568 if (op_mode == QLC_DEV_DRV_DEFAULT) {
569 priv_level = QLCNIC_MGMT_FUNC;
570 if (qlcnic_api_lock(adapter))
571 return 0;
572 op_mode = (op_mode & ~QLC_DEV_SET_DRV(0xf, func)) |
573 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, func));
574 writel(op_mode, priv_op);
575 qlcnic_api_unlock(adapter);
576
577 } else
578 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
579
580 switch (priv_level) {
581 case QLCNIC_MGMT_FUNC:
582 adapter->op_mode = QLCNIC_MGMT_FUNC;
583 adapter->nic_ops = &qlcnic_pf_ops;
584 /* Set privilege level for other functions */
585 if (qlcnic_config_npars)
586 qlcnic_set_function_modes(adapter);
587 qlcnic_dev_set_npar_ready(adapter);
588 dev_info(&adapter->pdev->dev,
589 "HAL Version: %d, Management function\n",
590 adapter->fw_hal_version);
591 break;
592 case QLCNIC_PRIV_FUNC:
593 adapter->op_mode = QLCNIC_PRIV_FUNC;
594 dev_info(&adapter->pdev->dev,
595 "HAL Version: %d, Privileged function\n",
596 adapter->fw_hal_version);
597 adapter->nic_ops = &qlcnic_pf_ops;
598 break;
599 case QLCNIC_NON_PRIV_FUNC:
600 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
601 dev_info(&adapter->pdev->dev,
602 "HAL Version: %d Non Privileged function\n",
603 adapter->fw_hal_version);
604 adapter->nic_ops = &qlcnic_vf_ops;
605 break;
606 default:
607 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
608 priv_level);
609 return 0;
610 }
611 return adapter->fw_hal_version;
612}
613
614static int
615qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
616{
617 void __iomem *mem_ptr0 = NULL;
618 resource_size_t mem_base;
619 unsigned long mem_len, pci_len0 = 0;
620
621 struct pci_dev *pdev = adapter->pdev;
622
623 /* remap phys address */
624 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
625 mem_len = pci_resource_len(pdev, 0);
626
627 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
628
629 mem_ptr0 = pci_ioremap_bar(pdev, 0);
630 if (mem_ptr0 == NULL) {
631 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
632 return -EIO;
633 }
634 pci_len0 = mem_len;
635 } else {
636 return -EIO;
637 }
638
639 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
640
641 adapter->ahw.pci_base0 = mem_ptr0;
642 adapter->ahw.pci_len0 = pci_len0;
643
644 if (!qlcnic_get_driver_mode(adapter)) {
645 iounmap(adapter->ahw.pci_base0);
646 return -EIO;
647 }
648
649 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
650 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
651
652 return 0;
653}
654
655static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
656{
657 struct pci_dev *pdev = adapter->pdev;
658 int i, found = 0;
659
660 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
661 if (qlcnic_boards[i].vendor == pdev->vendor &&
662 qlcnic_boards[i].device == pdev->device &&
663 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
664 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
665 sprintf(name, "%pM: %s" ,
666 adapter->mac_addr,
667 qlcnic_boards[i].short_name);
668 found = 1;
669 break;
670 }
671
672 }
673
674 if (!found)
675 sprintf(name, "Unknown");
676}
677
678static void
679qlcnic_check_options(struct qlcnic_adapter *adapter)
680{
681 u32 fw_major, fw_minor, fw_build;
682 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
683 char serial_num[32];
684 int i, offset, val;
685 int *ptr32;
686 struct pci_dev *pdev = adapter->pdev;
687
688 adapter->driver_mismatch = 0;
689
690 ptr32 = (int *)&serial_num;
691 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
692 for (i = 0; i < 8; i++) {
693 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
694 dev_err(&pdev->dev, "error reading board info\n");
695 adapter->driver_mismatch = 1;
696 return;
697 }
698 ptr32[i] = cpu_to_le32(val);
699 offset += sizeof(u32);
700 }
701
702 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
703 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
704 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
705
706 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
707
708 if (adapter->portnum == 0) {
709 get_brd_name(adapter, brd_name);
710
711 pr_info("%s: %s Board Chip rev 0x%x\n",
712 module_name(THIS_MODULE),
713 brd_name, adapter->ahw.revision_id);
714 }
715
716 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
717 fw_major, fw_minor, fw_build);
718
719 if (adapter->fw_hal_version == QLCNIC_FW_NPAR)
720 qlcnic_get_nic_info(adapter, adapter->ahw.pci_func);
721 else
722 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
723
724 adapter->flags &= ~QLCNIC_LRO_ENABLED;
725
726 if (adapter->ahw.port_type == QLCNIC_XGBE) {
727 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
728 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
729 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
730 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
731 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
732 }
733
734 adapter->msix_supported = !!use_msi_x;
735 adapter->rss_supported = !!use_msi_x;
736
737 adapter->num_txd = MAX_CMD_DESCRIPTORS;
738
739 adapter->max_rds_rings = 2;
740}
741
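/*
 * Bring up the firmware: only one function is allowed to load and boot it
 * (qlcnic_can_start_firmware decides); the others skip straight to the
 * handshake in qlcnic_phantom_init and wait for the device to go READY.
 * On failure the device state is set to FAILED.
 */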
742static int
743qlcnic_start_firmware(struct qlcnic_adapter *adapter)
744{
745 int val, err, first_boot;
746
747 err = qlcnic_can_start_firmware(adapter);
748 if (err < 0)
749 return err;
750 else if (!err)
751 goto wait_init;
752
753 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
754 if (first_boot == 0x55555555)
755 /* This is the first boot after power up */
756 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
757
758 if (load_fw_file)
759 qlcnic_request_firmware(adapter);
760 else
761 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
762
763 err = qlcnic_need_fw_reset(adapter);
764 if (err < 0)
765 goto err_out;
766 if (err == 0)
767 goto wait_init;
768
769 if (first_boot != 0x55555555) {
770 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
771 qlcnic_pinit_from_rom(adapter);
772 msleep(1);
773 }
774
775 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
776 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
777
778 qlcnic_set_port_mode(adapter);
779
780 err = qlcnic_load_firmware(adapter);
781 if (err)
782 goto err_out;
783
784 qlcnic_release_firmware(adapter);
785
786 val = (_QLCNIC_LINUX_MAJOR << 16)
787 | ((_QLCNIC_LINUX_MINOR << 8))
788 | (_QLCNIC_LINUX_SUBVERSION);
789 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
790
791wait_init:
792 /* Handshake with the card before we register the devices. */
793 err = qlcnic_phantom_init(adapter);
794 if (err)
795 goto err_out;
796
797 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
798 qlcnic_idc_debug_info(adapter, 1);
799
800 qlcnic_dev_set_npar_ready(adapter);
801
802 qlcnic_check_options(adapter);
803
804 if (adapter->fw_hal_version != QLCNIC_FW_BASE &&
805 adapter->op_mode == QLCNIC_MGMT_FUNC)
806 qlcnic_set_mgmt_driver(adapter);
807
808 adapter->need_fw_reset = 0;
809
810 qlcnic_release_firmware(adapter);
811 return 0;
812
813err_out:
814 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
815 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
816 qlcnic_release_firmware(adapter);
817 return err;
818}
819
820static int
821qlcnic_request_irq(struct qlcnic_adapter *adapter)
822{
823 irq_handler_t handler;
824 struct qlcnic_host_sds_ring *sds_ring;
825 int err, ring;
826
827 unsigned long flags = 0;
828 struct net_device *netdev = adapter->netdev;
829 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
830
831 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
832 handler = qlcnic_tmp_intr;
833 if (!QLCNIC_IS_MSI_FAMILY(adapter))
834 flags |= IRQF_SHARED;
835
836 } else {
837 if (adapter->flags & QLCNIC_MSIX_ENABLED)
838 handler = qlcnic_msix_intr;
839 else if (adapter->flags & QLCNIC_MSI_ENABLED)
840 handler = qlcnic_msi_intr;
841 else {
842 flags |= IRQF_SHARED;
843 handler = qlcnic_intr;
844 }
845 }
846 adapter->irq = netdev->irq;
847
848 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
849 sds_ring = &recv_ctx->sds_rings[ring];
850 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
851 err = request_irq(sds_ring->irq, handler,
852 flags, sds_ring->name, sds_ring);
853 if (err)
854 return err;
855 }
856
857 return 0;
858}
859
860static void
861qlcnic_free_irq(struct qlcnic_adapter *adapter)
862{
863 int ring;
864 struct qlcnic_host_sds_ring *sds_ring;
865
866 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
867
868 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
869 sds_ring = &recv_ctx->sds_rings[ring];
870 free_irq(sds_ring->irq, sds_ring);
871 }
872}
873
874static void
875qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
876{
877 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
878 adapter->coal.normal.data.rx_time_us =
879 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
880 adapter->coal.normal.data.rx_packets =
881 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
882 adapter->coal.normal.data.tx_time_us =
883 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
884 adapter->coal.normal.data.tx_packets =
885 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
886}
887
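/*
 * Common bring-up path: program the multicast list, MTU, RSS and interrupt
 * coalescing, enable hardware LRO when the firmware supports it, enable
 * NAPI and request link events before marking the device UP.
 */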
888static int
889__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
890{
891 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
892 return -EIO;
893
894 qlcnic_set_multi(netdev);
895 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
896
897 adapter->ahw.linkup = 0;
898
899 if (adapter->max_sds_rings > 1)
900 qlcnic_config_rss(adapter, 1);
901
902 qlcnic_config_intr_coalesce(adapter);
903
904 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
905 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
906
907 qlcnic_napi_enable(adapter);
908
909 qlcnic_linkevent_request(adapter, 1);
910
911 set_bit(__QLCNIC_DEV_UP, &adapter->state);
912 return 0;
913}
914
915/* Usage: during resume and the firmware recovery path. */
916
917static int
918qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
919{
920 int err = 0;
921
922 rtnl_lock();
923 if (netif_running(netdev))
924 err = __qlcnic_up(adapter, netdev);
925 rtnl_unlock();
926
927 return err;
928}
929
930static void
931__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
932{
933 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
934 return;
935
936 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
937 return;
938
939 smp_mb();
940 spin_lock(&adapter->tx_clean_lock);
941 netif_carrier_off(netdev);
942 netif_tx_disable(netdev);
943
944 qlcnic_free_mac_list(adapter);
945
946 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
947
948 qlcnic_napi_disable(adapter);
949
950 qlcnic_release_tx_buffers(adapter);
951 spin_unlock(&adapter->tx_clean_lock);
952}
953
954/* Usage: during suspend and the firmware recovery path. */
955
956static void
957qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
958{
959 rtnl_lock();
960 if (netif_running(netdev))
961 __qlcnic_down(adapter, netdev);
962 rtnl_unlock();
963
964}
965
966static int
967qlcnic_attach(struct qlcnic_adapter *adapter)
968{
969 struct net_device *netdev = adapter->netdev;
970 struct pci_dev *pdev = adapter->pdev;
971 int err, ring;
972 struct qlcnic_host_rds_ring *rds_ring;
973
974 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
975 return 0;
976
977 err = qlcnic_init_firmware(adapter);
978 if (err)
979 return err;
980
981 err = qlcnic_napi_add(adapter, netdev);
982 if (err)
983 return err;
984
985 err = qlcnic_alloc_sw_resources(adapter);
986 if (err) {
987 dev_err(&pdev->dev, "Error in setting sw resources\n");
988 return err;
989 }
990
991 err = qlcnic_alloc_hw_resources(adapter);
992 if (err) {
993 dev_err(&pdev->dev, "Error in setting hw resources\n");
994 goto err_out_free_sw;
995 }
996
997
998 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
999 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1000 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1001 }
1002
1003 err = qlcnic_request_irq(adapter);
1004 if (err) {
1005 dev_err(&pdev->dev, "failed to setup interrupt\n");
1006 goto err_out_free_rxbuf;
1007 }
1008
1009 qlcnic_init_coalesce_defaults(adapter);
1010
1011 qlcnic_create_sysfs_entries(adapter);
1012
1013 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1014 return 0;
1015
1016err_out_free_rxbuf:
1017 qlcnic_release_rx_buffers(adapter);
1018 qlcnic_free_hw_resources(adapter);
1019err_out_free_sw:
1020 qlcnic_free_sw_resources(adapter);
1021 return err;
1022}
1023
1024static void
1025qlcnic_detach(struct qlcnic_adapter *adapter)
1026{
1027 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1028 return;
1029
1030 qlcnic_remove_sysfs_entries(adapter);
1031
1032 qlcnic_free_hw_resources(adapter);
1033 qlcnic_release_rx_buffers(adapter);
1034 qlcnic_free_irq(adapter);
1035 qlcnic_napi_del(adapter);
1036 qlcnic_free_sw_resources(adapter);
1037
1038 adapter->is_up = 0;
1039}
1040
1041void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1042{
1043 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1044 struct qlcnic_host_sds_ring *sds_ring;
1045 int ring;
1046
1047 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1048 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1049 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1050 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1051 qlcnic_disable_int(sds_ring);
1052 }
1053 }
1054
1055 qlcnic_detach(adapter);
1056
1057 adapter->diag_test = 0;
1058 adapter->max_sds_rings = max_sds_rings;
1059
1060 if (qlcnic_attach(adapter))
1061 goto out;
1062
1063 if (netif_running(netdev))
1064 __qlcnic_up(adapter, netdev);
1065out:
1066 netif_device_attach(netdev);
1067}
1068
1069int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1070{
1071 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1072 struct qlcnic_host_sds_ring *sds_ring;
1073 int ring;
1074 int ret;
1075
1076 netif_device_detach(netdev);
1077
1078 if (netif_running(netdev))
1079 __qlcnic_down(adapter, netdev);
1080
1081 qlcnic_detach(adapter);
1082
1083 adapter->max_sds_rings = 1;
1084 adapter->diag_test = test;
1085
1086 ret = qlcnic_attach(adapter);
1087 if (ret) {
1088 netif_device_attach(netdev);
1089 return ret;
1090 }
1091
1092 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1093 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1094 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1095 qlcnic_enable_int(sds_ring);
1096 }
1097 }
1098 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1099
1100 return 0;
1101}
1102
1103int
1104qlcnic_reset_context(struct qlcnic_adapter *adapter)
1105{
1106 int err = 0;
1107 struct net_device *netdev = adapter->netdev;
1108
1109 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1110 return -EBUSY;
1111
1112 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1113
1114 netif_device_detach(netdev);
1115
1116 if (netif_running(netdev))
1117 __qlcnic_down(adapter, netdev);
1118
1119 qlcnic_detach(adapter);
1120
1121 if (netif_running(netdev)) {
1122 err = qlcnic_attach(adapter);
1123 if (!err)
1124 __qlcnic_up(adapter, netdev);
1125 }
1126
1127 netif_device_attach(netdev);
1128 }
1129
1130 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1131 return err;
1132}
1133
1134static int
1135qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1136 struct net_device *netdev, u8 pci_using_dac)
1137{
1138 int err;
1139 struct pci_dev *pdev = adapter->pdev;
1140
1141 adapter->rx_csum = 1;
1142 adapter->mc_enabled = 0;
1143 adapter->max_mc_count = 38;
1144
1145 netdev->netdev_ops = &qlcnic_netdev_ops;
1146 netdev->watchdog_timeo = 2*HZ;
1147
1148 qlcnic_change_mtu(netdev, netdev->mtu);
1149
1150 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1151
1152 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1153 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6);
1154
1155 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1156 NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6);
1157
1158 if (pci_using_dac) {
1159 netdev->features |= NETIF_F_HIGHDMA;
1160 netdev->vlan_features |= NETIF_F_HIGHDMA;
1161 }
1162
1163 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1164 netdev->features |= (NETIF_F_HW_VLAN_TX);
1165
1166 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1167 netdev->features |= NETIF_F_LRO;
1168
1169 netdev->irq = adapter->msix_entries[0].vector;
1170
1171 INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
1172
1173 if (qlcnic_read_mac_addr(adapter))
1174 dev_warn(&pdev->dev, "failed to read mac addr\n");
1175
1176 netif_carrier_off(netdev);
1177 netif_stop_queue(netdev);
1178
1179 err = register_netdev(netdev);
1180 if (err) {
1181 dev_err(&pdev->dev, "failed to register net device\n");
1182 return err;
1183 }
1184
1185 return 0;
1186}
1187
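/* Prefer a 64-bit DMA mask and fall back to 32-bit before giving up. */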
1188static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1189{
1190 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1191 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1192 *pci_using_dac = 1;
1193 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1194 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1195 *pci_using_dac = 0;
1196 else {
1197 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1198 return -EIO;
1199 }
1200
1201 return 0;
1202}
1203
1204static int __devinit
1205qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1206{
1207 struct net_device *netdev = NULL;
1208 struct qlcnic_adapter *adapter = NULL;
1209 int err;
1210 uint8_t revision_id;
1211 uint8_t pci_using_dac;
1212
1213 err = pci_enable_device(pdev);
1214 if (err)
1215 return err;
1216
1217 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1218 err = -ENODEV;
1219 goto err_out_disable_pdev;
1220 }
1221
1222 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1223 if (err)
1224 goto err_out_disable_pdev;
1225
1226 err = pci_request_regions(pdev, qlcnic_driver_name);
1227 if (err)
1228 goto err_out_disable_pdev;
1229
1230 pci_set_master(pdev);
1231
1232 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1233 if (!netdev) {
1234 dev_err(&pdev->dev, "failed to allocate net_device\n");
1235 err = -ENOMEM;
1236 goto err_out_free_res;
1237 }
1238
1239 SET_NETDEV_DEV(netdev, &pdev->dev);
1240
1241 adapter = netdev_priv(netdev);
1242 adapter->netdev = netdev;
1243 adapter->pdev = pdev;
1244 adapter->dev_rst_time = jiffies;
1245
1246 revision_id = pdev->revision;
1247 adapter->ahw.revision_id = revision_id;
1248
1249 rwlock_init(&adapter->ahw.crb_lock);
1250 mutex_init(&adapter->ahw.mem_lock);
1251
1252 spin_lock_init(&adapter->tx_clean_lock);
1253 INIT_LIST_HEAD(&adapter->mac_list);
1254
1255 err = qlcnic_setup_pci_map(adapter);
1256 if (err)
1257 goto err_out_free_netdev;
1258
1259 /* This will be reset for mezz cards */
1260 adapter->portnum = adapter->ahw.pci_func;
1261
1262 err = qlcnic_get_board_info(adapter);
1263 if (err) {
1264 dev_err(&pdev->dev, "Error getting board config info.\n");
1265 goto err_out_iounmap;
1266 }
1267
1268 if (qlcnic_read_mac_addr(adapter))
1269 dev_warn(&pdev->dev, "failed to read mac addr\n");
1270
1271 if (qlcnic_setup_idc_param(adapter))
1272 goto err_out_iounmap;
1273
1274 err = adapter->nic_ops->start_firmware(adapter);
1275 if (err) {
1276 dev_err(&pdev->dev, "Loading fw failed. Please reboot.\n");
1277 goto err_out_decr_ref;
1278 }
1279
1280 qlcnic_clear_stats(adapter);
1281
1282 qlcnic_setup_intr(adapter);
1283
1284 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1285 if (err)
1286 goto err_out_disable_msi;
1287
1288 pci_set_drvdata(pdev, adapter);
1289
1290 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1291
1292 switch (adapter->ahw.port_type) {
1293 case QLCNIC_GBE:
1294 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1295 adapter->netdev->name);
1296 break;
1297 case QLCNIC_XGBE:
1298 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1299 adapter->netdev->name);
1300 break;
1301 }
1302
1303 qlcnic_create_diag_entries(adapter);
1304
1305 return 0;
1306
1307err_out_disable_msi:
1308 qlcnic_teardown_intr(adapter);
1309
1310err_out_decr_ref:
1311 qlcnic_clr_all_drv_state(adapter);
1312
1313err_out_iounmap:
1314 qlcnic_cleanup_pci_map(adapter);
1315
1316err_out_free_netdev:
1317 free_netdev(netdev);
1318
1319err_out_free_res:
1320 pci_release_regions(pdev);
1321
1322err_out_disable_pdev:
1323 pci_set_drvdata(pdev, NULL);
1324 pci_disable_device(pdev);
1325 return err;
1326}
1327
1328static void __devexit qlcnic_remove(struct pci_dev *pdev)
1329{
1330 struct qlcnic_adapter *adapter;
1331 struct net_device *netdev;
1332
1333 adapter = pci_get_drvdata(pdev);
1334 if (adapter == NULL)
1335 return;
1336
1337 netdev = adapter->netdev;
1338
1339 qlcnic_cancel_fw_work(adapter);
1340
1341 unregister_netdev(netdev);
1342
1343 cancel_work_sync(&adapter->tx_timeout_task);
1344
1345 qlcnic_detach(adapter);
1346
1347 if (adapter->npars != NULL)
1348 kfree(adapter->npars);
1349 if (adapter->eswitch != NULL)
1350 kfree(adapter->eswitch);
1351
1352 qlcnic_clr_all_drv_state(adapter);
1353
1354 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1355
1356 qlcnic_teardown_intr(adapter);
1357
1358 qlcnic_remove_diag_entries(adapter);
1359
1360 qlcnic_cleanup_pci_map(adapter);
1361
1362 qlcnic_release_firmware(adapter);
1363
1364 pci_release_regions(pdev);
1365 pci_disable_device(pdev);
1366 pci_set_drvdata(pdev, NULL);
1367
1368 free_netdev(netdev);
1369}
1370static int __qlcnic_shutdown(struct pci_dev *pdev)
1371{
1372 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1373 struct net_device *netdev = adapter->netdev;
1374 int retval;
1375
1376 netif_device_detach(netdev);
1377
1378 qlcnic_cancel_fw_work(adapter);
1379
1380 if (netif_running(netdev))
1381 qlcnic_down(adapter, netdev);
1382
1383 cancel_work_sync(&adapter->tx_timeout_task);
1384
1385 qlcnic_detach(adapter);
1386
1387 qlcnic_clr_all_drv_state(adapter);
1388
1389 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1390
1391 retval = pci_save_state(pdev);
1392 if (retval)
1393 return retval;
1394
1395 if (qlcnic_wol_supported(adapter)) {
1396 pci_enable_wake(pdev, PCI_D3cold, 1);
1397 pci_enable_wake(pdev, PCI_D3hot, 1);
1398 }
1399
1400 return 0;
1401}
1402
1403static void qlcnic_shutdown(struct pci_dev *pdev)
1404{
1405 if (__qlcnic_shutdown(pdev))
1406 return;
1407
1408 pci_disable_device(pdev);
1409}
1410
1411#ifdef CONFIG_PM
1412static int
1413qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1414{
1415 int retval;
1416
1417 retval = __qlcnic_shutdown(pdev);
1418 if (retval)
1419 return retval;
1420
1421 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1422 return 0;
1423}
1424
1425static int
1426qlcnic_resume(struct pci_dev *pdev)
1427{
1428 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1429 struct net_device *netdev = adapter->netdev;
1430 int err;
1431
1432 err = pci_enable_device(pdev);
1433 if (err)
1434 return err;
1435
1436 pci_set_power_state(pdev, PCI_D0);
1437 pci_set_master(pdev);
1438 pci_restore_state(pdev);
1439
1440 err = adapter->nic_ops->start_firmware(adapter);
1441 if (err) {
1442 dev_err(&pdev->dev, "failed to start firmware\n");
1443 return err;
1444 }
1445
1446 if (netif_running(netdev)) {
1447 err = qlcnic_attach(adapter);
1448 if (err)
1449 goto err_out;
1450
1451 err = qlcnic_up(adapter, netdev);
1452 if (err)
1453 goto err_out_detach;
1454
1455
1456 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1457 }
1458
1459 netif_device_attach(netdev);
1460 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1461 return 0;
1462
1463err_out_detach:
1464 qlcnic_detach(adapter);
1465err_out:
1466 qlcnic_clr_all_drv_state(adapter);
1467 netif_device_attach(netdev);
1468 return err;
1469}
1470#endif
1471
1472static int qlcnic_open(struct net_device *netdev)
1473{
1474 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1475 int err;
1476
1477 if (adapter->driver_mismatch)
1478 return -EIO;
1479
1480 err = qlcnic_attach(adapter);
1481 if (err)
1482 return err;
1483
1484 err = __qlcnic_up(adapter, netdev);
1485 if (err)
1486 goto err_out;
1487
1488 netif_start_queue(netdev);
1489
1490 return 0;
1491
1492err_out:
1493 qlcnic_detach(adapter);
1494 return err;
1495}
1496
1497/*
1498 * qlcnic_close - Disables a network interface entry point
1499 */
1500static int qlcnic_close(struct net_device *netdev)
1501{
1502 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1503
1504 __qlcnic_down(adapter, netdev);
1505 return 0;
1506}
1507
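/*
 * Fill in the checksum/LSO opcode and VLAN flags for a Tx frame.  For LSO
 * the MAC/IP/TCP headers (plus a synthesized 802.1Q header for out-of-band
 * VLAN tags) are also copied into the descriptor ring for the firmware.
 */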
1508static void
1509qlcnic_tso_check(struct net_device *netdev,
1510 struct qlcnic_host_tx_ring *tx_ring,
1511 struct cmd_desc_type0 *first_desc,
1512 struct sk_buff *skb)
1513{
1514 u8 opcode = TX_ETHER_PKT;
1515 __be16 protocol = skb->protocol;
1516 u16 flags = 0, vid = 0;
af19b491
AKS
1517 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1518 struct cmd_desc_type0 *hwdesc;
1519 struct vlan_ethhdr *vh;
1520 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1521 u32 producer = tx_ring->producer;
1522
1523 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1524
1525 vh = (struct vlan_ethhdr *)skb->data;
1526 protocol = vh->h_vlan_encapsulated_proto;
1527 flags = FLAGS_VLAN_TAGGED;
1528
1529 } else if (vlan_tx_tag_present(skb)) {
1530
1531 flags = FLAGS_VLAN_OOB;
1532 vid = vlan_tx_tag_get(skb);
1533 qlcnic_set_tx_vlan_tci(first_desc, vid);
1534 vlan_oob = 1;
1535 }
1536
1537 if (*(skb->data) & BIT_0) {
1538 flags |= BIT_0;
1539 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1540 }
1541
1542 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1543 skb_shinfo(skb)->gso_size > 0) {
1544
1545 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1546
1547 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1548 first_desc->total_hdr_length = hdr_len;
1549 if (vlan_oob) {
1550 first_desc->total_hdr_length += VLAN_HLEN;
1551 first_desc->tcp_hdr_offset = VLAN_HLEN;
1552 first_desc->ip_hdr_offset = VLAN_HLEN;
1553 /* Only in case of TSO on vlan device */
1554 flags |= FLAGS_VLAN_TAGGED;
1555 }
1556
1557 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1558 TX_TCP_LSO6 : TX_TCP_LSO;
1559 tso = 1;
1560
1561 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1562 u8 l4proto;
1563
1564 if (protocol == cpu_to_be16(ETH_P_IP)) {
1565 l4proto = ip_hdr(skb)->protocol;
1566
1567 if (l4proto == IPPROTO_TCP)
1568 opcode = TX_TCP_PKT;
1569 else if (l4proto == IPPROTO_UDP)
1570 opcode = TX_UDP_PKT;
1571 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1572 l4proto = ipv6_hdr(skb)->nexthdr;
1573
1574 if (l4proto == IPPROTO_TCP)
1575 opcode = TX_TCPV6_PKT;
1576 else if (l4proto == IPPROTO_UDP)
1577 opcode = TX_UDPV6_PKT;
1578 }
1579 }
1580
1581 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1582 first_desc->ip_hdr_offset += skb_network_offset(skb);
1583 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1584
1585 if (!tso)
1586 return;
1587
1588 /* For LSO, we need to copy the MAC/IP/TCP headers into
1589 * the descriptor ring
1590 */
1591 copied = 0;
1592 offset = 2;
1593
1594 if (vlan_oob) {
1595 /* Create a TSO vlan header template for firmware */
1596
1597 hwdesc = &tx_ring->desc_head[producer];
1598 tx_ring->cmd_buf_arr[producer].skb = NULL;
1599
1600 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1601 hdr_len + VLAN_HLEN);
1602
1603 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1604 skb_copy_from_linear_data(skb, vh, 12);
1605 vh->h_vlan_proto = htons(ETH_P_8021Q);
1606 vh->h_vlan_TCI = htons(vid);
1607 skb_copy_from_linear_data_offset(skb, 12,
1608 (char *)vh + 16, copy_len - 16);
1609
1610 copied = copy_len - VLAN_HLEN;
1611 offset = 0;
1612
1613 producer = get_next_index(producer, tx_ring->num_desc);
1614 }
1615
1616 while (copied < hdr_len) {
1617
1618 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1619 (hdr_len - copied));
1620
1621 hwdesc = &tx_ring->desc_head[producer];
1622 tx_ring->cmd_buf_arr[producer].skb = NULL;
1623
1624 skb_copy_from_linear_data_offset(skb, copied,
1625 (char *)hwdesc + offset, copy_len);
1626
1627 copied += copy_len;
1628 offset = 0;
1629
1630 producer = get_next_index(producer, tx_ring->num_desc);
1631 }
1632
1633 tx_ring->producer = producer;
1634 barrier();
1635 adapter->stats.lso_frames++;
1636}
1637
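/*
 * DMA-map the skb head and every page fragment into the frag_array; on a
 * mapping failure, unwind whatever was already mapped and return -ENOMEM.
 */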
1638static int
1639qlcnic_map_tx_skb(struct pci_dev *pdev,
1640 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1641{
1642 struct qlcnic_skb_frag *nf;
1643 struct skb_frag_struct *frag;
1644 int i, nr_frags;
1645 dma_addr_t map;
1646
1647 nr_frags = skb_shinfo(skb)->nr_frags;
1648 nf = &pbuf->frag_array[0];
1649
1650 map = pci_map_single(pdev, skb->data,
1651 skb_headlen(skb), PCI_DMA_TODEVICE);
1652 if (pci_dma_mapping_error(pdev, map))
1653 goto out_err;
1654
1655 nf->dma = map;
1656 nf->length = skb_headlen(skb);
1657
1658 for (i = 0; i < nr_frags; i++) {
1659 frag = &skb_shinfo(skb)->frags[i];
1660 nf = &pbuf->frag_array[i+1];
1661
1662 map = pci_map_page(pdev, frag->page, frag->page_offset,
1663 frag->size, PCI_DMA_TODEVICE);
1664 if (pci_dma_mapping_error(pdev, map))
1665 goto unwind;
1666
1667 nf->dma = map;
1668 nf->length = frag->size;
1669 }
1670
1671 return 0;
1672
1673unwind:
1674 while (--i >= 0) {
1675 nf = &pbuf->frag_array[i+1];
1676 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1677 }
1678
1679 nf = &pbuf->frag_array[0];
1680 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1681
1682out_err:
1683 return -ENOMEM;
1684}
1685
1686static inline void
1687qlcnic_clear_cmddesc(u64 *desc)
1688{
1689 desc[0] = 0ULL;
1690 desc[2] = 0ULL;
1691}
1692
1693netdev_tx_t
1694qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1695{
1696 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1697 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1698 struct qlcnic_cmd_buffer *pbuf;
1699 struct qlcnic_skb_frag *buffrag;
1700 struct cmd_desc_type0 *hwdesc, *first_desc;
1701 struct pci_dev *pdev;
1702 int i, k;
1703
1704 u32 producer;
1705 int frag_count, no_of_desc;
1706 u32 num_txd = tx_ring->num_desc;
1707
1708 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1709 netif_stop_queue(netdev);
1710 return NETDEV_TX_BUSY;
1711 }
1712
1713 frag_count = skb_shinfo(skb)->nr_frags + 1;
1714
1715 /* 4 fragments per cmd desc */
1716 no_of_desc = (frag_count + 3) >> 2;
1717
1718 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
1719 netif_stop_queue(netdev);
1720 adapter->stats.xmit_off++;
1721 return NETDEV_TX_BUSY;
1722 }
1723
1724 producer = tx_ring->producer;
1725 pbuf = &tx_ring->cmd_buf_arr[producer];
1726
1727 pdev = adapter->pdev;
1728
1729 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1730 adapter->stats.tx_dma_map_error++;
1731 goto drop_packet;
1732 }
1733
1734 pbuf->skb = skb;
1735 pbuf->frag_count = frag_count;
1736
1737 first_desc = hwdesc = &tx_ring->desc_head[producer];
1738 qlcnic_clear_cmddesc((u64 *)hwdesc);
1739
1740 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1741 qlcnic_set_tx_port(first_desc, adapter->portnum);
1742
1743 for (i = 0; i < frag_count; i++) {
1744
1745 k = i % 4;
1746
1747 if ((k == 0) && (i > 0)) {
1748 /* move to next desc.*/
1749 producer = get_next_index(producer, num_txd);
1750 hwdesc = &tx_ring->desc_head[producer];
1751 qlcnic_clear_cmddesc((u64 *)hwdesc);
1752 tx_ring->cmd_buf_arr[producer].skb = NULL;
1753 }
1754
1755 buffrag = &pbuf->frag_array[i];
1756
1757 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1758 switch (k) {
1759 case 0:
1760 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1761 break;
1762 case 1:
1763 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1764 break;
1765 case 2:
1766 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1767 break;
1768 case 3:
1769 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1770 break;
1771 }
1772 }
1773
1774 tx_ring->producer = get_next_index(producer, num_txd);
1775
1776 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1777
1778 qlcnic_update_cmd_producer(adapter, tx_ring);
1779
1780 adapter->stats.txbytes += skb->len;
1781 adapter->stats.xmitcalled++;
1782
1783 return NETDEV_TX_OK;
1784
1785drop_packet:
1786 adapter->stats.txdropped++;
1787 dev_kfree_skb_any(skb);
1788 return NETDEV_TX_OK;
1789}
1790
1791static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1792{
1793 struct net_device *netdev = adapter->netdev;
1794 u32 temp, temp_state, temp_val;
1795 int rv = 0;
1796
1797 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1798
1799 temp_state = qlcnic_get_temp_state(temp);
1800 temp_val = qlcnic_get_temp_val(temp);
1801
1802 if (temp_state == QLCNIC_TEMP_PANIC) {
1803 dev_err(&netdev->dev,
1804 "Device temperature %d degrees C exceeds"
1805 " maximum allowed. Hardware has been shut down.\n",
1806 temp_val);
1807 rv = 1;
1808 } else if (temp_state == QLCNIC_TEMP_WARN) {
1809 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1810 dev_err(&netdev->dev,
1811 "Device temperature %d degrees C "
1812 "exceeds operating range."
1813 " Immediate action needed.\n",
1814 temp_val);
1815 }
1816 } else {
1817 if (adapter->temp == QLCNIC_TEMP_WARN) {
1818 dev_info(&netdev->dev,
1819 "Device temperature is now %d degrees C"
1820 " in normal range.\n", temp_val);
1821 }
1822 }
1823 adapter->temp = temp_state;
1824 return rv;
1825}
1826
1827void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1828{
1829 struct net_device *netdev = adapter->netdev;
1830
1831 if (adapter->ahw.linkup && !linkup) {
1832 dev_info(&netdev->dev, "NIC Link is down\n");
1833 adapter->ahw.linkup = 0;
1834 if (netif_running(netdev)) {
1835 netif_carrier_off(netdev);
1836 netif_stop_queue(netdev);
1837 }
1838 } else if (!adapter->ahw.linkup && linkup) {
1839 dev_info(&netdev->dev, "NIC Link is up\n");
1840 adapter->ahw.linkup = 1;
1841 if (netif_running(netdev)) {
1842 netif_carrier_on(netdev);
1843 netif_wake_queue(netdev);
1844 }
1845 }
1846}
1847
1848static void qlcnic_tx_timeout(struct net_device *netdev)
1849{
1850 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1851
1852 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1853 return;
1854
1855 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1856 schedule_work(&adapter->tx_timeout_task);
1857}
1858
1859static void qlcnic_tx_timeout_task(struct work_struct *work)
1860{
1861 struct qlcnic_adapter *adapter =
1862 container_of(work, struct qlcnic_adapter, tx_timeout_task);
1863
1864 if (!netif_running(adapter->netdev))
1865 return;
1866
1867 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1868 return;
1869
1870 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1871 goto request_reset;
1872
1873 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1874 if (!qlcnic_reset_context(adapter)) {
1875 adapter->netdev->trans_start = jiffies;
1876 return;
1877
1878 /* context reset failed, fall through for fw reset */
1879 }
1880
1881request_reset:
1882 adapter->need_fw_reset = 1;
1883 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1884 QLCDB(adapter, DRV, "Resetting adapter\n");
1885}
1886
1887static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1888{
1889 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1890 struct net_device_stats *stats = &netdev->stats;
1891
1892 memset(stats, 0, sizeof(*stats));
1893
1894 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1895 stats->tx_packets = adapter->stats.xmitfinished;
1896 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
1897 stats->tx_bytes = adapter->stats.txbytes;
1898 stats->rx_dropped = adapter->stats.rxdropped;
1899 stats->tx_dropped = adapter->stats.txdropped;
1900
1901 return stats;
1902}
1903
1904static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
1905{
1906 u32 status;
1907
1908 status = readl(adapter->isr_int_vec);
1909
1910 if (!(status & adapter->int_vec_bit))
1911 return IRQ_NONE;
1912
1913 /* check interrupt state machine, to be sure */
1914 status = readl(adapter->crb_int_state_reg);
1915 if (!ISR_LEGACY_INT_TRIGGERED(status))
1916 return IRQ_NONE;
1917
1918 writel(0xffffffff, adapter->tgt_status_reg);
1919 /* read twice to ensure write is flushed */
1920 readl(adapter->isr_int_vec);
1921 readl(adapter->isr_int_vec);
1922
1923 return IRQ_HANDLED;
1924}
1925
1926static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
1927{
1928 struct qlcnic_host_sds_ring *sds_ring = data;
1929 struct qlcnic_adapter *adapter = sds_ring->adapter;
1930
1931 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1932 goto done;
1933 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
1934 writel(0xffffffff, adapter->tgt_status_reg);
1935 goto done;
1936 }
1937
1938 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1939 return IRQ_NONE;
1940
1941done:
1942 adapter->diag_cnt++;
1943 qlcnic_enable_int(sds_ring);
1944 return IRQ_HANDLED;
1945}
1946
1947static irqreturn_t qlcnic_intr(int irq, void *data)
1948{
1949 struct qlcnic_host_sds_ring *sds_ring = data;
1950 struct qlcnic_adapter *adapter = sds_ring->adapter;
1951
1952 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1953 return IRQ_NONE;
1954
1955 napi_schedule(&sds_ring->napi);
1956
1957 return IRQ_HANDLED;
1958}
1959
1960static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1961{
1962 struct qlcnic_host_sds_ring *sds_ring = data;
1963 struct qlcnic_adapter *adapter = sds_ring->adapter;
1964
1965 /* clear interrupt */
1966 writel(0xffffffff, adapter->tgt_status_reg);
1967
1968 napi_schedule(&sds_ring->napi);
1969 return IRQ_HANDLED;
1970}
1971
1972static irqreturn_t qlcnic_msix_intr(int irq, void *data)
1973{
1974 struct qlcnic_host_sds_ring *sds_ring = data;
1975
1976 napi_schedule(&sds_ring->napi);
1977 return IRQ_HANDLED;
1978}
1979
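/*
 * Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap the DMA buffers, free the skbs, and wake the queue if it was
 * stopped and enough descriptors are free again. Returns nonzero once
 * the ring is clean up to the hardware consumer (or if another CPU
 * already holds tx_clean_lock).
 */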
1980static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1981{
1982 u32 sw_consumer, hw_consumer;
1983 int count = 0, i;
1984 struct qlcnic_cmd_buffer *buffer;
1985 struct pci_dev *pdev = adapter->pdev;
1986 struct net_device *netdev = adapter->netdev;
1987 struct qlcnic_skb_frag *frag;
1988 int done;
1989 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1990
1991 if (!spin_trylock(&adapter->tx_clean_lock))
1992 return 1;
1993
1994 sw_consumer = tx_ring->sw_consumer;
1995 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1996
1997 while (sw_consumer != hw_consumer) {
1998 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1999 if (buffer->skb) {
2000 frag = &buffer->frag_array[0];
2001 pci_unmap_single(pdev, frag->dma, frag->length,
2002 PCI_DMA_TODEVICE);
2003 frag->dma = 0ULL;
2004 for (i = 1; i < buffer->frag_count; i++) {
2005 frag++;
2006 pci_unmap_page(pdev, frag->dma, frag->length,
2007 PCI_DMA_TODEVICE);
2008 frag->dma = 0ULL;
2009 }
2010
2011 adapter->stats.xmitfinished++;
2012 dev_kfree_skb_any(buffer->skb);
2013 buffer->skb = NULL;
2014 }
2015
2016 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2017 if (++count >= MAX_STATUS_HANDLE)
2018 break;
2019 }
2020
2021 if (count && netif_running(netdev)) {
2022 tx_ring->sw_consumer = sw_consumer;
2023
2024 smp_mb();
2025
2026 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2027 __netif_tx_lock(tx_ring->txq, smp_processor_id());
2028 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2029 netif_wake_queue(netdev);
2030 adapter->tx_timeo_cnt = 0;
8bfe8b91 2031 adapter->stats.xmit_on++;
2032 }
2033 __netif_tx_unlock(tx_ring->txq);
2034 }
2035 }
2036 /*
2037 * If everything is freed up to the consumer, check whether the ring is full.
2038 * If the ring is full, check whether more needs to be freed and
2039 * schedule the callback again.
2040 *
2041 * This happens when there are 2 CPUs. One could be freeing and the
2042 * other filling it. If the ring is full when we get out of here and
2043 * the card has already interrupted the host, then the host can miss the
2044 * interrupt.
2045 *
2046 * There is still a possible race condition and the host could miss an
2047 * interrupt. The card has to take care of this.
2048 */
2049 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2050 done = (sw_consumer == hw_consumer);
2051 spin_unlock(&adapter->tx_clean_lock);
2052
2053 return done;
2054}
2055
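/*
 * NAPI poll handler: clean the TX ring, process up to @budget RX
 * completions, and re-arm the interrupt once both are done and the
 * device is still up.
 */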
2056static int qlcnic_poll(struct napi_struct *napi, int budget)
2057{
2058 struct qlcnic_host_sds_ring *sds_ring =
2059 container_of(napi, struct qlcnic_host_sds_ring, napi);
2060
2061 struct qlcnic_adapter *adapter = sds_ring->adapter;
2062
2063 int tx_complete;
2064 int work_done;
2065
2066 tx_complete = qlcnic_process_cmd_ring(adapter);
2067
2068 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2069
2070 if ((work_done < budget) && tx_complete) {
2071 napi_complete(&sds_ring->napi);
2072 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2073 qlcnic_enable_int(sds_ring);
2074 }
2075
2076 return work_done;
2077}
2078
2079#ifdef CONFIG_NET_POLL_CONTROLLER
2080static void qlcnic_poll_controller(struct net_device *netdev)
2081{
2082 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2083 disable_irq(adapter->irq);
2084 qlcnic_intr(adapter->irq, adapter);
2085 enable_irq(adapter->irq);
2086}
2087#endif
2088
2089static void
2090qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2091{
2092 u32 val;
2093
2094 val = adapter->portnum & 0xf;
2095 val |= encoding << 7;
2096 val |= (jiffies - adapter->dev_rst_time) << 8;
2097
2098 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2099 adapter->dev_rst_time = jiffies;
2100}
2101
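/*
 * Acknowledge a NEED_RESET or NEED_QUISCENT request by setting this
 * function's ready bit in the driver state register, under the api lock.
 */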
2102static int
2103qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
2104{
2105 u32 val;
2106
2107 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2108 state != QLCNIC_DEV_NEED_QUISCENT);
2109
2110 if (qlcnic_api_lock(adapter))
ade91f8e 2111 return -EIO;
2112
2113 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2114
2115 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2116 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2117 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2118 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
2119
2120 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2121
2122 qlcnic_api_unlock(adapter);
2123
2124 return 0;
2125}
2126
2127static int
2128qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2129{
2130 u32 val;
2131
2132 if (qlcnic_api_lock(adapter))
2133 return -EBUSY;
2134
2135 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2136 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2137 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2138
2139 qlcnic_api_unlock(adapter);
2140
2141 return 0;
2142}
2143
2144static void
2145qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
2146{
2147 u32 val;
2148
2149 if (qlcnic_api_lock(adapter))
2150 goto err;
2151
2152 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
6d2a4724 2153 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2154 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2155
2156 if (!(val & 0x11111111))
2157 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2158
2159 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2160 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2161 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2162
2163 qlcnic_api_unlock(adapter);
2164err:
2165 adapter->fw_fail_cnt = 0;
2166 clear_bit(__QLCNIC_START_FW, &adapter->state);
2167 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2168}
2169
f73dfc50 2170/* Grab api lock, before checking state */
2171static int
2172qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2173{
2174 int act, state;
2175
2176 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2177 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2178
2179 if (((state & 0x11111111) == (act & 0x11111111)) ||
2180 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2181 return 0;
2182 else
2183 return 1;
2184}
2185
2186static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2187{
2188 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2189
2190 if (val != QLCNIC_DRV_IDC_VER) {
2191 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2192 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2193 }
2194
2195 return 0;
2196}
2197
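/*
 * Decide whether this function should load the firmware. The port is
 * registered in the device reference count, then the device state is
 * examined: a COLD device is moved to INITIALIZING (return 1), a READY
 * device only gets an IDC version check (return 0), and NEED_RESET /
 * NEED_QUISCENT are acknowledged before waiting up to dev_init_timeo
 * seconds for the device to become READY. Returns negative on failure.
 */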
2198static int
2199qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2200{
2201 u32 val, prev_state;
aa5e18c0 2202 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2203 u8 portnum = adapter->portnum;
96f8118c 2204 u8 ret;
af19b491 2205
2206 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2207 return 1;
2208
2209 if (qlcnic_api_lock(adapter))
2210 return -1;
2211
2212 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2213 if (!(val & (1 << (portnum * 4)))) {
2214 QLC_DEV_SET_REF_CNT(val, portnum);
af19b491 2215 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2216 }
2217
2218 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2219 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2220
2221 switch (prev_state) {
2222 case QLCNIC_DEV_COLD:
bbd8c6a4 2223 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2224 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2225 qlcnic_idc_debug_info(adapter, 0);
2226 qlcnic_api_unlock(adapter);
2227 return 1;
2228
2229 case QLCNIC_DEV_READY:
96f8118c 2230 ret = qlcnic_check_idc_ver(adapter);
af19b491 2231 qlcnic_api_unlock(adapter);
96f8118c 2232 return ret;
2233
2234 case QLCNIC_DEV_NEED_RESET:
2235 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2236 QLC_DEV_SET_RST_RDY(val, portnum);
2237 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2238 break;
2239
2240 case QLCNIC_DEV_NEED_QUISCENT:
2241 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2242 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2243 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2244 break;
2245
2246 case QLCNIC_DEV_FAILED:
a7fc948f 2247 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2248 qlcnic_api_unlock(adapter);
2249 return -1;
2250
2251 case QLCNIC_DEV_INITIALIZING:
2252 case QLCNIC_DEV_QUISCENT:
2253 break;
2254 }
2255
2256 qlcnic_api_unlock(adapter);
2257
2258 do {
af19b491 2259 msleep(1000);
2260 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2261
2262 if (prev_state == QLCNIC_DEV_QUISCENT)
2263 continue;
2264 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2265
2266 if (!dev_init_timeo) {
2267 dev_err(&adapter->pdev->dev,
2268 "Timed out waiting for device to initialize\n");
af19b491 2269 return -1;
65b5b420 2270 }
2271
2272 if (qlcnic_api_lock(adapter))
2273 return -1;
2274
2275 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2276 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2277 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2278
96f8118c 2279 ret = qlcnic_check_idc_ver(adapter);
2280 qlcnic_api_unlock(adapter);
2281
96f8118c 2282 return ret;
2283}
2284
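/*
 * Delayed work driving firmware (re)initialization: wait for reset
 * acknowledgements from the other functions, advance the device state
 * machine, and either restart the firmware or reschedule itself until
 * the device reaches the READY state.
 */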
2285static void
2286qlcnic_fwinit_work(struct work_struct *work)
2287{
2288 struct qlcnic_adapter *adapter = container_of(work,
2289 struct qlcnic_adapter, fw_work.work);
9f26f547 2290 u32 dev_state = 0xf, npar_state;
af19b491 2291
2292 if (qlcnic_api_lock(adapter))
2293 goto err_ret;
af19b491 2294
2295 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2296 if (dev_state == QLCNIC_DEV_QUISCENT) {
2297 qlcnic_api_unlock(adapter);
2298 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2299 FW_POLL_DELAY * 2);
2300 return;
2301 }
2302
2303 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2304 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2305 if (npar_state == QLCNIC_DEV_NPAR_RDY) {
2306 qlcnic_api_unlock(adapter);
2307 goto wait_npar;
2308 } else {
2309 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2310 FW_POLL_DELAY);
2311 qlcnic_api_unlock(adapter);
2312 return;
2313 }
2314 }
2315
2316 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2317 dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2318 adapter->reset_ack_timeo);
2319 goto skip_ack_check;
2320 }
2321
2322 if (!qlcnic_check_drv_state(adapter)) {
2323skip_ack_check:
2324 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2325
2326 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2327 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2328 QLCNIC_DEV_QUISCENT);
2329 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2330 FW_POLL_DELAY * 2);
2331 QLCDB(adapter, DRV, "Quiescing the driver\n");
2332 qlcnic_idc_debug_info(adapter, 0);
2333
2334 qlcnic_api_unlock(adapter);
2335 return;
2336 }
2337
2338 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2339 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2340 QLCNIC_DEV_INITIALIZING);
2341 set_bit(__QLCNIC_START_FW, &adapter->state);
2342 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2343 qlcnic_idc_debug_info(adapter, 0);
2344 }
2345
2346 qlcnic_api_unlock(adapter);
2347
9f26f547 2348 if (!adapter->nic_ops->start_firmware(adapter)) {
2349 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2350 return;
2351 }
2352 goto err_ret;
2353 }
2354
f73dfc50 2355 qlcnic_api_unlock(adapter);
aa5e18c0 2356
9f26f547 2357wait_npar:
af19b491 2358 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2359 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2360
af19b491 2361 switch (dev_state) {
2362 case QLCNIC_DEV_QUISCENT:
2363 case QLCNIC_DEV_NEED_QUISCENT:
2364 case QLCNIC_DEV_NEED_RESET:
2365 qlcnic_schedule_work(adapter,
2366 qlcnic_fwinit_work, FW_POLL_DELAY);
2367 return;
2368 case QLCNIC_DEV_FAILED:
2369 break;
2370
2371 default:
9f26f547 2372 if (!adapter->nic_ops->start_firmware(adapter)) {
2373 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2374 return;
2375 }
2376 }
2377
2378err_ret:
2379 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2380 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2381 netif_device_attach(adapter->netdev);
2382 qlcnic_clr_all_drv_state(adapter);
2383}
2384
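/*
 * Delayed work that tears the interface down ahead of a firmware reset:
 * detach the netdev, check the peg halt status and temperature, record
 * this function's ready state and hand off to qlcnic_fwinit_work.
 */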
2385static void
2386qlcnic_detach_work(struct work_struct *work)
2387{
2388 struct qlcnic_adapter *adapter = container_of(work,
2389 struct qlcnic_adapter, fw_work.work);
2390 struct net_device *netdev = adapter->netdev;
2391 u32 status;
2392
2393 netif_device_detach(netdev);
2394
2395 qlcnic_down(adapter, netdev);
2396
ce668443 2397 rtnl_lock();
af19b491 2398 qlcnic_detach(adapter);
ce668443 2399 rtnl_unlock();
2400
2401 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2402
2403 if (status & QLCNIC_RCODE_FATAL_ERROR)
2404 goto err_ret;
2405
2406 if (adapter->temp == QLCNIC_TEMP_PANIC)
2407 goto err_ret;
2408
2409 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2410 goto err_ret;
2411
2412 adapter->fw_wait_cnt = 0;
2413
2414 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2415
2416 return;
2417
2418err_ret:
2419 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2420 status, adapter->temp);
34ce3626 2421 netif_device_attach(netdev);
2422 qlcnic_clr_all_drv_state(adapter);
2423
2424}
2425
f73dfc50 2426/* Transition to RESET state from READY state only */
2427static void
2428qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2429{
2430 u32 state;
2431
2432 if (qlcnic_api_lock(adapter))
2433 return;
2434
2435 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2436
f73dfc50 2437 if (state == QLCNIC_DEV_READY) {
af19b491 2438 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2439 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2440 qlcnic_idc_debug_info(adapter, 0);
2441 }
2442
2443 qlcnic_api_unlock(adapter);
2444}
2445
2446/* Transition to NPAR READY state from NPAR NOT READY state */
2447static void
2448qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2449{
2450 u32 state;
2451
2452 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC ||
2453 adapter->fw_hal_version == QLCNIC_FW_BASE)
2454 return;
2455
2456 if (qlcnic_api_lock(adapter))
2457 return;
2458
2459 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2460
2461 if (state != QLCNIC_DEV_NPAR_RDY) {
2462 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
2463 QLCNIC_DEV_NPAR_RDY);
2464 QLCDB(adapter, DRV, "NPAR READY state set\n");
2465 }
2466
2467 qlcnic_api_unlock(adapter);
2468}
2469
2470static void
2471qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2472 work_func_t func, int delay)
2473{
2474 INIT_DELAYED_WORK(&adapter->fw_work, func);
2475 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2476}
2477
2478static void
2479qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2480{
2481 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2482 msleep(10);
2483
2484 cancel_delayed_work_sync(&adapter->fw_work);
2485}
2486
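/*
 * Delayed work that re-attaches the interface after firmware recovery
 * and restarts the periodic health poll once the driver state is
 * cleared.
 */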
2487static void
2488qlcnic_attach_work(struct work_struct *work)
2489{
2490 struct qlcnic_adapter *adapter = container_of(work,
2491 struct qlcnic_adapter, fw_work.work);
2492 struct net_device *netdev = adapter->netdev;
2493 int err;
2494
2495 if (netif_running(netdev)) {
2496 err = qlcnic_attach(adapter);
2497 if (err)
2498 goto done;
2499
2500 err = qlcnic_up(adapter, netdev);
2501 if (err) {
2502 qlcnic_detach(adapter);
2503 goto done;
2504 }
2505
2506 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2507 }
2508
af19b491 2509done:
34ce3626 2510 netif_device_attach(netdev);
2511 adapter->fw_fail_cnt = 0;
2512 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2513
2514 if (!qlcnic_clr_drv_state(adapter))
2515 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2516 FW_POLL_DELAY);
2517}
2518
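/*
 * Periodic health check: monitor temperature, device state and the
 * firmware heartbeat counter; after FW_FAIL_THRESH missed heartbeats,
 * request a reset and schedule the detach/recovery work. Returns 1 when
 * recovery has been scheduled, 0 otherwise.
 */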
2519static int
2520qlcnic_check_health(struct qlcnic_adapter *adapter)
2521{
2522 u32 state = 0, heartbit;
2523 struct net_device *netdev = adapter->netdev;
2524
2525 if (qlcnic_check_temp(adapter))
2526 goto detach;
2527
2372a5f1 2528 if (adapter->need_fw_reset)
af19b491 2529 qlcnic_dev_request_reset(adapter);
2530
2531 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2532 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2533 adapter->need_fw_reset = 1;
2534
2535 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2536 if (heartbit != adapter->heartbit) {
2537 adapter->heartbit = heartbit;
2538 adapter->fw_fail_cnt = 0;
2539 if (adapter->need_fw_reset)
2540 goto detach;
2541 return 0;
2542 }
2543
2544 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2545 return 0;
2546
2547 qlcnic_dev_request_reset(adapter);
2548
2549 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2550
2551 dev_info(&netdev->dev, "firmware hang detected\n");
2552
2553detach:
2554 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2555 QLCNIC_DEV_NEED_RESET;
2556
2557 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2558 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2559
af19b491 2560 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2561 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2562 }
2563
2564 return 1;
2565}
2566
2567static void
2568qlcnic_fw_poll_work(struct work_struct *work)
2569{
2570 struct qlcnic_adapter *adapter = container_of(work,
2571 struct qlcnic_adapter, fw_work.work);
2572
2573 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2574 goto reschedule;
2575
2576
2577 if (qlcnic_check_health(adapter))
2578 return;
2579
2580reschedule:
2581 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2582}
2583
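/*
 * sysfs "bridged_mode" store handler: accepts 0 or 1 and asks the
 * firmware to toggle bridged mode, provided the capability is present
 * and the adapter is up.
 */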
2584static ssize_t
2585qlcnic_store_bridged_mode(struct device *dev,
2586 struct device_attribute *attr, const char *buf, size_t len)
2587{
2588 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2589 unsigned long new;
2590 int ret = -EINVAL;
2591
2592 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2593 goto err_out;
2594
2595 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2596 goto err_out;
2597
2598 if (strict_strtoul(buf, 2, &new))
2599 goto err_out;
2600
2e9d722d 2601 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
2602 ret = len;
2603
2604err_out:
2605 return ret;
2606}
2607
2608static ssize_t
2609qlcnic_show_bridged_mode(struct device *dev,
2610 struct device_attribute *attr, char *buf)
2611{
2612 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2613 int bridged_mode = 0;
2614
2615 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2616 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2617
2618 return sprintf(buf, "%d\n", bridged_mode);
2619}
2620
2621static struct device_attribute dev_attr_bridged_mode = {
2622 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2623 .show = qlcnic_show_bridged_mode,
2624 .store = qlcnic_store_bridged_mode,
2625};
2626
2627static ssize_t
2628qlcnic_store_diag_mode(struct device *dev,
2629 struct device_attribute *attr, const char *buf, size_t len)
2630{
2631 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2632 unsigned long new;
2633
2634 if (strict_strtoul(buf, 2, &new))
2635 return -EINVAL;
2636
2637 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2638 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2639
2640 return len;
2641}
2642
2643static ssize_t
2644qlcnic_show_diag_mode(struct device *dev,
2645 struct device_attribute *attr, char *buf)
2646{
2647 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2648
2649 return sprintf(buf, "%d\n",
2650 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2651}
2652
2653static struct device_attribute dev_attr_diag_mode = {
2654 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2655 .show = qlcnic_show_diag_mode,
2656 .store = qlcnic_store_diag_mode,
2657};
2658
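/*
 * Validate a CRB sysfs access: diagnostics mode must be enabled, and
 * the access must be 4 bytes (8 bytes for the CAM/QM window) in size
 * and naturally aligned.
 */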
2659static int
2660qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2661 loff_t offset, size_t size)
2662{
2663 size_t crb_size = 4;
2664
2665 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2666 return -EIO;
2667
2668 if (offset < QLCNIC_PCI_CRBSPACE) {
2669 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
2670 QLCNIC_PCI_CAMQM_END))
2671 crb_size = 8;
2672 else
2673 return -EINVAL;
2674 }
af19b491 2675
2676 if ((size != crb_size) || (offset & (crb_size-1)))
2677 return -EINVAL;
2678
2679 return 0;
2680}
2681
2682static ssize_t
2683qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2684 struct bin_attribute *attr,
2685 char *buf, loff_t offset, size_t size)
2686{
2687 struct device *dev = container_of(kobj, struct device, kobj);
2688 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2689 u32 data;
897e8c7c 2690 u64 qmdata;
2691 int ret;
2692
2693 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2694 if (ret != 0)
2695 return ret;
2696
2697 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2698 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
2699 memcpy(buf, &qmdata, size);
2700 } else {
2701 data = QLCRD32(adapter, offset);
2702 memcpy(buf, &data, size);
2703 }
2704 return size;
2705}
2706
2707static ssize_t
2708qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2709 struct bin_attribute *attr,
2710 char *buf, loff_t offset, size_t size)
2711{
2712 struct device *dev = container_of(kobj, struct device, kobj);
2713 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2714 u32 data;
897e8c7c 2715 u64 qmdata;
2716 int ret;
2717
2718 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2719 if (ret != 0)
2720 return ret;
2721
2722 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2723 memcpy(&qmdata, buf, size);
2724 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
2725 } else {
2726 memcpy(&data, buf, size);
2727 QLCWR32(adapter, offset, data);
2728 }
2729 return size;
2730}
2731
2732static int
2733qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2734 loff_t offset, size_t size)
2735{
2736 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2737 return -EIO;
2738
2739 if ((size != 8) || (offset & 0x7))
2740 return -EIO;
2741
2742 return 0;
2743}
2744
2745static ssize_t
2746qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
2747 struct bin_attribute *attr,
2748 char *buf, loff_t offset, size_t size)
2749{
2750 struct device *dev = container_of(kobj, struct device, kobj);
2751 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2752 u64 data;
2753 int ret;
2754
2755 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2756 if (ret != 0)
2757 return ret;
2758
2759 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
2760 return -EIO;
2761
2762 memcpy(buf, &data, size);
2763
2764 return size;
2765}
2766
2767static ssize_t
2768qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
2769 struct bin_attribute *attr,
2770 char *buf, loff_t offset, size_t size)
2771{
2772 struct device *dev = container_of(kobj, struct device, kobj);
2773 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2774 u64 data;
2775 int ret;
2776
2777 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2778 if (ret != 0)
2779 return ret;
2780
2781 memcpy(&data, buf, size);
2782
2783 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
2784 return -EIO;
2785
2786 return size;
2787}
2788
2789
2790static struct bin_attribute bin_attr_crb = {
2791 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2792 .size = 0,
2793 .read = qlcnic_sysfs_read_crb,
2794 .write = qlcnic_sysfs_write_crb,
2795};
2796
2797static struct bin_attribute bin_attr_mem = {
2798 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2799 .size = 0,
2800 .read = qlcnic_sysfs_read_mem,
2801 .write = qlcnic_sysfs_write_mem,
2802};
2803
2804static void
2805qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
2806{
2807 struct device *dev = &adapter->pdev->dev;
2808
2809 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2810 if (device_create_file(dev, &dev_attr_bridged_mode))
2811 dev_warn(dev,
2812 "failed to create bridged_mode sysfs entry\n");
2813}
2814
2815static void
2816qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
2817{
2818 struct device *dev = &adapter->pdev->dev;
2819
2820 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2821 device_remove_file(dev, &dev_attr_bridged_mode);
2822}
2823
2824static void
2825qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
2826{
2827 struct device *dev = &adapter->pdev->dev;
2828
2829 if (device_create_file(dev, &dev_attr_diag_mode))
2830 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2831 if (device_create_bin_file(dev, &bin_attr_crb))
2832 dev_info(dev, "failed to create crb sysfs entry\n");
2833 if (device_create_bin_file(dev, &bin_attr_mem))
2834 dev_info(dev, "failed to create mem sysfs entry\n");
2835}
2836
2837
2838static void
2839qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
2840{
2841 struct device *dev = &adapter->pdev->dev;
2842
2843 device_remove_file(dev, &dev_attr_diag_mode);
2844 device_remove_bin_file(dev, &bin_attr_crb);
2845 device_remove_bin_file(dev, &bin_attr_mem);
2846}
2847
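/*
 * IPv4 notifier support (CONFIG_INET): when an address is added to or
 * removed from a qlcnic interface, or a VLAN stacked on one, forward it
 * to the firmware via qlcnic_config_ipaddr().
 */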
2848#ifdef CONFIG_INET
2849
2850#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
2851
2852static void
2853qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2854{
2855 struct in_device *indev;
2856 struct qlcnic_adapter *adapter = netdev_priv(dev);
2857
2858 indev = in_dev_get(dev);
2859 if (!indev)
2860 return;
2861
2862 for_ifa(indev) {
2863 switch (event) {
2864 case NETDEV_UP:
2865 qlcnic_config_ipaddr(adapter,
2866 ifa->ifa_address, QLCNIC_IP_UP);
2867 break;
2868 case NETDEV_DOWN:
2869 qlcnic_config_ipaddr(adapter,
2870 ifa->ifa_address, QLCNIC_IP_DOWN);
2871 break;
2872 default:
2873 break;
2874 }
2875 } endfor_ifa(indev);
2876
2877 in_dev_put(indev);
2878}
2879
2880static int qlcnic_netdev_event(struct notifier_block *this,
2881 unsigned long event, void *ptr)
2882{
2883 struct qlcnic_adapter *adapter;
2884 struct net_device *dev = (struct net_device *)ptr;
2885
2886recheck:
2887 if (dev == NULL)
2888 goto done;
2889
2890 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2891 dev = vlan_dev_real_dev(dev);
2892 goto recheck;
2893 }
2894
2895 if (!is_qlcnic_netdev(dev))
2896 goto done;
2897
2898 adapter = netdev_priv(dev);
2899
2900 if (!adapter)
2901 goto done;
2902
2903 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2904 goto done;
2905
2906 qlcnic_config_indev_addr(dev, event);
2907done:
2908 return NOTIFY_DONE;
2909}
2910
2911static int
2912qlcnic_inetaddr_event(struct notifier_block *this,
2913 unsigned long event, void *ptr)
2914{
2915 struct qlcnic_adapter *adapter;
2916 struct net_device *dev;
2917
2918 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2919
2920 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
2921
2922recheck:
2923 if (dev == NULL || !netif_running(dev))
2924 goto done;
2925
2926 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2927 dev = vlan_dev_real_dev(dev);
2928 goto recheck;
2929 }
2930
2931 if (!is_qlcnic_netdev(dev))
2932 goto done;
2933
2934 adapter = netdev_priv(dev);
2935
251a84c9 2936 if (!adapter)
2937 goto done;
2938
2939 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2940 goto done;
2941
2942 switch (event) {
2943 case NETDEV_UP:
2944 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
2945 break;
2946 case NETDEV_DOWN:
2947 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
2948 break;
2949 default:
2950 break;
2951 }
2952
2953done:
2954 return NOTIFY_DONE;
2955}
2956
2957static int
2958qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2959{
2960 int err;
2961
2962 err = qlcnic_can_start_firmware(adapter);
2963 if (err)
2964 return err;
2965
2966 qlcnic_check_options(adapter);
2967
2968 adapter->need_fw_reset = 0;
2969
2970 return err;
2971}
2972
2973static int
2974qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
2975{
2976 return -EOPNOTSUPP;
2977}
2978
2979static int
2980qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
2981{
2982 return -EOPNOTSUPP;
2983}
2984
2985static int
2986qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter)
2987{
2988 return -EOPNOTSUPP;
2989}
2990
2991static void
2992qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter)
2993{
2994 return;
2995}
2996
2997
2998static struct notifier_block qlcnic_netdev_cb = {
2999 .notifier_call = qlcnic_netdev_event,
3000};
3001
3002static struct notifier_block qlcnic_inetaddr_cb = {
3003 .notifier_call = qlcnic_inetaddr_event,
3004};
3005#else
3006static void
3007qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3008{ }
3009#endif
3010
3011static struct pci_driver qlcnic_driver = {
3012 .name = qlcnic_driver_name,
3013 .id_table = qlcnic_pci_tbl,
3014 .probe = qlcnic_probe,
3015 .remove = __devexit_p(qlcnic_remove),
3016#ifdef CONFIG_PM
3017 .suspend = qlcnic_suspend,
3018 .resume = qlcnic_resume,
3019#endif
3020 .shutdown = qlcnic_shutdown
3021};
3022
3023static int __init qlcnic_init_module(void)
3024{
3025
3026 printk(KERN_INFO "%s\n", qlcnic_driver_string);
3027
3028#ifdef CONFIG_INET
3029 register_netdevice_notifier(&qlcnic_netdev_cb);
3030 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3031#endif
3032
3033
3034 return pci_register_driver(&qlcnic_driver);
3035}
3036
3037module_init(qlcnic_init_module);
3038
3039static void __exit qlcnic_exit_module(void)
3040{
3041
3042 pci_unregister_driver(&qlcnic_driver);
3043
3044#ifdef CONFIG_INET
3045 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3046 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3047#endif
3048}
3049
3050module_exit(qlcnic_exit_module);