qlcnic: support port vlan id
[linux-2.6-block.git] drivers/net/qlcnic/qlcnic_main.c
af19b491
AKS
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
5a0e3ad6 25#include <linux/slab.h>
af19b491
AKS
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
451724c8 37#include <linux/aer.h>
af19b491 38
7f9a0c34 39MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
af19b491
AKS
40MODULE_LICENSE("GPL");
41MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
43
44char qlcnic_driver_name[] = "qlcnic";
7f9a0c34
SV
45static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
af19b491
AKS
47
48static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
49
50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5;
52
53static int use_msi = 1;
54module_param(use_msi, int, 0644);
55MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
56
57static int use_msi_x = 1;
58module_param(use_msi_x, int, 0644);
59MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
60
61static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
62module_param(auto_fw_reset, int, 0644);
63MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
64
4d5bdb38
AKS
65static int load_fw_file;
66module_param(load_fw_file, int, 0644);
67MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
68
2e9d722d
AC
69static int qlcnic_config_npars;
70module_param(qlcnic_config_npars, int, 0644);
71MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
72
af19b491
AKS
73static int __devinit qlcnic_probe(struct pci_dev *pdev,
74 const struct pci_device_id *ent);
75static void __devexit qlcnic_remove(struct pci_dev *pdev);
76static int qlcnic_open(struct net_device *netdev);
77static int qlcnic_close(struct net_device *netdev);
af19b491 78static void qlcnic_tx_timeout(struct net_device *netdev);
af19b491
AKS
79static void qlcnic_attach_work(struct work_struct *work);
80static void qlcnic_fwinit_work(struct work_struct *work);
81static void qlcnic_fw_poll_work(struct work_struct *work);
82static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
83 work_func_t func, int delay);
84static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
85static int qlcnic_poll(struct napi_struct *napi, int budget);
8f891387 86static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
af19b491
AKS
87#ifdef CONFIG_NET_POLL_CONTROLLER
88static void qlcnic_poll_controller(struct net_device *netdev);
89#endif
90
91static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
92static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
95
6df900e9 96static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
21854f02 97static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
af19b491
AKS
98static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
99
7eb9855d 100static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
af19b491
AKS
101static irqreturn_t qlcnic_intr(int irq, void *data);
102static irqreturn_t qlcnic_msi_intr(int irq, void *data);
103static irqreturn_t qlcnic_msix_intr(int irq, void *data);
104
105static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
9f26f547
AC
107static int qlcnic_start_firmware(struct qlcnic_adapter *);
108
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
9f26f547
AC
110static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
0325d69b
RB
113static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
114 struct qlcnic_esw_func_cfg *);
af19b491
AKS
115/* PCI Device ID Table */
116#define ENTRY(device) \
117 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
118 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
119
120#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
121
6a902881 122static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
af19b491
AKS
123 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
124 {0,}
125};
126
127MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
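/*
 * Each ENTRY(dev) above expands to a pci_device_id initializer of the form
 *
 *   { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (dev)),
 *     .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0 }
 *
 * so the driver binds only to QLogic functions that advertise the Ethernet
 * network controller class code.
 */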
128
129
130void
131qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
132 struct qlcnic_host_tx_ring *tx_ring)
133{
134 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
af19b491
AKS
135}
136
137static const u32 msi_tgt_status[8] = {
138 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
139 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
140 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
141 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
142};
143
144static const
145struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
146
147static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
148{
149 writel(0, sds_ring->crb_intr_mask);
150}
151
152static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
153{
154 struct qlcnic_adapter *adapter = sds_ring->adapter;
155
156 writel(0x1, sds_ring->crb_intr_mask);
157
158 if (!QLCNIC_IS_MSI_FAMILY(adapter))
159 writel(0xfbff, adapter->tgt_mask_reg);
160}
161
162static int
163qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
164{
165 int size = sizeof(struct qlcnic_host_sds_ring) * count;
166
167 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
168
169 return (recv_ctx->sds_rings == NULL);
170}
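/*
 * Note: qlcnic_alloc_sds_rings() returns nonzero on allocation failure
 * rather than a -errno; callers such as qlcnic_napi_add() translate that
 * into -ENOMEM.  The NULL check in qlcnic_free_sds_rings() below is
 * redundant (kfree(NULL) is a no-op) but harmless.
 */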
171
172static void
173qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
174{
175 if (recv_ctx->sds_rings != NULL)
176 kfree(recv_ctx->sds_rings);
177
178 recv_ctx->sds_rings = NULL;
179}
180
181static int
182qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
183{
184 int ring;
185 struct qlcnic_host_sds_ring *sds_ring;
186 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
187
188 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
189 return -ENOMEM;
190
191 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
192 sds_ring = &recv_ctx->sds_rings[ring];
8f891387 193
194 if (ring == adapter->max_sds_rings - 1)
195 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
196 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
197 else
198 netif_napi_add(netdev, &sds_ring->napi,
199 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
af19b491
AKS
200 }
201
202 return 0;
203}
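/*
 * The last SDS ring is registered with qlcnic_poll(), which in this driver
 * also services Tx completions, and its NAPI weight is the default weight
 * divided across all rings; the remaining rings use the Rx-only
 * qlcnic_rx_poll() handler with twice the default weight.
 */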
204
205static void
206qlcnic_napi_del(struct qlcnic_adapter *adapter)
207{
208 int ring;
209 struct qlcnic_host_sds_ring *sds_ring;
210 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
211
212 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
213 sds_ring = &recv_ctx->sds_rings[ring];
214 netif_napi_del(&sds_ring->napi);
215 }
216
217 qlcnic_free_sds_rings(&adapter->recv_ctx);
218}
219
220static void
221qlcnic_napi_enable(struct qlcnic_adapter *adapter)
222{
223 int ring;
224 struct qlcnic_host_sds_ring *sds_ring;
225 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
226
780ab790
AKS
227 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
228 return;
229
af19b491
AKS
230 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
231 sds_ring = &recv_ctx->sds_rings[ring];
232 napi_enable(&sds_ring->napi);
233 qlcnic_enable_int(sds_ring);
234 }
235}
236
237static void
238qlcnic_napi_disable(struct qlcnic_adapter *adapter)
239{
240 int ring;
241 struct qlcnic_host_sds_ring *sds_ring;
242 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
243
780ab790
AKS
244 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
245 return;
246
af19b491
AKS
247 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
248 sds_ring = &recv_ctx->sds_rings[ring];
249 qlcnic_disable_int(sds_ring);
250 napi_synchronize(&sds_ring->napi);
251 napi_disable(&sds_ring->napi);
252 }
253}
254
255static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
256{
257 memset(&adapter->stats, 0, sizeof(adapter->stats));
af19b491
AKS
258}
259
af19b491
AKS
260static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
261{
262 u32 val, data;
263
264 val = adapter->ahw.board_type;
265 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
266 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
267 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
268 data = QLCNIC_PORT_MODE_802_3_AP;
269 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
270 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
271 data = QLCNIC_PORT_MODE_XG;
272 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
273 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
274 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
275 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
276 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
277 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
278 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
279 } else {
280 data = QLCNIC_PORT_MODE_AUTO_NEG;
281 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
282 }
283
284 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
286 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
287 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
288 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
289 }
290 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
291 }
292}
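/*
 * The port_mode/wol_port_mode module-level settings above are applied only
 * to QLCNIC_BRDTYPE_P3_HMEZ and QLCNIC_BRDTYPE_P3_XG_LOM boards; any
 * unrecognized value falls back to auto-negotiation.
 */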
293
294static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
295{
296 u32 control;
297 int pos;
298
299 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
300 if (pos) {
301 pci_read_config_dword(pdev, pos, &control);
302 if (enable)
303 control |= PCI_MSIX_FLAGS_ENABLE;
304 else
305 control = 0;
306 pci_write_config_dword(pdev, pos, control);
307 }
308}
309
310static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
311{
312 int i;
313
314 for (i = 0; i < count; i++)
315 adapter->msix_entries[i].entry = i;
316}
317
318static int
319qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
320{
2e9d722d 321 u8 mac_addr[ETH_ALEN];
af19b491
AKS
322 struct net_device *netdev = adapter->netdev;
323 struct pci_dev *pdev = adapter->pdev;
324
2e9d722d 325 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
af19b491
AKS
326 return -EIO;
327
2e9d722d 328 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
af19b491
AKS
329 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
330 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
331
332 /* set station address */
333
334 if (!is_valid_ether_addr(netdev->perm_addr))
335 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
336 netdev->dev_addr);
337
338 return 0;
339}
340
341static int qlcnic_set_mac(struct net_device *netdev, void *p)
342{
343 struct qlcnic_adapter *adapter = netdev_priv(netdev);
344 struct sockaddr *addr = p;
345
346 if (!is_valid_ether_addr(addr->sa_data))
347 return -EINVAL;
348
8a15ad1f 349 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
350 netif_device_detach(netdev);
351 qlcnic_napi_disable(adapter);
352 }
353
354 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
355 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
356 qlcnic_set_multi(adapter->netdev);
357
8a15ad1f 358 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
359 netif_device_attach(netdev);
360 qlcnic_napi_enable(adapter);
361 }
362 return 0;
363}
364
365static const struct net_device_ops qlcnic_netdev_ops = {
366 .ndo_open = qlcnic_open,
367 .ndo_stop = qlcnic_close,
368 .ndo_start_xmit = qlcnic_xmit_frame,
369 .ndo_get_stats = qlcnic_get_stats,
370 .ndo_validate_addr = eth_validate_addr,
371 .ndo_set_multicast_list = qlcnic_set_multi,
372 .ndo_set_mac_address = qlcnic_set_mac,
373 .ndo_change_mtu = qlcnic_change_mtu,
374 .ndo_tx_timeout = qlcnic_tx_timeout,
375#ifdef CONFIG_NET_POLL_CONTROLLER
376 .ndo_poll_controller = qlcnic_poll_controller,
377#endif
378};
379
2e9d722d 380static struct qlcnic_nic_template qlcnic_ops = {
2e9d722d
AC
381 .get_mac_addr = qlcnic_get_mac_address,
382 .config_bridged_mode = qlcnic_config_bridged_mode,
383 .config_led = qlcnic_config_led,
9f26f547
AC
384 .start_firmware = qlcnic_start_firmware
385};
386
387static struct qlcnic_nic_template qlcnic_vf_ops = {
388 .get_mac_addr = qlcnic_get_mac_address,
389 .config_bridged_mode = qlcnicvf_config_bridged_mode,
390 .config_led = qlcnicvf_config_led,
9f26f547 391 .start_firmware = qlcnicvf_start_firmware
2e9d722d
AC
392};
393
af19b491
AKS
394static void
395qlcnic_setup_intr(struct qlcnic_adapter *adapter)
396{
397 const struct qlcnic_legacy_intr_set *legacy_intrp;
398 struct pci_dev *pdev = adapter->pdev;
399 int err, num_msix;
400
401 if (adapter->rss_supported) {
402 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
403 MSIX_ENTRIES_PER_ADAPTER : 2;
404 } else
405 num_msix = 1;
406
407 adapter->max_sds_rings = 1;
408
409 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
410
411 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
412
413 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
414 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
415 legacy_intrp->tgt_status_reg);
416 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
417 legacy_intrp->tgt_mask_reg);
418 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
419
420 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
421 ISR_INT_STATE_REG);
422
423 qlcnic_set_msix_bit(pdev, 0);
424
425 if (adapter->msix_supported) {
426
427 qlcnic_init_msix_entries(adapter, num_msix);
428 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
429 if (err == 0) {
430 adapter->flags |= QLCNIC_MSIX_ENABLED;
431 qlcnic_set_msix_bit(pdev, 1);
432
433 if (adapter->rss_supported)
434 adapter->max_sds_rings = num_msix;
435
436 dev_info(&pdev->dev, "using msi-x interrupts\n");
437 return;
438 }
439
440 if (err > 0)
441 pci_disable_msix(pdev);
442
443 /* fall through for msi */
444 }
445
446 if (use_msi && !pci_enable_msi(pdev)) {
447 adapter->flags |= QLCNIC_MSI_ENABLED;
448 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
449 msi_tgt_status[adapter->ahw.pci_func]);
450 dev_info(&pdev->dev, "using msi interrupts\n");
451 adapter->msix_entries[0].vector = pdev->irq;
452 return;
453 }
454
455 dev_info(&pdev->dev, "using legacy interrupts\n");
456 adapter->msix_entries[0].vector = pdev->irq;
457}
458
459static void
460qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
461{
462 if (adapter->flags & QLCNIC_MSIX_ENABLED)
463 pci_disable_msix(adapter->pdev);
464 if (adapter->flags & QLCNIC_MSI_ENABLED)
465 pci_disable_msi(adapter->pdev);
466}
467
468static void
469qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
470{
471 if (adapter->ahw.pci_base0 != NULL)
472 iounmap(adapter->ahw.pci_base0);
473}
474
346fe763
RB
475static int
476qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
477{
e88db3bd 478 struct qlcnic_pci_info *pci_info;
900853a4 479 int i, ret = 0;
346fe763
RB
480 u8 pfn;
481
e88db3bd
DC
482 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
483 if (!pci_info)
484 return -ENOMEM;
485
ca315ac2 486 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
346fe763 487 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
e88db3bd 488 if (!adapter->npars) {
900853a4 489 ret = -ENOMEM;
e88db3bd
DC
490 goto err_pci_info;
491 }
346fe763 492
ca315ac2 493 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
346fe763
RB
494 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
495 if (!adapter->eswitch) {
900853a4 496 ret = -ENOMEM;
ca315ac2 497 goto err_npars;
346fe763
RB
498 }
499
500 ret = qlcnic_get_pci_info(adapter, pci_info);
ca315ac2
DC
501 if (ret)
502 goto err_eswitch;
346fe763 503
ca315ac2
DC
504 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
505 pfn = pci_info[i].id;
 506 if (pfn >= QLCNIC_MAX_PCI_FUNC) {
 507 ret = QL_STATUS_INVALID_PARAM;
 goto err_eswitch; /* free eswitch, npars and pci_info */
 }
508 adapter->npars[pfn].active = pci_info[i].active;
509 adapter->npars[pfn].type = pci_info[i].type;
510 adapter->npars[pfn].phy_port = pci_info[i].default_port;
ca315ac2
DC
511 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
512 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
346fe763
RB
513 }
514
ca315ac2
DC
515 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
516 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
517
e88db3bd 518 kfree(pci_info);
ca315ac2
DC
519 return 0;
520
521err_eswitch:
346fe763
RB
522 kfree(adapter->eswitch);
523 adapter->eswitch = NULL;
ca315ac2 524err_npars:
346fe763 525 kfree(adapter->npars);
ca315ac2 526 adapter->npars = NULL;
e88db3bd
DC
527err_pci_info:
528 kfree(pci_info);
346fe763
RB
529
530 return ret;
531}
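/*
 * qlcnic_init_pci_info() caches the firmware's per-PCI-function view
 * (type, default port, Tx bandwidth limits) into adapter->npars[] and
 * marks every eswitch as enabled; the temporary pci_info array is only
 * needed during this setup and is freed before returning.
 */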
532
2e9d722d
AC
533static int
534qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
535{
536 u8 id;
537 u32 ref_count;
538 int i, ret = 1;
539 u32 data = QLCNIC_MGMT_FUNC;
540 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
541
542 /* If other drivers are not in use set their privilege level */
543 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
544 ret = qlcnic_api_lock(adapter);
545 if (ret)
546 goto err_lock;
2e9d722d 547
0e33c664
AC
548 if (qlcnic_config_npars) {
549 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
346fe763 550 id = i;
0e33c664
AC
551 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
552 id == adapter->ahw.pci_func)
553 continue;
554 data |= (qlcnic_config_npars &
555 QLC_DEV_SET_DRV(0xf, id));
556 }
557 } else {
558 data = readl(priv_op);
559 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
560 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
561 adapter->ahw.pci_func));
2e9d722d
AC
562 }
563 writel(data, priv_op);
2e9d722d
AC
564 qlcnic_api_unlock(adapter);
565err_lock:
566 return ret;
567}
568
2e9d722d
AC
569static u32
570qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
571{
572 void __iomem *msix_base_addr;
573 void __iomem *priv_op;
346fe763 574 struct qlcnic_info nic_info;
2e9d722d
AC
575 u32 func;
576 u32 msix_base;
577 u32 op_mode, priv_level;
578
579 /* Determine FW API version */
580 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
2e9d722d
AC
581
582 /* Find PCI function number */
583 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
584 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
585 msix_base = readl(msix_base_addr);
586 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
587 adapter->ahw.pci_func = func;
588
346fe763
RB
589 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
590 adapter->capabilities = nic_info.capabilities;
591
592 if (adapter->capabilities & BIT_6)
593 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
594 else
595 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
596 }
0e33c664
AC
597
598 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
599 adapter->nic_ops = &qlcnic_ops;
600 return adapter->fw_hal_version;
601 }
602
2e9d722d
AC
603 /* Determine function privilege level */
604 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
605 op_mode = readl(priv_op);
0e33c664 606 if (op_mode == QLC_DEV_DRV_DEFAULT)
2e9d722d 607 priv_level = QLCNIC_MGMT_FUNC;
0e33c664 608 else
2e9d722d
AC
609 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
610
611 switch (priv_level) {
612 case QLCNIC_MGMT_FUNC:
613 adapter->op_mode = QLCNIC_MGMT_FUNC;
45918e2f 614 adapter->nic_ops = &qlcnic_ops;
346fe763 615 qlcnic_init_pci_info(adapter);
2e9d722d 616 /* Set privilege level for other functions */
0e33c664 617 qlcnic_set_function_modes(adapter);
2e9d722d
AC
618 dev_info(&adapter->pdev->dev,
619 "HAL Version: %d, Management function\n",
620 adapter->fw_hal_version);
621 break;
622 case QLCNIC_PRIV_FUNC:
623 adapter->op_mode = QLCNIC_PRIV_FUNC;
624 dev_info(&adapter->pdev->dev,
625 "HAL Version: %d, Privileged function\n",
626 adapter->fw_hal_version);
45918e2f 627 adapter->nic_ops = &qlcnic_ops;
2e9d722d 628 break;
9f26f547
AC
629 case QLCNIC_NON_PRIV_FUNC:
630 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
631 dev_info(&adapter->pdev->dev,
632 "HAL Version: %d Non Privileged function\n",
633 adapter->fw_hal_version);
634 adapter->nic_ops = &qlcnic_vf_ops;
635 break;
2e9d722d
AC
636 default:
637 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
638 priv_level);
639 return 0;
640 }
641 return adapter->fw_hal_version;
642}
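/*
 * qlcnic_get_driver_mode() reads the firmware HAL version, derives this
 * function's PCI function number from its MSI-X table offset, and selects
 * the nic_ops template: management and privileged functions get qlcnic_ops,
 * non-privileged functions get the qlcnicvf_* variants.  A return value of
 * 0 (unknown privilege level) makes qlcnic_setup_pci_map() fail the probe.
 */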
643
af19b491
AKS
644static int
645qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
646{
647 void __iomem *mem_ptr0 = NULL;
648 resource_size_t mem_base;
649 unsigned long mem_len, pci_len0 = 0;
650
651 struct pci_dev *pdev = adapter->pdev;
af19b491 652
af19b491
AKS
653 /* remap phys address */
654 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
655 mem_len = pci_resource_len(pdev, 0);
656
657 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
658
659 mem_ptr0 = pci_ioremap_bar(pdev, 0);
660 if (mem_ptr0 == NULL) {
661 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
662 return -EIO;
663 }
664 pci_len0 = mem_len;
665 } else {
666 return -EIO;
667 }
668
669 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
670
671 adapter->ahw.pci_base0 = mem_ptr0;
672 adapter->ahw.pci_len0 = pci_len0;
673
2e9d722d
AC
674 if (!qlcnic_get_driver_mode(adapter)) {
675 iounmap(adapter->ahw.pci_base0);
676 return -EIO;
677 }
678
af19b491 679 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
2e9d722d 680 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
af19b491
AKS
681
682 return 0;
683}
684
685static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
686{
687 struct pci_dev *pdev = adapter->pdev;
688 int i, found = 0;
689
690 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
691 if (qlcnic_boards[i].vendor == pdev->vendor &&
692 qlcnic_boards[i].device == pdev->device &&
693 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
694 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
02f6e46f
SC
695 sprintf(name, "%pM: %s" ,
696 adapter->mac_addr,
697 qlcnic_boards[i].short_name);
af19b491
AKS
698 found = 1;
699 break;
700 }
701
702 }
703
704 if (!found)
7f9a0c34 705 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
af19b491
AKS
706}
707
708static void
709qlcnic_check_options(struct qlcnic_adapter *adapter)
710{
711 u32 fw_major, fw_minor, fw_build;
712 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
af19b491 713 struct pci_dev *pdev = adapter->pdev;
346fe763 714 struct qlcnic_info nic_info;
af19b491
AKS
715
716 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
717 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
718 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
719
720 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
721
722 if (adapter->portnum == 0) {
723 get_brd_name(adapter, brd_name);
724
725 pr_info("%s: %s Board Chip rev 0x%x\n",
726 module_name(THIS_MODULE),
727 brd_name, adapter->ahw.revision_id);
728 }
729
251a84c9
AKS
730 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
731 fw_major, fw_minor, fw_build);
af19b491 732
af19b491
AKS
733 adapter->flags &= ~QLCNIC_LRO_ENABLED;
734
735 if (adapter->ahw.port_type == QLCNIC_XGBE) {
736 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
737 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
738 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
739 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
740 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
741 }
742
346fe763
RB
743 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
744 adapter->physical_port = nic_info.phys_port;
745 adapter->switch_mode = nic_info.switch_mode;
746 adapter->max_tx_ques = nic_info.max_tx_ques;
747 adapter->max_rx_ques = nic_info.max_rx_ques;
748 adapter->capabilities = nic_info.capabilities;
749 adapter->max_mac_filters = nic_info.max_mac_filters;
750 adapter->max_mtu = nic_info.max_mtu;
751 }
0e33c664 752
af19b491
AKS
753 adapter->msix_supported = !!use_msi_x;
754 adapter->rss_supported = !!use_msi_x;
755
756 adapter->num_txd = MAX_CMD_DESCRIPTORS;
757
251b036a 758 adapter->max_rds_rings = MAX_RDS_RINGS;
af19b491
AKS
759}
760
8cf61f89
AKS
761static void
762qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
763 struct qlcnic_esw_func_cfg *esw_cfg)
764{
765 if (esw_cfg->discard_tagged)
766 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
767 else
768 adapter->flags |= QLCNIC_TAGGING_ENABLED;
769
770 if (esw_cfg->vlan_id)
771 adapter->pvid = esw_cfg->vlan_id;
772 else
773 adapter->pvid = 0;
774}
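/*
 * Port VLAN id support: the eswitch port configuration supplies a VLAN id
 * and a discard-tagged policy for this function.  They are cached in
 * adapter->pvid and the QLCNIC_TAGGING_ENABLED flag here and enforced per
 * packet on the Tx path by qlcnic_check_tx_tagging().
 */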
775
0325d69b
RB
776static void
777qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
778 struct qlcnic_esw_func_cfg *esw_cfg)
779{
fe4d434d
SC
780 adapter->flags &= ~QLCNIC_MACSPOOF;
781 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
782 if (esw_cfg->mac_anti_spoof)
783 adapter->flags |= QLCNIC_MACSPOOF;
784
0325d69b
RB
785 qlcnic_set_netdev_features(adapter, esw_cfg);
786}
787
788static int
789qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
790{
791 struct qlcnic_esw_func_cfg esw_cfg;
792
793 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
794 return 0;
795
796 esw_cfg.pci_func = adapter->ahw.pci_func;
797 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
798 return -EIO;
8cf61f89 799 qlcnic_set_vlan_config(adapter, &esw_cfg);
0325d69b
RB
800 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
801
802 return 0;
803}
804
805static void
806qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
807 struct qlcnic_esw_func_cfg *esw_cfg)
808{
809 struct net_device *netdev = adapter->netdev;
810 unsigned long features, vlan_features;
811
812 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
813 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
814 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
815 NETIF_F_IPV6_CSUM);
816
817 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
818 features |= (NETIF_F_TSO | NETIF_F_TSO6);
819 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
820 }
821 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
822 features |= NETIF_F_LRO;
823
824 if (esw_cfg->offload_flags & BIT_0) {
825 netdev->features |= features;
826 adapter->rx_csum = 1;
827 if (!(esw_cfg->offload_flags & BIT_1))
828 netdev->features &= ~NETIF_F_TSO;
829 if (!(esw_cfg->offload_flags & BIT_2))
830 netdev->features &= ~NETIF_F_TSO6;
831 } else {
832 netdev->features &= ~features;
833 adapter->rx_csum = 0;
834 }
835
836 netdev->vlan_features = (features & vlan_features);
837}
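/*
 * offload_flags bit usage above: BIT_0 turns the offload feature set and
 * Rx checksumming on or off as a whole, while BIT_1 and BIT_2 individually
 * preserve TSO and TSO6 when BIT_0 is set.
 */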
838
839static int
840qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
841{
842 struct qlcnic_esw_func_cfg esw_cfg;
843 struct qlcnic_npar_info *npar;
844 u8 i;
845
846 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
847 adapter->need_fw_reset ||
848 adapter->op_mode != QLCNIC_MGMT_FUNC)
849 return 0;
850
851 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
852 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
853 continue;
854 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
855 esw_cfg.pci_func = i;
856 esw_cfg.offload_flags = BIT_0;
857 esw_cfg.mac_learning = BIT_0;
858 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
859 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
860 if (qlcnic_config_switch_port(adapter, &esw_cfg))
861 return -EIO;
862 npar = &adapter->npars[i];
863 npar->pvid = esw_cfg.vlan_id;
864 npar->mac_learning = esw_cfg.offload_flags;
865 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
866 npar->discard_tagged = esw_cfg.discard_tagged;
867 npar->promisc_mode = esw_cfg.promisc_mode;
868 npar->offload_flags = esw_cfg.offload_flags;
869 }
870
871 return 0;
872}
873
4e8acb01
RB
874static int
875qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
876 struct qlcnic_npar_info *npar, int pci_func)
877{
878 struct qlcnic_esw_func_cfg esw_cfg;
879 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
880 esw_cfg.pci_func = pci_func;
881 esw_cfg.vlan_id = npar->pvid;
882 esw_cfg.mac_learning = npar->mac_learning;
883 esw_cfg.discard_tagged = npar->discard_tagged;
884 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
885 esw_cfg.offload_flags = npar->offload_flags;
886 esw_cfg.promisc_mode = npar->promisc_mode;
887 if (qlcnic_config_switch_port(adapter, &esw_cfg))
888 return -EIO;
889
890 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
891 if (qlcnic_config_switch_port(adapter, &esw_cfg))
892 return -EIO;
893
894 return 0;
895}
896
cea8975e
AC
897static int
898qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
899{
4e8acb01 900 int i, err;
cea8975e
AC
901 struct qlcnic_npar_info *npar;
902 struct qlcnic_info nic_info;
903
904 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
4e8acb01 905 !adapter->need_fw_reset || adapter->op_mode != QLCNIC_MGMT_FUNC)
cea8975e
AC
906 return 0;
907
4e8acb01
RB
908 /* Set the NPAR config data after FW reset */
909 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
910 npar = &adapter->npars[i];
911 if (npar->type != QLCNIC_TYPE_NIC)
912 continue;
913 err = qlcnic_get_nic_info(adapter, &nic_info, i);
914 if (err)
915 return err;
916 nic_info.min_tx_bw = npar->min_bw;
917 nic_info.max_tx_bw = npar->max_bw;
918 err = qlcnic_set_nic_info(adapter, &nic_info);
919 if (err)
920 return err;
cea8975e 921
4e8acb01
RB
922 if (npar->enable_pm) {
923 err = qlcnic_config_port_mirroring(adapter,
924 npar->dest_npar, 1, i);
925 if (err)
926 return err;
cea8975e 927 }
4e8acb01
RB
928 err = qlcnic_reset_eswitch_config(adapter, npar, i);
929 if (err)
930 return err;
cea8975e 931 }
4e8acb01 932 return 0;
cea8975e
AC
933}
934
78f84e1a
AKS
935static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
936{
937 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
938 u32 npar_state;
939
940 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
941 return 0;
942
943 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
944 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
945 msleep(1000);
946 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
947 }
948 if (!npar_opt_timeo) {
949 dev_err(&adapter->pdev->dev,
950 "Waiting for NPAR state to opertional timeout\n");
951 return -EIO;
952 }
953 return 0;
954}
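/*
 * Non-management functions poll QLCNIC_CRB_DEV_NPAR_STATE once per second,
 * for up to QLCNIC_DEV_NPAR_OPER_TIMEO seconds, waiting for the management
 * function to move the NPAR state to operational before firmware bring-up
 * continues.
 */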
955
af19b491
AKS
956static int
957qlcnic_start_firmware(struct qlcnic_adapter *adapter)
958{
d4066833 959 int err;
af19b491 960
aa5e18c0
SC
961 err = qlcnic_can_start_firmware(adapter);
962 if (err < 0)
963 return err;
964 else if (!err)
d4066833 965 goto check_fw_status;
af19b491 966
4d5bdb38
AKS
967 if (load_fw_file)
968 qlcnic_request_firmware(adapter);
8f891387 969 else {
970 if (qlcnic_check_flash_fw_ver(adapter))
971 goto err_out;
972
4d5bdb38 973 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
8f891387 974 }
af19b491
AKS
975
976 err = qlcnic_need_fw_reset(adapter);
af19b491 977 if (err == 0)
d4066833 978 goto set_dev_ready;
af19b491 979
d4066833
SC
980 err = qlcnic_pinit_from_rom(adapter);
981 if (err)
982 goto err_out;
af19b491
AKS
983 qlcnic_set_port_mode(adapter);
984
985 err = qlcnic_load_firmware(adapter);
986 if (err)
987 goto err_out;
988
989 qlcnic_release_firmware(adapter);
d4066833 990 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
af19b491 991
d4066833
SC
992check_fw_status:
993 err = qlcnic_check_fw_status(adapter);
af19b491
AKS
994 if (err)
995 goto err_out;
996
d4066833 997set_dev_ready:
af19b491 998 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
6df900e9 999 qlcnic_idc_debug_info(adapter, 1);
78f84e1a
AKS
1000 err = qlcnic_check_npar_opertional(adapter);
1001 if (err) {
1002 qlcnic_release_firmware(adapter);
1003 return err;
1004 }
0325d69b
RB
1005 if (qlcnic_set_default_offload_settings(adapter))
1006 goto err_out;
cea8975e
AC
1007 if (qlcnic_reset_npar_config(adapter))
1008 goto err_out;
1009 qlcnic_dev_set_npar_ready(adapter);
4e8acb01 1010 qlcnic_check_options(adapter);
af19b491
AKS
1011 adapter->need_fw_reset = 0;
1012
a7fc948f
AKS
1013 qlcnic_release_firmware(adapter);
1014 return 0;
af19b491
AKS
1015
1016err_out:
a7fc948f
AKS
1017 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1018 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
af19b491
AKS
1019 qlcnic_release_firmware(adapter);
1020 return err;
1021}
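/*
 * Firmware bring-up summary: when this function is allowed to start the
 * firmware it loads the image from flash (or from a file with
 * load_fw_file=1); otherwise it only verifies the firmware state.  After
 * the device is marked READY it waits for the NPAR-operational state,
 * applies the default eswitch offload settings, restores the NPAR
 * configuration after a reset and re-reads the adapter options.
 */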
1022
1023static int
1024qlcnic_request_irq(struct qlcnic_adapter *adapter)
1025{
1026 irq_handler_t handler;
1027 struct qlcnic_host_sds_ring *sds_ring;
1028 int err, ring;
1029
1030 unsigned long flags = 0;
1031 struct net_device *netdev = adapter->netdev;
1032 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1033
7eb9855d
AKS
1034 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1035 handler = qlcnic_tmp_intr;
1036 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1037 flags |= IRQF_SHARED;
1038
1039 } else {
1040 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1041 handler = qlcnic_msix_intr;
1042 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1043 handler = qlcnic_msi_intr;
1044 else {
1045 flags |= IRQF_SHARED;
1046 handler = qlcnic_intr;
1047 }
af19b491
AKS
1048 }
1049 adapter->irq = netdev->irq;
1050
1051 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1052 sds_ring = &recv_ctx->sds_rings[ring];
1053 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1054 err = request_irq(sds_ring->irq, handler,
1055 flags, sds_ring->name, sds_ring);
1056 if (err)
1057 return err;
1058 }
1059
1060 return 0;
1061}
1062
1063static void
1064qlcnic_free_irq(struct qlcnic_adapter *adapter)
1065{
1066 int ring;
1067 struct qlcnic_host_sds_ring *sds_ring;
1068
1069 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1070
1071 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1072 sds_ring = &recv_ctx->sds_rings[ring];
1073 free_irq(sds_ring->irq, sds_ring);
1074 }
1075}
1076
1077static void
1078qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1079{
1080 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1081 adapter->coal.normal.data.rx_time_us =
1082 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1083 adapter->coal.normal.data.rx_packets =
1084 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1085 adapter->coal.normal.data.tx_time_us =
1086 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1087 adapter->coal.normal.data.tx_packets =
1088 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1089}
1090
1091static int
1092__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1093{
8a15ad1f
AKS
1094 int ring;
1095 struct qlcnic_host_rds_ring *rds_ring;
1096
af19b491
AKS
1097 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1098 return -EIO;
1099
8a15ad1f
AKS
1100 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1101 return 0;
0325d69b
RB
1102 if (qlcnic_set_eswitch_port_config(adapter))
1103 return -EIO;
8a15ad1f
AKS
1104
1105 if (qlcnic_fw_create_ctx(adapter))
1106 return -EIO;
1107
1108 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1109 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1110 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1111 }
1112
af19b491
AKS
1113 qlcnic_set_multi(netdev);
1114 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1115
1116 adapter->ahw.linkup = 0;
1117
1118 if (adapter->max_sds_rings > 1)
1119 qlcnic_config_rss(adapter, 1);
1120
1121 qlcnic_config_intr_coalesce(adapter);
1122
24763d80 1123 if (netdev->features & NETIF_F_LRO)
af19b491
AKS
1124 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1125
1126 qlcnic_napi_enable(adapter);
1127
1128 qlcnic_linkevent_request(adapter, 1);
1129
68bf1c68 1130 adapter->reset_context = 0;
af19b491
AKS
1131 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1132 return 0;
1133}
1134
1135/* Used during resume and by the firmware recovery path. */
1136
1137static int
1138qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1139{
1140 int err = 0;
1141
1142 rtnl_lock();
1143 if (netif_running(netdev))
1144 err = __qlcnic_up(adapter, netdev);
1145 rtnl_unlock();
1146
1147 return err;
1148}
1149
1150static void
1151__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1152{
1153 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1154 return;
1155
1156 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1157 return;
1158
1159 smp_mb();
1160 spin_lock(&adapter->tx_clean_lock);
1161 netif_carrier_off(netdev);
1162 netif_tx_disable(netdev);
1163
1164 qlcnic_free_mac_list(adapter);
1165
1166 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1167
1168 qlcnic_napi_disable(adapter);
1169
8a15ad1f
AKS
1170 qlcnic_fw_destroy_ctx(adapter);
1171
1172 qlcnic_reset_rx_buffers_list(adapter);
af19b491
AKS
1173 qlcnic_release_tx_buffers(adapter);
1174 spin_unlock(&adapter->tx_clean_lock);
1175}
1176
1177/* Used during suspend and by the firmware recovery path. */
1178
1179static void
1180qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1181{
1182 rtnl_lock();
1183 if (netif_running(netdev))
1184 __qlcnic_down(adapter, netdev);
1185 rtnl_unlock();
1186
1187}
1188
1189static int
1190qlcnic_attach(struct qlcnic_adapter *adapter)
1191{
1192 struct net_device *netdev = adapter->netdev;
1193 struct pci_dev *pdev = adapter->pdev;
8a15ad1f 1194 int err;
af19b491
AKS
1195
1196 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1197 return 0;
1198
af19b491
AKS
1199 err = qlcnic_napi_add(adapter, netdev);
1200 if (err)
1201 return err;
1202
1203 err = qlcnic_alloc_sw_resources(adapter);
1204 if (err) {
1205 dev_err(&pdev->dev, "Error in setting sw resources\n");
8a15ad1f 1206 goto err_out_napi_del;
af19b491
AKS
1207 }
1208
1209 err = qlcnic_alloc_hw_resources(adapter);
1210 if (err) {
1211 dev_err(&pdev->dev, "Error in setting hw resources\n");
1212 goto err_out_free_sw;
1213 }
1214
af19b491
AKS
1215 err = qlcnic_request_irq(adapter);
1216 if (err) {
1217 dev_err(&pdev->dev, "failed to setup interrupt\n");
8a15ad1f 1218 goto err_out_free_hw;
af19b491
AKS
1219 }
1220
1221 qlcnic_init_coalesce_defaults(adapter);
1222
1223 qlcnic_create_sysfs_entries(adapter);
1224
1225 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1226 return 0;
1227
8a15ad1f 1228err_out_free_hw:
af19b491
AKS
1229 qlcnic_free_hw_resources(adapter);
1230err_out_free_sw:
1231 qlcnic_free_sw_resources(adapter);
8a15ad1f
AKS
1232err_out_napi_del:
1233 qlcnic_napi_del(adapter);
af19b491
AKS
1234 return err;
1235}
1236
1237static void
1238qlcnic_detach(struct qlcnic_adapter *adapter)
1239{
1240 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1241 return;
1242
1243 qlcnic_remove_sysfs_entries(adapter);
1244
1245 qlcnic_free_hw_resources(adapter);
1246 qlcnic_release_rx_buffers(adapter);
1247 qlcnic_free_irq(adapter);
1248 qlcnic_napi_del(adapter);
1249 qlcnic_free_sw_resources(adapter);
1250
1251 adapter->is_up = 0;
1252}
1253
7eb9855d
AKS
1254void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1255{
1256 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1257 struct qlcnic_host_sds_ring *sds_ring;
1258 int ring;
1259
78ad3892 1260 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
cdaff185
AKS
1261 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1262 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1263 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1264 qlcnic_disable_int(sds_ring);
1265 }
7eb9855d
AKS
1266 }
1267
8a15ad1f
AKS
1268 qlcnic_fw_destroy_ctx(adapter);
1269
7eb9855d
AKS
1270 qlcnic_detach(adapter);
1271
1272 adapter->diag_test = 0;
1273 adapter->max_sds_rings = max_sds_rings;
1274
1275 if (qlcnic_attach(adapter))
34ce3626 1276 goto out;
7eb9855d
AKS
1277
1278 if (netif_running(netdev))
1279 __qlcnic_up(adapter, netdev);
34ce3626 1280out:
7eb9855d
AKS
1281 netif_device_attach(netdev);
1282}
1283
1284int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1285{
1286 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1287 struct qlcnic_host_sds_ring *sds_ring;
8a15ad1f 1288 struct qlcnic_host_rds_ring *rds_ring;
7eb9855d
AKS
1289 int ring;
1290 int ret;
1291
1292 netif_device_detach(netdev);
1293
1294 if (netif_running(netdev))
1295 __qlcnic_down(adapter, netdev);
1296
1297 qlcnic_detach(adapter);
1298
1299 adapter->max_sds_rings = 1;
1300 adapter->diag_test = test;
1301
1302 ret = qlcnic_attach(adapter);
34ce3626
AKS
1303 if (ret) {
1304 netif_device_attach(netdev);
7eb9855d 1305 return ret;
34ce3626 1306 }
7eb9855d 1307
8a15ad1f
AKS
1308 ret = qlcnic_fw_create_ctx(adapter);
1309 if (ret) {
1310 qlcnic_detach(adapter);
57e46248 1311 netif_device_attach(netdev);
8a15ad1f
AKS
1312 return ret;
1313 }
1314
1315 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1316 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1317 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1318 }
1319
cdaff185
AKS
1320 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1321 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1322 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1323 qlcnic_enable_int(sds_ring);
1324 }
7eb9855d 1325 }
78ad3892 1326 set_bit(__QLCNIC_DEV_UP, &adapter->state);
7eb9855d
AKS
1327
1328 return 0;
1329}
1330
68bf1c68
AKS
1331/* Reset context in hardware only */
1332static int
1333qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1334{
1335 struct net_device *netdev = adapter->netdev;
1336
1337 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1338 return -EBUSY;
1339
1340 netif_device_detach(netdev);
1341
1342 qlcnic_down(adapter, netdev);
1343
1344 qlcnic_up(adapter, netdev);
1345
1346 netif_device_attach(netdev);
1347
1348 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1349 return 0;
1350}
1351
af19b491
AKS
1352int
1353qlcnic_reset_context(struct qlcnic_adapter *adapter)
1354{
1355 int err = 0;
1356 struct net_device *netdev = adapter->netdev;
1357
1358 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1359 return -EBUSY;
1360
1361 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1362
1363 netif_device_detach(netdev);
1364
1365 if (netif_running(netdev))
1366 __qlcnic_down(adapter, netdev);
1367
1368 qlcnic_detach(adapter);
1369
1370 if (netif_running(netdev)) {
1371 err = qlcnic_attach(adapter);
1372 if (!err)
34ce3626 1373 __qlcnic_up(adapter, netdev);
af19b491
AKS
1374 }
1375
1376 netif_device_attach(netdev);
1377 }
1378
af19b491
AKS
1379 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1380 return err;
1381}
1382
1383static int
1384qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1bb09fb9 1385 struct net_device *netdev, u8 pci_using_dac)
af19b491
AKS
1386{
1387 int err;
1388 struct pci_dev *pdev = adapter->pdev;
1389
1390 adapter->rx_csum = 1;
1391 adapter->mc_enabled = 0;
1392 adapter->max_mc_count = 38;
1393
1394 netdev->netdev_ops = &qlcnic_netdev_ops;
ef71ff83 1395 netdev->watchdog_timeo = 5*HZ;
af19b491
AKS
1396
1397 qlcnic_change_mtu(netdev, netdev->mtu);
1398
1399 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1400
2e9d722d 1401 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
ac8d0c4f 1402 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
2e9d722d 1403 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
ac8d0c4f
AC
1404 NETIF_F_IPV6_CSUM);
1405
1406 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1407 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1408 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1409 }
af19b491 1410
1bb09fb9 1411 if (pci_using_dac) {
af19b491
AKS
1412 netdev->features |= NETIF_F_HIGHDMA;
1413 netdev->vlan_features |= NETIF_F_HIGHDMA;
1414 }
1415
1416 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1417 netdev->features |= (NETIF_F_HW_VLAN_TX);
1418
1419 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1420 netdev->features |= NETIF_F_LRO;
af19b491
AKS
1421 netdev->irq = adapter->msix_entries[0].vector;
1422
af19b491
AKS
1423 if (qlcnic_read_mac_addr(adapter))
1424 dev_warn(&pdev->dev, "failed to read mac addr\n");
1425
1426 netif_carrier_off(netdev);
1427 netif_stop_queue(netdev);
1428
1429 err = register_netdev(netdev);
1430 if (err) {
1431 dev_err(&pdev->dev, "failed to register net device\n");
1432 return err;
1433 }
1434
1435 return 0;
1436}
1437
1bb09fb9
AKS
1438static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1439{
1440 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1441 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1442 *pci_using_dac = 1;
1443 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1444 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1445 *pci_using_dac = 0;
1446 else {
1447 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1448 return -EIO;
1449 }
1450
1451 return 0;
1452}
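/*
 * DMA addressing: try a 64-bit streaming and consistent DMA mask first and
 * fall back to 32-bit; pci_using_dac is later used in qlcnic_setup_netdev()
 * to advertise NETIF_F_HIGHDMA.
 */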
1453
af19b491
AKS
1454static int __devinit
1455qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1456{
1457 struct net_device *netdev = NULL;
1458 struct qlcnic_adapter *adapter = NULL;
1459 int err;
af19b491 1460 uint8_t revision_id;
1bb09fb9 1461 uint8_t pci_using_dac;
af19b491
AKS
1462
1463 err = pci_enable_device(pdev);
1464 if (err)
1465 return err;
1466
1467 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1468 err = -ENODEV;
1469 goto err_out_disable_pdev;
1470 }
1471
1bb09fb9
AKS
1472 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1473 if (err)
1474 goto err_out_disable_pdev;
1475
af19b491
AKS
1476 err = pci_request_regions(pdev, qlcnic_driver_name);
1477 if (err)
1478 goto err_out_disable_pdev;
1479
1480 pci_set_master(pdev);
451724c8 1481 pci_enable_pcie_error_reporting(pdev);
af19b491
AKS
1482
1483 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1484 if (!netdev) {
1485 dev_err(&pdev->dev, "failed to allocate net_device\n");
1486 err = -ENOMEM;
1487 goto err_out_free_res;
1488 }
1489
1490 SET_NETDEV_DEV(netdev, &pdev->dev);
1491
1492 adapter = netdev_priv(netdev);
1493 adapter->netdev = netdev;
1494 adapter->pdev = pdev;
6df900e9 1495 adapter->dev_rst_time = jiffies;
af19b491
AKS
1496
1497 revision_id = pdev->revision;
1498 adapter->ahw.revision_id = revision_id;
1499
1500 rwlock_init(&adapter->ahw.crb_lock);
1501 mutex_init(&adapter->ahw.mem_lock);
1502
1503 spin_lock_init(&adapter->tx_clean_lock);
1504 INIT_LIST_HEAD(&adapter->mac_list);
1505
1506 err = qlcnic_setup_pci_map(adapter);
1507 if (err)
1508 goto err_out_free_netdev;
1509
1510 /* This will be reset for mezz cards */
2e9d722d 1511 adapter->portnum = adapter->ahw.pci_func;
af19b491
AKS
1512
1513 err = qlcnic_get_board_info(adapter);
1514 if (err) {
1515 dev_err(&pdev->dev, "Error getting board config info.\n");
1516 goto err_out_iounmap;
1517 }
1518
02f6e46f
SC
1519 if (qlcnic_read_mac_addr(adapter))
1520 dev_warn(&pdev->dev, "failed to read mac addr\n");
1521
b3a24649
SC
1522 if (qlcnic_setup_idc_param(adapter))
1523 goto err_out_iounmap;
af19b491 1524
9f26f547 1525 err = adapter->nic_ops->start_firmware(adapter);
a7fc948f
AKS
1526 if (err) {
 1527 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
af19b491 1528 goto err_out_decr_ref;
a7fc948f 1529 }
af19b491 1530
af19b491
AKS
1531 qlcnic_clear_stats(adapter);
1532
1533 qlcnic_setup_intr(adapter);
1534
1bb09fb9 1535 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
af19b491
AKS
1536 if (err)
1537 goto err_out_disable_msi;
1538
1539 pci_set_drvdata(pdev, adapter);
1540
1541 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1542
1543 switch (adapter->ahw.port_type) {
1544 case QLCNIC_GBE:
1545 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1546 adapter->netdev->name);
1547 break;
1548 case QLCNIC_XGBE:
1549 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1550 adapter->netdev->name);
1551 break;
1552 }
1553
1554 qlcnic_create_diag_entries(adapter);
1555
1556 return 0;
1557
1558err_out_disable_msi:
1559 qlcnic_teardown_intr(adapter);
1560
1561err_out_decr_ref:
21854f02 1562 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1563
1564err_out_iounmap:
1565 qlcnic_cleanup_pci_map(adapter);
1566
1567err_out_free_netdev:
1568 free_netdev(netdev);
1569
1570err_out_free_res:
1571 pci_release_regions(pdev);
1572
1573err_out_disable_pdev:
1574 pci_set_drvdata(pdev, NULL);
1575 pci_disable_device(pdev);
1576 return err;
1577}
1578
1579static void __devexit qlcnic_remove(struct pci_dev *pdev)
1580{
1581 struct qlcnic_adapter *adapter;
1582 struct net_device *netdev;
1583
1584 adapter = pci_get_drvdata(pdev);
1585 if (adapter == NULL)
1586 return;
1587
1588 netdev = adapter->netdev;
1589
1590 qlcnic_cancel_fw_work(adapter);
1591
1592 unregister_netdev(netdev);
1593
af19b491
AKS
1594 qlcnic_detach(adapter);
1595
2e9d722d
AC
1596 if (adapter->npars != NULL)
1597 kfree(adapter->npars);
1598 if (adapter->eswitch != NULL)
1599 kfree(adapter->eswitch);
1600
21854f02 1601 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1602
1603 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1604
1605 qlcnic_teardown_intr(adapter);
1606
1607 qlcnic_remove_diag_entries(adapter);
1608
1609 qlcnic_cleanup_pci_map(adapter);
1610
1611 qlcnic_release_firmware(adapter);
1612
451724c8 1613 pci_disable_pcie_error_reporting(pdev);
af19b491
AKS
1614 pci_release_regions(pdev);
1615 pci_disable_device(pdev);
1616 pci_set_drvdata(pdev, NULL);
1617
1618 free_netdev(netdev);
1619}
1620static int __qlcnic_shutdown(struct pci_dev *pdev)
1621{
1622 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1623 struct net_device *netdev = adapter->netdev;
1624 int retval;
1625
1626 netif_device_detach(netdev);
1627
1628 qlcnic_cancel_fw_work(adapter);
1629
1630 if (netif_running(netdev))
1631 qlcnic_down(adapter, netdev);
1632
21854f02 1633 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1634
1635 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1636
1637 retval = pci_save_state(pdev);
1638 if (retval)
1639 return retval;
1640
1641 if (qlcnic_wol_supported(adapter)) {
1642 pci_enable_wake(pdev, PCI_D3cold, 1);
1643 pci_enable_wake(pdev, PCI_D3hot, 1);
1644 }
1645
1646 return 0;
1647}
1648
1649static void qlcnic_shutdown(struct pci_dev *pdev)
1650{
1651 if (__qlcnic_shutdown(pdev))
1652 return;
1653
1654 pci_disable_device(pdev);
1655}
1656
1657#ifdef CONFIG_PM
1658static int
1659qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1660{
1661 int retval;
1662
1663 retval = __qlcnic_shutdown(pdev);
1664 if (retval)
1665 return retval;
1666
1667 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1668 return 0;
1669}
1670
1671static int
1672qlcnic_resume(struct pci_dev *pdev)
1673{
1674 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1675 struct net_device *netdev = adapter->netdev;
1676 int err;
1677
1678 err = pci_enable_device(pdev);
1679 if (err)
1680 return err;
1681
1682 pci_set_power_state(pdev, PCI_D0);
1683 pci_set_master(pdev);
1684 pci_restore_state(pdev);
1685
9f26f547 1686 err = adapter->nic_ops->start_firmware(adapter);
af19b491
AKS
1687 if (err) {
1688 dev_err(&pdev->dev, "failed to start firmware\n");
1689 return err;
1690 }
1691
1692 if (netif_running(netdev)) {
af19b491
AKS
1693 err = qlcnic_up(adapter, netdev);
1694 if (err)
52486a3a 1695 goto done;
af19b491
AKS
1696
1697 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1698 }
52486a3a 1699done:
af19b491
AKS
1700 netif_device_attach(netdev);
1701 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1702 return 0;
af19b491
AKS
1703}
1704#endif
1705
1706static int qlcnic_open(struct net_device *netdev)
1707{
1708 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1709 int err;
1710
af19b491
AKS
1711 err = qlcnic_attach(adapter);
1712 if (err)
1713 return err;
1714
1715 err = __qlcnic_up(adapter, netdev);
1716 if (err)
1717 goto err_out;
1718
1719 netif_start_queue(netdev);
1720
1721 return 0;
1722
1723err_out:
1724 qlcnic_detach(adapter);
1725 return err;
1726}
1727
1728/*
1729 * qlcnic_close - Disables a network interface entry point
1730 */
1731static int qlcnic_close(struct net_device *netdev)
1732{
1733 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1734
1735 __qlcnic_down(adapter, netdev);
1736 return 0;
1737}
1738
1739static void
1740qlcnic_tso_check(struct net_device *netdev,
1741 struct qlcnic_host_tx_ring *tx_ring,
1742 struct cmd_desc_type0 *first_desc,
1743 struct sk_buff *skb)
1744{
1745 u8 opcode = TX_ETHER_PKT;
1746 __be16 protocol = skb->protocol;
8cf61f89
AKS
1747 u16 flags = 0;
1748 int copied, offset, copy_len, hdr_len = 0, tso = 0;
af19b491
AKS
1749 struct cmd_desc_type0 *hwdesc;
1750 struct vlan_ethhdr *vh;
8bfe8b91 1751 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2e9d722d 1752 u32 producer = tx_ring->producer;
8cf61f89 1753 int vlan_oob = first_desc->flags_opcode & cpu_to_le16(FLAGS_VLAN_OOB);
af19b491 1754
2e9d722d
AC
1755 if (*(skb->data) & BIT_0) {
1756 flags |= BIT_0;
1757 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1758 }
1759
af19b491
AKS
1760 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1761 skb_shinfo(skb)->gso_size > 0) {
1762
1763 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1764
1765 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1766 first_desc->total_hdr_length = hdr_len;
1767 if (vlan_oob) {
1768 first_desc->total_hdr_length += VLAN_HLEN;
1769 first_desc->tcp_hdr_offset = VLAN_HLEN;
1770 first_desc->ip_hdr_offset = VLAN_HLEN;
1771 /* Only in case of TSO on vlan device */
1772 flags |= FLAGS_VLAN_TAGGED;
1773 }
1774
1775 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1776 TX_TCP_LSO6 : TX_TCP_LSO;
1777 tso = 1;
1778
1779 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1780 u8 l4proto;
1781
1782 if (protocol == cpu_to_be16(ETH_P_IP)) {
1783 l4proto = ip_hdr(skb)->protocol;
1784
1785 if (l4proto == IPPROTO_TCP)
1786 opcode = TX_TCP_PKT;
1787 else if (l4proto == IPPROTO_UDP)
1788 opcode = TX_UDP_PKT;
1789 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1790 l4proto = ipv6_hdr(skb)->nexthdr;
1791
1792 if (l4proto == IPPROTO_TCP)
1793 opcode = TX_TCPV6_PKT;
1794 else if (l4proto == IPPROTO_UDP)
1795 opcode = TX_UDPV6_PKT;
1796 }
1797 }
1798
1799 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1800 first_desc->ip_hdr_offset += skb_network_offset(skb);
1801 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1802
1803 if (!tso)
1804 return;
1805
1806 /* For LSO, we need to copy the MAC/IP/TCP headers into
1807 * the descriptor ring
1808 */
af19b491
AKS
1809 copied = 0;
1810 offset = 2;
1811
1812 if (vlan_oob) {
1813 /* Create a TSO vlan header template for firmware */
1814
1815 hwdesc = &tx_ring->desc_head[producer];
1816 tx_ring->cmd_buf_arr[producer].skb = NULL;
1817
1818 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1819 hdr_len + VLAN_HLEN);
1820
1821 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1822 skb_copy_from_linear_data(skb, vh, 12);
1823 vh->h_vlan_proto = htons(ETH_P_8021Q);
8cf61f89 1824 vh->h_vlan_TCI = htons(first_desc->vlan_TCI);
af19b491
AKS
1825 skb_copy_from_linear_data_offset(skb, 12,
1826 (char *)vh + 16, copy_len - 16);
1827
1828 copied = copy_len - VLAN_HLEN;
1829 offset = 0;
1830
1831 producer = get_next_index(producer, tx_ring->num_desc);
1832 }
1833
1834 while (copied < hdr_len) {
1835
1836 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1837 (hdr_len - copied));
1838
1839 hwdesc = &tx_ring->desc_head[producer];
1840 tx_ring->cmd_buf_arr[producer].skb = NULL;
1841
1842 skb_copy_from_linear_data_offset(skb, copied,
1843 (char *)hwdesc + offset, copy_len);
1844
1845 copied += copy_len;
1846 offset = 0;
1847
1848 producer = get_next_index(producer, tx_ring->num_desc);
1849 }
1850
1851 tx_ring->producer = producer;
1852 barrier();
8bfe8b91 1853 adapter->stats.lso_frames++;
af19b491
AKS
1854}
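/*
 * For LSO the MAC/IP/TCP headers are copied into additional Tx descriptors
 * so the firmware can replicate them for every segment.  When the VLAN tag
 * is out-of-band (FLAGS_VLAN_OOB), an 802.1Q header template carrying
 * first_desc->vlan_TCI is built in the first copied descriptor so the
 * resulting segments are transmitted tagged.
 */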
1855
1856static int
1857qlcnic_map_tx_skb(struct pci_dev *pdev,
1858 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1859{
1860 struct qlcnic_skb_frag *nf;
1861 struct skb_frag_struct *frag;
1862 int i, nr_frags;
1863 dma_addr_t map;
1864
1865 nr_frags = skb_shinfo(skb)->nr_frags;
1866 nf = &pbuf->frag_array[0];
1867
1868 map = pci_map_single(pdev, skb->data,
1869 skb_headlen(skb), PCI_DMA_TODEVICE);
1870 if (pci_dma_mapping_error(pdev, map))
1871 goto out_err;
1872
1873 nf->dma = map;
1874 nf->length = skb_headlen(skb);
1875
1876 for (i = 0; i < nr_frags; i++) {
1877 frag = &skb_shinfo(skb)->frags[i];
1878 nf = &pbuf->frag_array[i+1];
1879
1880 map = pci_map_page(pdev, frag->page, frag->page_offset,
1881 frag->size, PCI_DMA_TODEVICE);
1882 if (pci_dma_mapping_error(pdev, map))
1883 goto unwind;
1884
1885 nf->dma = map;
1886 nf->length = frag->size;
1887 }
1888
1889 return 0;
1890
1891unwind:
1892 while (--i >= 0) {
1893 nf = &pbuf->frag_array[i+1];
1894 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1895 }
1896
1897 nf = &pbuf->frag_array[0];
1898 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1899
1900out_err:
1901 return -ENOMEM;
1902}
1903
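/* Fill in the VLAN tag for the command descriptor: pick up an inline
 * 802.1Q header or an out-of-band tag from the skb. When a port VLAN
 * id (pvid) is configured for this function, tagged frames are
 * rejected unless tagging is enabled, and untagged frames are sent
 * with the pvid as an out-of-band tag.
 */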
8cf61f89
AKS
1904static int
1905qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
1906 struct sk_buff *skb,
1907 struct cmd_desc_type0 *first_desc)
1908{
1909 u8 opcode = 0;
1910 u16 flags = 0;
1911 __be16 protocol = skb->protocol;
1912 struct vlan_ethhdr *vh;
1913
1914 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1915 vh = (struct vlan_ethhdr *)skb->data;
1916 protocol = vh->h_vlan_encapsulated_proto;
1917 flags = FLAGS_VLAN_TAGGED;
1918 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
1919 } else if (vlan_tx_tag_present(skb)) {
1920 flags = FLAGS_VLAN_OOB;
1921 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
1922 }
1923 if (unlikely(adapter->pvid)) {
1924 if (first_desc->vlan_TCI &&
1925 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1926 return -EIO;
1927 if (first_desc->vlan_TCI &&
1928 (adapter->flags & QLCNIC_TAGGING_ENABLED))
1929 goto set_flags;
1930
1931 flags = FLAGS_VLAN_OOB;
1932 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
1933 }
1934set_flags:
1935 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1936 return 0;
1937}
1938
af19b491
AKS
1939static inline void
1940qlcnic_clear_cmddesc(u64 *desc)
1941{
1942 desc[0] = 0ULL;
1943 desc[2] = 0ULL;
8cf61f89 1944 desc[7] = 0ULL;
af19b491
AKS
1945}
1946
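/* Main transmit handler: stop the queue if the device is down or the
 * ring is nearly full, DMA-map the skb, pack up to four buffer
 * addresses per command descriptor, set up VLAN/checksum/TSO offload,
 * and finally advance the hardware producer index.
 */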
cdaff185 1947netdev_tx_t
af19b491
AKS
1948qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1949{
1950 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1951 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1952 struct qlcnic_cmd_buffer *pbuf;
1953 struct qlcnic_skb_frag *buffrag;
1954 struct cmd_desc_type0 *hwdesc, *first_desc;
1955 struct pci_dev *pdev;
1956 int i, k;
1957
1958 u32 producer;
1959 int frag_count, no_of_desc;
1960 u32 num_txd = tx_ring->num_desc;
1961
780ab790
AKS
1962 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1963 netif_stop_queue(netdev);
1964 return NETDEV_TX_BUSY;
1965 }
1966
fe4d434d
SC
1967 if (adapter->flags & QLCNIC_MACSPOOF) {
1968 if (compare_ether_addr(eth_hdr(skb)->h_source,
1969 adapter->mac_addr))
1970 goto drop_packet;
1971 }
1972
af19b491
AKS
1973 frag_count = skb_shinfo(skb)->nr_frags + 1;
1974
1975 	/* 4 fragments per cmd desc */
1976 no_of_desc = (frag_count + 3) >> 2;
1977
ef71ff83 1978 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 1979 netif_stop_queue(netdev);
ef71ff83
RB
1980 smp_mb();
1981 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1982 netif_start_queue(netdev);
1983 else {
1984 adapter->stats.xmit_off++;
1985 return NETDEV_TX_BUSY;
1986 }
af19b491
AKS
1987 }
1988
1989 producer = tx_ring->producer;
1990 pbuf = &tx_ring->cmd_buf_arr[producer];
1991
1992 pdev = adapter->pdev;
1993
8cf61f89
AKS
1994 first_desc = hwdesc = &tx_ring->desc_head[producer];
1995 qlcnic_clear_cmddesc((u64 *)hwdesc);
1996
1997 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
1998 goto drop_packet;
1999
8ae6df97
AKS
2000 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2001 adapter->stats.tx_dma_map_error++;
af19b491 2002 goto drop_packet;
8ae6df97 2003 }
af19b491
AKS
2004
2005 pbuf->skb = skb;
2006 pbuf->frag_count = frag_count;
2007
af19b491
AKS
2008 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2009 qlcnic_set_tx_port(first_desc, adapter->portnum);
2010
2011 for (i = 0; i < frag_count; i++) {
2012
2013 k = i % 4;
2014
2015 if ((k == 0) && (i > 0)) {
2016 			/* move to next desc. */
2017 producer = get_next_index(producer, num_txd);
2018 hwdesc = &tx_ring->desc_head[producer];
2019 qlcnic_clear_cmddesc((u64 *)hwdesc);
2020 tx_ring->cmd_buf_arr[producer].skb = NULL;
2021 }
2022
2023 buffrag = &pbuf->frag_array[i];
2024
2025 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2026 switch (k) {
2027 case 0:
2028 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2029 break;
2030 case 1:
2031 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2032 break;
2033 case 2:
2034 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2035 break;
2036 case 3:
2037 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2038 break;
2039 }
2040 }
2041
2042 tx_ring->producer = get_next_index(producer, num_txd);
2043
2044 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2045
2046 qlcnic_update_cmd_producer(adapter, tx_ring);
2047
2048 adapter->stats.txbytes += skb->len;
2049 adapter->stats.xmitcalled++;
2050
2051 return NETDEV_TX_OK;
2052
2053drop_packet:
2054 adapter->stats.txdropped++;
2055 dev_kfree_skb_any(skb);
2056 return NETDEV_TX_OK;
2057}
2058
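/* Read the board temperature state from CRB_TEMP_STATE. Returns 1 in
 * the PANIC range (hardware has been shut down) so the caller can
 * detach; logs a warning when entering the WARN range.
 */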
2059static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2060{
2061 struct net_device *netdev = adapter->netdev;
2062 u32 temp, temp_state, temp_val;
2063 int rv = 0;
2064
2065 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2066
2067 temp_state = qlcnic_get_temp_state(temp);
2068 temp_val = qlcnic_get_temp_val(temp);
2069
2070 if (temp_state == QLCNIC_TEMP_PANIC) {
2071 dev_err(&netdev->dev,
2072 "Device temperature %d degrees C exceeds"
2073 " maximum allowed. Hardware has been shut down.\n",
2074 temp_val);
2075 rv = 1;
2076 } else if (temp_state == QLCNIC_TEMP_WARN) {
2077 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2078 dev_err(&netdev->dev,
2079 "Device temperature %d degrees C "
2080 "exceeds operating range."
2081 " Immediate action needed.\n",
2082 temp_val);
2083 }
2084 } else {
2085 if (adapter->temp == QLCNIC_TEMP_WARN) {
2086 dev_info(&netdev->dev,
2087 "Device temperature is now %d degrees C"
2088 " in normal range.\n", temp_val);
2089 }
2090 }
2091 adapter->temp = temp_state;
2092 return rv;
2093}
2094
2095void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2096{
2097 struct net_device *netdev = adapter->netdev;
2098
2099 if (adapter->ahw.linkup && !linkup) {
69324275 2100 netdev_info(netdev, "NIC Link is down\n");
af19b491
AKS
2101 adapter->ahw.linkup = 0;
2102 if (netif_running(netdev)) {
2103 netif_carrier_off(netdev);
2104 netif_stop_queue(netdev);
2105 }
2106 } else if (!adapter->ahw.linkup && linkup) {
69324275 2107 netdev_info(netdev, "NIC Link is up\n");
af19b491
AKS
2108 adapter->ahw.linkup = 1;
2109 if (netif_running(netdev)) {
2110 netif_carrier_on(netdev);
2111 netif_wake_queue(netdev);
2112 }
2113 }
2114}
2115
2116static void qlcnic_tx_timeout(struct net_device *netdev)
2117{
2118 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2119
2120 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2121 return;
2122
2123 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2124
2125 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2126 adapter->need_fw_reset = 1;
2127 else
2128 adapter->reset_context = 1;
af19b491
AKS
2129}
2130
2131static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2132{
2133 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2134 struct net_device_stats *stats = &netdev->stats;
2135
af19b491
AKS
2136 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2137 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2138 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2139 stats->tx_bytes = adapter->stats.txbytes;
2140 stats->rx_dropped = adapter->stats.rxdropped;
2141 stats->tx_dropped = adapter->stats.txdropped;
2142
2143 return stats;
2144}
2145
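/* Legacy INTx handling: check the shared status vector and interrupt
 * state machine to see whether the interrupt belongs to this function,
 * then ack it via the target status register and flush with two reads.
 */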
7eb9855d 2146static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2147{
af19b491
AKS
2148 u32 status;
2149
2150 status = readl(adapter->isr_int_vec);
2151
2152 if (!(status & adapter->int_vec_bit))
2153 return IRQ_NONE;
2154
2155 /* check interrupt state machine, to be sure */
2156 status = readl(adapter->crb_int_state_reg);
2157 if (!ISR_LEGACY_INT_TRIGGERED(status))
2158 return IRQ_NONE;
2159
2160 writel(0xffffffff, adapter->tgt_status_reg);
2161 /* read twice to ensure write is flushed */
2162 readl(adapter->isr_int_vec);
2163 readl(adapter->isr_int_vec);
2164
7eb9855d
AKS
2165 return IRQ_HANDLED;
2166}
2167
2168static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2169{
2170 struct qlcnic_host_sds_ring *sds_ring = data;
2171 struct qlcnic_adapter *adapter = sds_ring->adapter;
2172
2173 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2174 goto done;
2175 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2176 writel(0xffffffff, adapter->tgt_status_reg);
2177 goto done;
2178 }
2179
2180 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2181 return IRQ_NONE;
2182
2183done:
2184 adapter->diag_cnt++;
2185 qlcnic_enable_int(sds_ring);
2186 return IRQ_HANDLED;
2187}
2188
2189static irqreturn_t qlcnic_intr(int irq, void *data)
2190{
2191 struct qlcnic_host_sds_ring *sds_ring = data;
2192 struct qlcnic_adapter *adapter = sds_ring->adapter;
2193
2194 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2195 return IRQ_NONE;
2196
af19b491
AKS
2197 napi_schedule(&sds_ring->napi);
2198
2199 return IRQ_HANDLED;
2200}
2201
2202static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2203{
2204 struct qlcnic_host_sds_ring *sds_ring = data;
2205 struct qlcnic_adapter *adapter = sds_ring->adapter;
2206
2207 /* clear interrupt */
2208 writel(0xffffffff, adapter->tgt_status_reg);
2209
2210 napi_schedule(&sds_ring->napi);
2211 return IRQ_HANDLED;
2212}
2213
2214static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2215{
2216 struct qlcnic_host_sds_ring *sds_ring = data;
2217
2218 napi_schedule(&sds_ring->napi);
2219 return IRQ_HANDLED;
2220}
2221
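/* Reclaim transmit buffers completed by the firmware: walk the ring
 * from the software consumer to the hardware consumer, unmapping and
 * freeing skbs (bounded by MAX_STATUS_HANDLE per call), and wake the
 * queue once enough descriptors are free. Returns nonzero when the
 * ring is drained up to the hardware consumer (or another context
 * already holds the clean lock).
 */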
2222static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2223{
2224 u32 sw_consumer, hw_consumer;
2225 int count = 0, i;
2226 struct qlcnic_cmd_buffer *buffer;
2227 struct pci_dev *pdev = adapter->pdev;
2228 struct net_device *netdev = adapter->netdev;
2229 struct qlcnic_skb_frag *frag;
2230 int done;
2231 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2232
2233 if (!spin_trylock(&adapter->tx_clean_lock))
2234 return 1;
2235
2236 sw_consumer = tx_ring->sw_consumer;
2237 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2238
2239 while (sw_consumer != hw_consumer) {
2240 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2241 if (buffer->skb) {
2242 frag = &buffer->frag_array[0];
2243 pci_unmap_single(pdev, frag->dma, frag->length,
2244 PCI_DMA_TODEVICE);
2245 frag->dma = 0ULL;
2246 for (i = 1; i < buffer->frag_count; i++) {
2247 frag++;
2248 pci_unmap_page(pdev, frag->dma, frag->length,
2249 PCI_DMA_TODEVICE);
2250 frag->dma = 0ULL;
2251 }
2252
2253 adapter->stats.xmitfinished++;
2254 dev_kfree_skb_any(buffer->skb);
2255 buffer->skb = NULL;
2256 }
2257
2258 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2259 if (++count >= MAX_STATUS_HANDLE)
2260 break;
2261 }
2262
2263 if (count && netif_running(netdev)) {
2264 tx_ring->sw_consumer = sw_consumer;
2265
2266 smp_mb();
2267
2268 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2269 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2270 netif_wake_queue(netdev);
8bfe8b91 2271 adapter->stats.xmit_on++;
af19b491 2272 }
af19b491 2273 }
ef71ff83 2274 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2275 }
2276 /*
2277 	 * If everything is freed up to the consumer, then check if the ring
2278 	 * is full. If the ring is full, check whether more needs to be freed
2279 	 * and schedule the callback again.
2280 *
2281 * This happens when there are 2 CPUs. One could be freeing and the
2282 * other filling it. If the ring is full when we get out of here and
2283 * the card has already interrupted the host then the host can miss the
2284 * interrupt.
2285 *
2286 * There is still a possible race condition and the host could miss an
2287 * interrupt. The card has to take care of this.
2288 */
2289 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2290 done = (sw_consumer == hw_consumer);
2291 spin_unlock(&adapter->tx_clean_lock);
2292
2293 return done;
2294}
2295
2296static int qlcnic_poll(struct napi_struct *napi, int budget)
2297{
2298 struct qlcnic_host_sds_ring *sds_ring =
2299 container_of(napi, struct qlcnic_host_sds_ring, napi);
2300
2301 struct qlcnic_adapter *adapter = sds_ring->adapter;
2302
2303 int tx_complete;
2304 int work_done;
2305
2306 tx_complete = qlcnic_process_cmd_ring(adapter);
2307
2308 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2309
2310 if ((work_done < budget) && tx_complete) {
2311 napi_complete(&sds_ring->napi);
2312 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2313 qlcnic_enable_int(sds_ring);
2314 }
2315
2316 return work_done;
2317}
2318
8f891387 2319static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2320{
2321 struct qlcnic_host_sds_ring *sds_ring =
2322 container_of(napi, struct qlcnic_host_sds_ring, napi);
2323
2324 struct qlcnic_adapter *adapter = sds_ring->adapter;
2325 int work_done;
2326
2327 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2328
2329 if (work_done < budget) {
2330 napi_complete(&sds_ring->napi);
2331 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2332 qlcnic_enable_int(sds_ring);
2333 }
2334
2335 return work_done;
2336}
2337
af19b491
AKS
2338#ifdef CONFIG_NET_POLL_CONTROLLER
2339static void qlcnic_poll_controller(struct net_device *netdev)
2340{
2341 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2342 disable_irq(adapter->irq);
2343 qlcnic_intr(adapter->irq, adapter);
2344 enable_irq(adapter->irq);
2345}
2346#endif
2347
6df900e9
SC
2348static void
2349qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2350{
2351 u32 val;
2352
2353 val = adapter->portnum & 0xf;
2354 val |= encoding << 7;
2355 val |= (jiffies - adapter->dev_rst_time) << 8;
2356
2357 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2358 adapter->dev_rst_time = jiffies;
2359}
2360
ade91f8e
AKS
2361static int
2362qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2363{
2364 u32 val;
2365
2366 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2367 state != QLCNIC_DEV_NEED_QUISCENT);
2368
2369 if (qlcnic_api_lock(adapter))
ade91f8e 2370 return -EIO;
af19b491
AKS
2371
2372 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2373
2374 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2375 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2376 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2377 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2378
2379 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2380
2381 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2382
2383 return 0;
af19b491
AKS
2384}
2385
1b95a839
AKS
2386static int
2387qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2388{
2389 u32 val;
2390
2391 if (qlcnic_api_lock(adapter))
2392 return -EBUSY;
2393
2394 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2395 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2396 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2397
2398 qlcnic_api_unlock(adapter);
2399
2400 return 0;
2401}
2402
af19b491 2403static void
21854f02 2404qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2405{
2406 u32 val;
2407
2408 if (qlcnic_api_lock(adapter))
2409 goto err;
2410
2411 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
6d2a4724 2412 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
af19b491
AKS
2413 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2414
21854f02
AKS
2415 if (failed) {
2416 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2417 dev_info(&adapter->pdev->dev,
2418 "Device state set to Failed. Please Reboot\n");
2419 } else if (!(val & 0x11111111))
af19b491
AKS
2420 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2421
2422 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2423 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2424 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2425
2426 qlcnic_api_unlock(adapter);
2427err:
2428 adapter->fw_fail_cnt = 0;
2429 clear_bit(__QLCNIC_START_FW, &adapter->state);
2430 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2431}
2432
f73dfc50 2433/* Grab the api lock before checking state */
af19b491
AKS
2434static int
2435qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2436{
2437 int act, state;
2438
2439 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2440 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2441
2442 if (((state & 0x11111111) == (act & 0x11111111)) ||
2443 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2444 return 0;
2445 else
2446 return 1;
2447}
2448
96f8118c
SC
2449static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2450{
2451 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2452
2453 if (val != QLCNIC_DRV_IDC_VER) {
2454 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2455 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2456 }
2457
2458 return 0;
2459}
2460
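/* IDC handshake run before firmware load: register this function in
 * the device reference count, then act on the device state - claim
 * initialization when COLD, ack a pending reset/quiescent request, or
 * wait up to dev_init_timeo seconds for the device to become READY.
 * Returns 1 if this function should load the firmware, 0 if firmware
 * is already up, negative on failure.
 */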
af19b491
AKS
2461static int
2462qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2463{
2464 u32 val, prev_state;
aa5e18c0 2465 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2466 u8 portnum = adapter->portnum;
96f8118c 2467 u8 ret;
af19b491 2468
f73dfc50
AKS
2469 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2470 return 1;
2471
af19b491
AKS
2472 if (qlcnic_api_lock(adapter))
2473 return -1;
2474
2475 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
6d2a4724
AKS
2476 if (!(val & (1 << (portnum * 4)))) {
2477 QLC_DEV_SET_REF_CNT(val, portnum);
af19b491 2478 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
af19b491
AKS
2479 }
2480
2481 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2482 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2483
2484 switch (prev_state) {
2485 case QLCNIC_DEV_COLD:
bbd8c6a4 2486 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2487 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2488 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2489 qlcnic_api_unlock(adapter);
2490 return 1;
2491
2492 case QLCNIC_DEV_READY:
96f8118c 2493 ret = qlcnic_check_idc_ver(adapter);
af19b491 2494 qlcnic_api_unlock(adapter);
96f8118c 2495 return ret;
af19b491
AKS
2496
2497 case QLCNIC_DEV_NEED_RESET:
2498 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2499 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2500 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2501 break;
2502
2503 case QLCNIC_DEV_NEED_QUISCENT:
2504 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2505 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2506 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2507 break;
2508
2509 case QLCNIC_DEV_FAILED:
a7fc948f 2510 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2511 qlcnic_api_unlock(adapter);
2512 return -1;
bbd8c6a4
AKS
2513
2514 case QLCNIC_DEV_INITIALIZING:
2515 case QLCNIC_DEV_QUISCENT:
2516 break;
af19b491
AKS
2517 }
2518
2519 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2520
2521 do {
af19b491 2522 msleep(1000);
a5e463d0
SC
2523 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2524
2525 if (prev_state == QLCNIC_DEV_QUISCENT)
2526 continue;
2527 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2528
65b5b420
AKS
2529 if (!dev_init_timeo) {
2530 dev_err(&adapter->pdev->dev,
2531 			"Timed out waiting for device to initialize\n");
af19b491 2532 return -1;
65b5b420 2533 }
af19b491
AKS
2534
2535 if (qlcnic_api_lock(adapter))
2536 return -1;
2537
2538 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2539 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2540 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2541
96f8118c 2542 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2543 qlcnic_api_unlock(adapter);
2544
96f8118c 2545 return ret;
af19b491
AKS
2546}
2547
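/* Delayed work driving firmware re-initialization: privileged
 * functions wait for all driver instances to ack the reset, move the
 * device through QUISCENT/INITIALIZING as requested and restart the
 * firmware; non-privileged functions poll until the device returns to
 * READY. On success qlcnic_attach_work is scheduled.
 */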
2548static void
2549qlcnic_fwinit_work(struct work_struct *work)
2550{
2551 struct qlcnic_adapter *adapter = container_of(work,
2552 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2553 u32 dev_state = 0xf;
af19b491 2554
f73dfc50
AKS
2555 if (qlcnic_api_lock(adapter))
2556 goto err_ret;
af19b491 2557
a5e463d0
SC
2558 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2559 if (dev_state == QLCNIC_DEV_QUISCENT) {
2560 qlcnic_api_unlock(adapter);
2561 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2562 FW_POLL_DELAY * 2);
2563 return;
2564 }
2565
9f26f547 2566 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2567 qlcnic_api_unlock(adapter);
2568 goto wait_npar;
9f26f547
AC
2569 }
2570
f73dfc50
AKS
2571 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2572 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2573 adapter->reset_ack_timeo);
2574 goto skip_ack_check;
2575 }
2576
2577 if (!qlcnic_check_drv_state(adapter)) {
2578skip_ack_check:
2579 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0
SC
2580
2581 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2582 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2583 QLCNIC_DEV_QUISCENT);
2584 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2585 FW_POLL_DELAY * 2);
2586 			QLCDB(adapter, DRV, "Quiescing the driver\n");
6df900e9
SC
2587 qlcnic_idc_debug_info(adapter, 0);
2588
a5e463d0
SC
2589 qlcnic_api_unlock(adapter);
2590 return;
2591 }
2592
f73dfc50
AKS
2593 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2594 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2595 QLCNIC_DEV_INITIALIZING);
2596 set_bit(__QLCNIC_START_FW, &adapter->state);
2597 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2598 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2599 }
2600
f73dfc50
AKS
2601 qlcnic_api_unlock(adapter);
2602
9f26f547 2603 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491
AKS
2604 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2605 return;
2606 }
af19b491
AKS
2607 goto err_ret;
2608 }
2609
f73dfc50 2610 qlcnic_api_unlock(adapter);
aa5e18c0 2611
9f26f547 2612wait_npar:
af19b491 2613 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2614 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2615
af19b491 2616 switch (dev_state) {
3c4b23b1 2617 case QLCNIC_DEV_READY:
9f26f547 2618 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50
AKS
2619 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2620 return;
2621 }
3c4b23b1
AKS
2622 case QLCNIC_DEV_FAILED:
2623 break;
2624 default:
2625 qlcnic_schedule_work(adapter,
2626 qlcnic_fwinit_work, FW_POLL_DELAY);
2627 return;
af19b491
AKS
2628 }
2629
2630err_ret:
f73dfc50
AKS
2631 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2632 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2633 netif_device_attach(adapter->netdev);
21854f02 2634 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2635}
2636
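/* Recovery path scheduled on a firmware hang or reset request: detach
 * the netdev and bring the interface down, then, unless the failure is
 * fatal or thermal, ack the reset/quiescent request and start polling
 * via qlcnic_fwinit_work.
 */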
2637static void
2638qlcnic_detach_work(struct work_struct *work)
2639{
2640 struct qlcnic_adapter *adapter = container_of(work,
2641 struct qlcnic_adapter, fw_work.work);
2642 struct net_device *netdev = adapter->netdev;
2643 u32 status;
2644
2645 netif_device_detach(netdev);
2646
2647 qlcnic_down(adapter, netdev);
2648
af19b491
AKS
2649 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2650
2651 if (status & QLCNIC_RCODE_FATAL_ERROR)
2652 goto err_ret;
2653
2654 if (adapter->temp == QLCNIC_TEMP_PANIC)
2655 goto err_ret;
2656
ade91f8e
AKS
2657 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2658 goto err_ret;
af19b491
AKS
2659
2660 adapter->fw_wait_cnt = 0;
2661
2662 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2663
2664 return;
2665
2666err_ret:
65b5b420
AKS
2667 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2668 status, adapter->temp);
34ce3626 2669 netif_device_attach(netdev);
21854f02 2670 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2671}
2672
3c4b23b1
AKS
2673/* Transition NPAR state to NON operational */
2674static void
2675qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2676{
2677 u32 state;
2678
2679 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2680 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2681 return;
2682
2683 if (qlcnic_api_lock(adapter))
2684 return;
2685 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2686 qlcnic_api_unlock(adapter);
2687}
2688
f73dfc50 2689/* Transition to RESET state from READY state only */
af19b491
AKS
2690static void
2691qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2692{
2693 u32 state;
2694
cea8975e 2695 adapter->need_fw_reset = 1;
af19b491
AKS
2696 if (qlcnic_api_lock(adapter))
2697 return;
2698
2699 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2700
f73dfc50 2701 if (state == QLCNIC_DEV_READY) {
af19b491 2702 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2703 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2704 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2705 }
2706
3c4b23b1 2707 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2708 qlcnic_api_unlock(adapter);
2709}
2710
9f26f547
AC
2711/* Transition to NPAR READY state from NPAR NOT READY state */
2712static void
2713qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2714{
cea8975e 2715 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3c4b23b1 2716 adapter->op_mode != QLCNIC_MGMT_FUNC)
cea8975e 2717 return;
9f26f547
AC
2718 if (qlcnic_api_lock(adapter))
2719 return;
2720
3c4b23b1
AKS
2721 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2722 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2723
2724 qlcnic_api_unlock(adapter);
2725}
2726
af19b491
AKS
2727static void
2728qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2729 work_func_t func, int delay)
2730{
451724c8
SC
2731 if (test_bit(__QLCNIC_AER, &adapter->state))
2732 return;
2733
af19b491
AKS
2734 INIT_DELAYED_WORK(&adapter->fw_work, func);
2735 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2736}
2737
2738static void
2739qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2740{
2741 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2742 msleep(10);
2743
2744 cancel_delayed_work_sync(&adapter->fw_work);
2745}
2746
2747static void
2748qlcnic_attach_work(struct work_struct *work)
2749{
2750 struct qlcnic_adapter *adapter = container_of(work,
2751 struct qlcnic_adapter, fw_work.work);
2752 struct net_device *netdev = adapter->netdev;
af19b491
AKS
2753
2754 if (netif_running(netdev)) {
52486a3a 2755 if (qlcnic_up(adapter, netdev))
af19b491 2756 goto done;
af19b491
AKS
2757
2758 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2759 }
2760
af19b491 2761done:
34ce3626 2762 netif_device_attach(netdev);
af19b491
AKS
2763 adapter->fw_fail_cnt = 0;
2764 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2765
2766 if (!qlcnic_clr_drv_state(adapter))
2767 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2768 FW_POLL_DELAY);
af19b491
AKS
2769}
2770
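/* Called from the firmware poll work: request a reset when needed,
 * mark the NPAR non-operational while a reset/quiescent is pending,
 * and monitor the firmware heartbeat counter. After FW_FAIL_THRESH
 * missed beats a firmware hang is declared and qlcnic_detach_work is
 * scheduled. Returns 1 when the device needs to be detached.
 */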
2771static int
2772qlcnic_check_health(struct qlcnic_adapter *adapter)
2773{
2774 u32 state = 0, heartbit;
2775 struct net_device *netdev = adapter->netdev;
2776
2777 if (qlcnic_check_temp(adapter))
2778 goto detach;
2779
2372a5f1 2780 if (adapter->need_fw_reset)
af19b491 2781 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2782
2783 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3c4b23b1
AKS
2784 if (state == QLCNIC_DEV_NEED_RESET ||
2785 state == QLCNIC_DEV_NEED_QUISCENT) {
2786 qlcnic_set_npar_non_operational(adapter);
af19b491 2787 adapter->need_fw_reset = 1;
3c4b23b1 2788 }
af19b491
AKS
2789
2790 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2791 if (heartbit != adapter->heartbit) {
2792 adapter->heartbit = heartbit;
2793 adapter->fw_fail_cnt = 0;
2794 if (adapter->need_fw_reset)
2795 goto detach;
68bf1c68 2796
0df170b6
AKS
2797 if (adapter->reset_context &&
2798 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
68bf1c68
AKS
2799 qlcnic_reset_hw_context(adapter);
2800 adapter->netdev->trans_start = jiffies;
2801 }
2802
af19b491
AKS
2803 return 0;
2804 }
2805
2806 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2807 return 0;
2808
2809 qlcnic_dev_request_reset(adapter);
2810
0df170b6
AKS
2811 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
2812 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
2813
2814 dev_info(&netdev->dev, "firmware hang detected\n");
2815
2816detach:
2817 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2818 QLCNIC_DEV_NEED_RESET;
2819
2820 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
65b5b420
AKS
2821 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2822
af19b491 2823 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
2824 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2825 }
af19b491
AKS
2826
2827 return 1;
2828}
2829
2830static void
2831qlcnic_fw_poll_work(struct work_struct *work)
2832{
2833 struct qlcnic_adapter *adapter = container_of(work,
2834 struct qlcnic_adapter, fw_work.work);
2835
2836 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2837 goto reschedule;
2838
2839
2840 if (qlcnic_check_health(adapter))
2841 return;
2842
2843reschedule:
2844 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2845}
2846
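/* AER helper: returns 1 only if every lower-numbered PCI function in
 * this slot is still in D3cold, i.e. this is the first function to
 * resume and so the one expected to restart the firmware.
 */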
451724c8
SC
2847static int qlcnic_is_first_func(struct pci_dev *pdev)
2848{
2849 struct pci_dev *oth_pdev;
2850 int val = pdev->devfn;
2851
2852 while (val-- > 0) {
2853 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2854 (pdev->bus), pdev->bus->number,
2855 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
2856 if (!oth_pdev)
2857 continue;
451724c8 2858
bfc978fa
AKS
2859 if (oth_pdev->current_state != PCI_D3cold) {
2860 pci_dev_put(oth_pdev);
451724c8 2861 return 0;
bfc978fa
AKS
2862 }
2863 pci_dev_put(oth_pdev);
451724c8
SC
2864 }
2865 return 1;
2866}
2867
2868static int qlcnic_attach_func(struct pci_dev *pdev)
2869{
2870 int err, first_func;
2871 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2872 struct net_device *netdev = adapter->netdev;
2873
2874 pdev->error_state = pci_channel_io_normal;
2875
2876 err = pci_enable_device(pdev);
2877 if (err)
2878 return err;
2879
2880 pci_set_power_state(pdev, PCI_D0);
2881 pci_set_master(pdev);
2882 pci_restore_state(pdev);
2883
2884 first_func = qlcnic_is_first_func(pdev);
2885
2886 if (qlcnic_api_lock(adapter))
2887 return -EINVAL;
2888
933fce12 2889 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
2890 adapter->need_fw_reset = 1;
2891 set_bit(__QLCNIC_START_FW, &adapter->state);
2892 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2893 QLCDB(adapter, DRV, "Restarting fw\n");
2894 }
2895 qlcnic_api_unlock(adapter);
2896
2897 err = adapter->nic_ops->start_firmware(adapter);
2898 if (err)
2899 return err;
2900
2901 qlcnic_clr_drv_state(adapter);
2902 qlcnic_setup_intr(adapter);
2903
2904 if (netif_running(netdev)) {
2905 err = qlcnic_attach(adapter);
2906 if (err) {
21854f02 2907 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
2908 clear_bit(__QLCNIC_AER, &adapter->state);
2909 netif_device_attach(netdev);
2910 return err;
2911 }
2912
2913 err = qlcnic_up(adapter, netdev);
2914 if (err)
2915 goto done;
2916
2917 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2918 }
2919 done:
2920 netif_device_attach(netdev);
2921 return err;
2922}
2923
2924static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2925 pci_channel_state_t state)
2926{
2927 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2928 struct net_device *netdev = adapter->netdev;
2929
2930 if (state == pci_channel_io_perm_failure)
2931 return PCI_ERS_RESULT_DISCONNECT;
2932
2933 if (state == pci_channel_io_normal)
2934 return PCI_ERS_RESULT_RECOVERED;
2935
2936 set_bit(__QLCNIC_AER, &adapter->state);
2937 netif_device_detach(netdev);
2938
2939 cancel_delayed_work_sync(&adapter->fw_work);
2940
2941 if (netif_running(netdev))
2942 qlcnic_down(adapter, netdev);
2943
2944 qlcnic_detach(adapter);
2945 qlcnic_teardown_intr(adapter);
2946
2947 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2948
2949 pci_save_state(pdev);
2950 pci_disable_device(pdev);
2951
2952 return PCI_ERS_RESULT_NEED_RESET;
2953}
2954
2955static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
2956{
2957 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
2958 PCI_ERS_RESULT_RECOVERED;
2959}
2960
2961static void qlcnic_io_resume(struct pci_dev *pdev)
2962{
2963 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2964
2965 pci_cleanup_aer_uncorrect_error_status(pdev);
2966
2967 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
2968 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
2969 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2970 FW_POLL_DELAY);
2971}
2972
87eb743b
AC
2973static int
2974qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2975{
2976 int err;
2977
2978 err = qlcnic_can_start_firmware(adapter);
2979 if (err)
2980 return err;
2981
78f84e1a
AKS
2982 err = qlcnic_check_npar_opertional(adapter);
2983 if (err)
2984 return err;
3c4b23b1 2985
87eb743b
AC
2986 qlcnic_check_options(adapter);
2987
2988 adapter->need_fw_reset = 0;
2989
2990 return err;
2991}
2992
2993static int
2994qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
2995{
2996 return -EOPNOTSUPP;
2997}
2998
2999static int
3000qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3001{
3002 return -EOPNOTSUPP;
3003}
3004
af19b491
AKS
3005static ssize_t
3006qlcnic_store_bridged_mode(struct device *dev,
3007 struct device_attribute *attr, const char *buf, size_t len)
3008{
3009 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3010 unsigned long new;
3011 int ret = -EINVAL;
3012
3013 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3014 goto err_out;
3015
8a15ad1f 3016 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3017 goto err_out;
3018
3019 if (strict_strtoul(buf, 2, &new))
3020 goto err_out;
3021
2e9d722d 3022 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3023 ret = len;
3024
3025err_out:
3026 return ret;
3027}
3028
3029static ssize_t
3030qlcnic_show_bridged_mode(struct device *dev,
3031 struct device_attribute *attr, char *buf)
3032{
3033 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3034 int bridged_mode = 0;
3035
3036 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3037 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3038
3039 return sprintf(buf, "%d\n", bridged_mode);
3040}
3041
3042static struct device_attribute dev_attr_bridged_mode = {
3043 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3044 .show = qlcnic_show_bridged_mode,
3045 .store = qlcnic_store_bridged_mode,
3046};
3047
3048static ssize_t
3049qlcnic_store_diag_mode(struct device *dev,
3050 struct device_attribute *attr, const char *buf, size_t len)
3051{
3052 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3053 unsigned long new;
3054
3055 if (strict_strtoul(buf, 2, &new))
3056 return -EINVAL;
3057
3058 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3059 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3060
3061 return len;
3062}
3063
3064static ssize_t
3065qlcnic_show_diag_mode(struct device *dev,
3066 struct device_attribute *attr, char *buf)
3067{
3068 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3069
3070 return sprintf(buf, "%d\n",
3071 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3072}
3073
3074static struct device_attribute dev_attr_diag_mode = {
3075 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3076 .show = qlcnic_show_diag_mode,
3077 .store = qlcnic_store_diag_mode,
3078};
3079
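/* CRB register access through sysfs is only permitted in diag mode and
 * must be a naturally aligned 4-byte access (8 bytes within the CAMQM
 * window).
 */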
3080static int
3081qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3082 loff_t offset, size_t size)
3083{
897e8c7c
DP
3084 size_t crb_size = 4;
3085
af19b491
AKS
3086 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3087 return -EIO;
3088
897e8c7c
DP
3089 if (offset < QLCNIC_PCI_CRBSPACE) {
3090 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3091 QLCNIC_PCI_CAMQM_END))
3092 crb_size = 8;
3093 else
3094 return -EINVAL;
3095 }
af19b491 3096
897e8c7c
DP
3097 if ((size != crb_size) || (offset & (crb_size-1)))
3098 return -EINVAL;
af19b491
AKS
3099
3100 return 0;
3101}
3102
3103static ssize_t
2c3c8bea
CW
3104qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3105 struct bin_attribute *attr,
af19b491
AKS
3106 char *buf, loff_t offset, size_t size)
3107{
3108 struct device *dev = container_of(kobj, struct device, kobj);
3109 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3110 u32 data;
897e8c7c 3111 u64 qmdata;
af19b491
AKS
3112 int ret;
3113
3114 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3115 if (ret != 0)
3116 return ret;
3117
897e8c7c
DP
3118 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3119 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3120 memcpy(buf, &qmdata, size);
3121 } else {
3122 data = QLCRD32(adapter, offset);
3123 memcpy(buf, &data, size);
3124 }
af19b491
AKS
3125 return size;
3126}
3127
3128static ssize_t
2c3c8bea
CW
3129qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3130 struct bin_attribute *attr,
af19b491
AKS
3131 char *buf, loff_t offset, size_t size)
3132{
3133 struct device *dev = container_of(kobj, struct device, kobj);
3134 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3135 u32 data;
897e8c7c 3136 u64 qmdata;
af19b491
AKS
3137 int ret;
3138
3139 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3140 if (ret != 0)
3141 return ret;
3142
897e8c7c
DP
3143 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3144 memcpy(&qmdata, buf, size);
3145 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3146 } else {
3147 memcpy(&data, buf, size);
3148 QLCWR32(adapter, offset, data);
3149 }
af19b491
AKS
3150 return size;
3151}
3152
3153static int
3154qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3155 loff_t offset, size_t size)
3156{
3157 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3158 return -EIO;
3159
3160 if ((size != 8) || (offset & 0x7))
3161 return -EIO;
3162
3163 return 0;
3164}
3165
3166static ssize_t
2c3c8bea
CW
3167qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3168 struct bin_attribute *attr,
af19b491
AKS
3169 char *buf, loff_t offset, size_t size)
3170{
3171 struct device *dev = container_of(kobj, struct device, kobj);
3172 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3173 u64 data;
3174 int ret;
3175
3176 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3177 if (ret != 0)
3178 return ret;
3179
3180 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3181 return -EIO;
3182
3183 memcpy(buf, &data, size);
3184
3185 return size;
3186}
3187
3188static ssize_t
2c3c8bea
CW
3189qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3190 struct bin_attribute *attr,
af19b491
AKS
3191 char *buf, loff_t offset, size_t size)
3192{
3193 struct device *dev = container_of(kobj, struct device, kobj);
3194 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3195 u64 data;
3196 int ret;
3197
3198 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3199 if (ret != 0)
3200 return ret;
3201
3202 memcpy(&data, buf, size);
3203
3204 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3205 return -EIO;
3206
3207 return size;
3208}
3209
3210
3211static struct bin_attribute bin_attr_crb = {
3212 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3213 .size = 0,
3214 .read = qlcnic_sysfs_read_crb,
3215 .write = qlcnic_sysfs_write_crb,
3216};
3217
3218static struct bin_attribute bin_attr_mem = {
3219 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3220 .size = 0,
3221 .read = qlcnic_sysfs_read_mem,
3222 .write = qlcnic_sysfs_write_mem,
3223};
3224
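/* Validate a port-mirroring request: the source and destination
 * functions must both be valid NIC-type NPARs attached to the same
 * physical eswitch port.
 */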
cea8975e 3225static int
346fe763
RB
3226validate_pm_config(struct qlcnic_adapter *adapter,
3227 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3228{
3229
3230 u8 src_pci_func, s_esw_id, d_esw_id;
3231 u8 dest_pci_func;
3232 int i;
3233
3234 for (i = 0; i < count; i++) {
3235 src_pci_func = pm_cfg[i].pci_func;
3236 dest_pci_func = pm_cfg[i].dest_npar;
3237 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3238 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3239 return QL_STATUS_INVALID_PARAM;
3240
3241 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3242 return QL_STATUS_INVALID_PARAM;
3243
3244 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3245 return QL_STATUS_INVALID_PARAM;
3246
346fe763
RB
3247 s_esw_id = adapter->npars[src_pci_func].phy_port;
3248 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3249
3250 if (s_esw_id != d_esw_id)
3251 return QL_STATUS_INVALID_PARAM;
3252
3253 }
3254 return 0;
3255
3256}
3257
3258static ssize_t
3259qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3260 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3261{
3262 struct device *dev = container_of(kobj, struct device, kobj);
3263 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3264 struct qlcnic_pm_func_cfg *pm_cfg;
3265 u32 id, action, pci_func;
3266 int count, rem, i, ret;
3267
3268 count = size / sizeof(struct qlcnic_pm_func_cfg);
3269 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3270 if (rem)
3271 return QL_STATUS_INVALID_PARAM;
3272
3273 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3274
3275 ret = validate_pm_config(adapter, pm_cfg, count);
3276 if (ret)
3277 return ret;
3278 for (i = 0; i < count; i++) {
3279 pci_func = pm_cfg[i].pci_func;
4e8acb01 3280 action = !!pm_cfg[i].action;
346fe763
RB
3281 id = adapter->npars[pci_func].phy_port;
3282 ret = qlcnic_config_port_mirroring(adapter, id,
3283 action, pci_func);
3284 if (ret)
3285 return ret;
3286 }
3287
3288 for (i = 0; i < count; i++) {
3289 pci_func = pm_cfg[i].pci_func;
3290 id = adapter->npars[pci_func].phy_port;
4e8acb01 3291 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3292 adapter->npars[pci_func].dest_npar = id;
3293 }
3294 return size;
3295}
3296
3297static ssize_t
3298qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3299 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3300{
3301 struct device *dev = container_of(kobj, struct device, kobj);
3302 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3303 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3304 int i;
3305
3306 if (size != sizeof(pm_cfg))
3307 return QL_STATUS_INVALID_PARAM;
3308
3309 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3310 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3311 continue;
3312 pm_cfg[i].action = adapter->npars[i].enable_pm;
3313 pm_cfg[i].dest_npar = 0;
3314 pm_cfg[i].pci_func = i;
3315 }
3316 memcpy(buf, &pm_cfg, size);
3317
3318 return size;
3319}
3320
cea8975e 3321static int
346fe763 3322validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3323 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763
RB
3324{
3325 u8 pci_func;
3326 int i;
346fe763
RB
3327 for (i = 0; i < count; i++) {
3328 pci_func = esw_cfg[i].pci_func;
3329 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3330 return QL_STATUS_INVALID_PARAM;
3331
4e8acb01
RB
3332 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3333 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3334 return QL_STATUS_INVALID_PARAM;
346fe763 3335
4e8acb01
RB
3336 switch (esw_cfg[i].op_mode) {
3337 case QLCNIC_PORT_DEFAULTS:
3338 break;
3339 case QLCNIC_ADD_VLAN:
346fe763
RB
3340 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3341 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3342 if (!esw_cfg[i].op_type)
3343 return QL_STATUS_INVALID_PARAM;
3344 break;
3345 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3346 if (!esw_cfg[i].op_type)
3347 return QL_STATUS_INVALID_PARAM;
3348 break;
3349 default:
346fe763 3350 return QL_STATUS_INVALID_PARAM;
4e8acb01 3351 }
346fe763 3352 }
346fe763
RB
3353 return 0;
3354}
3355
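/* Apply eswitch port configuration written from userspace. Management
 * functions push each entry to the firmware; the entry matching this
 * function is also applied locally. For ADD_VLAN/DEL_VLAN the
 * per-function port VLAN id (pvid) is cached in the npar table.
 */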
3356static ssize_t
3357qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3358 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3359{
3360 struct device *dev = container_of(kobj, struct device, kobj);
3361 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3362 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3363 struct qlcnic_npar_info *npar;
346fe763 3364 int count, rem, i, ret;
0325d69b 3365 u8 pci_func, op_mode = 0;
346fe763
RB
3366
3367 count = size / sizeof(struct qlcnic_esw_func_cfg);
3368 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3369 if (rem)
3370 return QL_STATUS_INVALID_PARAM;
3371
3372 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3373 ret = validate_esw_config(adapter, esw_cfg, count);
3374 if (ret)
3375 return ret;
3376
3377 for (i = 0; i < count; i++) {
0325d69b
RB
3378 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3379 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3380 return QL_STATUS_INVALID_PARAM;
e9a47700
RB
3381
3382 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3383 continue;
3384
3385 op_mode = esw_cfg[i].op_mode;
3386 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3387 esw_cfg[i].op_mode = op_mode;
3388 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3389
3390 switch (esw_cfg[i].op_mode) {
3391 case QLCNIC_PORT_DEFAULTS:
3392 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3393 break;
8cf61f89
AKS
3394 case QLCNIC_ADD_VLAN:
3395 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3396 break;
3397 case QLCNIC_DEL_VLAN:
3398 esw_cfg[i].vlan_id = 0;
3399 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3400 break;
0325d69b 3401 }
346fe763
RB
3402 }
3403
0325d69b
RB
3404 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3405 goto out;
e9a47700 3406
346fe763
RB
3407 for (i = 0; i < count; i++) {
3408 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3409 npar = &adapter->npars[pci_func];
3410 switch (esw_cfg[i].op_mode) {
3411 case QLCNIC_PORT_DEFAULTS:
3412 npar->promisc_mode = esw_cfg[i].promisc_mode;
3413 npar->mac_learning = esw_cfg[i].mac_learning;
3414 npar->offload_flags = esw_cfg[i].offload_flags;
3415 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3416 npar->discard_tagged = esw_cfg[i].discard_tagged;
3417 break;
3418 case QLCNIC_ADD_VLAN:
3419 npar->pvid = esw_cfg[i].vlan_id;
3420 break;
3421 case QLCNIC_DEL_VLAN:
3422 npar->pvid = 0;
3423 break;
3424 }
346fe763 3425 }
0325d69b 3426out:
346fe763
RB
3427 return size;
3428}
3429
3430static ssize_t
3431qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3432 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3433{
3434 struct device *dev = container_of(kobj, struct device, kobj);
3435 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3436 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3437 u8 i;
346fe763
RB
3438
3439 if (size != sizeof(esw_cfg))
3440 return QL_STATUS_INVALID_PARAM;
3441
3442 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3443 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3444 continue;
4e8acb01
RB
3445 esw_cfg[i].pci_func = i;
3446 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3447 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3448 }
3449 memcpy(buf, &esw_cfg, size);
3450
3451 return size;
3452}
3453
cea8975e 3454static int
346fe763
RB
3455validate_npar_config(struct qlcnic_adapter *adapter,
3456 struct qlcnic_npar_func_cfg *np_cfg, int count)
3457{
3458 u8 pci_func, i;
3459
3460 for (i = 0; i < count; i++) {
3461 pci_func = np_cfg[i].pci_func;
3462 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3463 return QL_STATUS_INVALID_PARAM;
3464
3465 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3466 return QL_STATUS_INVALID_PARAM;
3467
3468 if (!IS_VALID_BW(np_cfg[i].min_bw)
3469 || !IS_VALID_BW(np_cfg[i].max_bw)
3470 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3471 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3472 return QL_STATUS_INVALID_PARAM;
3473 }
3474 return 0;
3475}
3476
3477static ssize_t
3478qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3479 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3480{
3481 struct device *dev = container_of(kobj, struct device, kobj);
3482 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3483 struct qlcnic_info nic_info;
3484 struct qlcnic_npar_func_cfg *np_cfg;
3485 int i, count, rem, ret;
3486 u8 pci_func;
3487
3488 count = size / sizeof(struct qlcnic_npar_func_cfg);
3489 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3490 if (rem)
3491 return QL_STATUS_INVALID_PARAM;
3492
3493 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3494 ret = validate_npar_config(adapter, np_cfg, count);
3495 if (ret)
3496 return ret;
3497
3498 for (i = 0; i < count ; i++) {
3499 pci_func = np_cfg[i].pci_func;
3500 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3501 if (ret)
3502 return ret;
3503 nic_info.pci_func = pci_func;
3504 nic_info.min_tx_bw = np_cfg[i].min_bw;
3505 nic_info.max_tx_bw = np_cfg[i].max_bw;
3506 ret = qlcnic_set_nic_info(adapter, &nic_info);
3507 if (ret)
3508 return ret;
cea8975e
AC
3509 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3510 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3511 }
3512
3513 return size;
3514
3515}
3516static ssize_t
3517qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3518 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3519{
3520 struct device *dev = container_of(kobj, struct device, kobj);
3521 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3522 struct qlcnic_info nic_info;
3523 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3524 int i, ret;
3525
3526 if (size != sizeof(np_cfg))
3527 return QL_STATUS_INVALID_PARAM;
3528
3529 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3530 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3531 continue;
3532 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3533 if (ret)
3534 return ret;
3535
3536 np_cfg[i].pci_func = i;
3537 np_cfg[i].op_mode = nic_info.op_mode;
3538 np_cfg[i].port_num = nic_info.phys_port;
3539 np_cfg[i].fw_capab = nic_info.capabilities;
3540 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3541 np_cfg[i].max_bw = nic_info.max_tx_bw;
3542 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3543 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3544 }
3545 memcpy(buf, &np_cfg, size);
3546 return size;
3547}
3548
b6021212
AKS
3549static ssize_t
3550qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3551 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3552{
3553 struct device *dev = container_of(kobj, struct device, kobj);
3554 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3555 struct qlcnic_esw_statistics port_stats;
3556 int ret;
3557
3558 if (size != sizeof(struct qlcnic_esw_statistics))
3559 return QL_STATUS_INVALID_PARAM;
3560
3561 if (offset >= QLCNIC_MAX_PCI_FUNC)
3562 return QL_STATUS_INVALID_PARAM;
3563
3564 memset(&port_stats, 0, size);
3565 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3566 &port_stats.rx);
3567 if (ret)
3568 return ret;
3569
3570 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3571 &port_stats.tx);
3572 if (ret)
3573 return ret;
3574
3575 memcpy(buf, &port_stats, size);
3576 return size;
3577}
3578
3579static ssize_t
3580qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3581 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3582{
3583 struct device *dev = container_of(kobj, struct device, kobj);
3584 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3585 struct qlcnic_esw_statistics esw_stats;
3586 int ret;
3587
3588 if (size != sizeof(struct qlcnic_esw_statistics))
3589 return QL_STATUS_INVALID_PARAM;
3590
3591 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3592 return QL_STATUS_INVALID_PARAM;
3593
3594 memset(&esw_stats, 0, size);
3595 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3596 &esw_stats.rx);
3597 if (ret)
3598 return ret;
3599
3600 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3601 &esw_stats.tx);
3602 if (ret)
3603 return ret;
3604
3605 memcpy(buf, &esw_stats, size);
3606 return size;
3607}
3608
3609static ssize_t
3610qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3611 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3612{
3613 struct device *dev = container_of(kobj, struct device, kobj);
3614 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3615 int ret;
3616
3617 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3618 return QL_STATUS_INVALID_PARAM;
3619
3620 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3621 QLCNIC_QUERY_RX_COUNTER);
3622 if (ret)
3623 return ret;
3624
3625 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3626 QLCNIC_QUERY_TX_COUNTER);
3627 if (ret)
3628 return ret;
3629
3630 return size;
3631}
3632
3633static ssize_t
3634qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3635 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3636{
3637
3638 struct device *dev = container_of(kobj, struct device, kobj);
3639 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3640 int ret;
3641
3642 if (offset >= QLCNIC_MAX_PCI_FUNC)
3643 return QL_STATUS_INVALID_PARAM;
3644
3645 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3646 QLCNIC_QUERY_RX_COUNTER);
3647 if (ret)
3648 return ret;
3649
3650 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3651 QLCNIC_QUERY_TX_COUNTER);
3652 if (ret)
3653 return ret;
3654
3655 return size;
3656}
3657
346fe763
RB
3658static ssize_t
3659qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3660 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3661{
3662 struct device *dev = container_of(kobj, struct device, kobj);
3663 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3664 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3665 struct qlcnic_pci_info *pci_info;
346fe763
RB
3666 int i, ret;
3667
3668 if (size != sizeof(pci_cfg))
3669 return QL_STATUS_INVALID_PARAM;
3670
e88db3bd
DC
3671 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3672 if (!pci_info)
3673 return -ENOMEM;
3674
346fe763 3675 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
3676 if (ret) {
3677 kfree(pci_info);
346fe763 3678 return ret;
e88db3bd 3679 }
346fe763
RB
3680
3681 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3682 pci_cfg[i].pci_func = pci_info[i].id;
3683 pci_cfg[i].func_type = pci_info[i].type;
3684 pci_cfg[i].port_num = pci_info[i].default_port;
3685 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3686 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3687 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3688 }
3689 memcpy(buf, &pci_cfg, size);
e88db3bd 3690 kfree(pci_info);
346fe763 3691 return size;
346fe763
RB
3692}
3693static struct bin_attribute bin_attr_npar_config = {
3694 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3695 .size = 0,
3696 .read = qlcnic_sysfs_read_npar_config,
3697 .write = qlcnic_sysfs_write_npar_config,
3698};
3699
3700static struct bin_attribute bin_attr_pci_config = {
3701 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3702 .size = 0,
3703 .read = qlcnic_sysfs_read_pci_config,
3704 .write = NULL,
3705};
3706
b6021212
AKS
3707static struct bin_attribute bin_attr_port_stats = {
3708 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3709 .size = 0,
3710 .read = qlcnic_sysfs_get_port_stats,
3711 .write = qlcnic_sysfs_clear_port_stats,
3712};
3713
3714static struct bin_attribute bin_attr_esw_stats = {
3715 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3716 .size = 0,
3717 .read = qlcnic_sysfs_get_esw_stats,
3718 .write = qlcnic_sysfs_clear_esw_stats,
3719};
3720
346fe763
RB
3721static struct bin_attribute bin_attr_esw_config = {
3722 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3723 .size = 0,
3724 .read = qlcnic_sysfs_read_esw_config,
3725 .write = qlcnic_sysfs_write_esw_config,
3726};
3727
3728static struct bin_attribute bin_attr_pm_config = {
3729 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3730 .size = 0,
3731 .read = qlcnic_sysfs_read_pm_config,
3732 .write = qlcnic_sysfs_write_pm_config,
3733};
3734
af19b491
AKS
3735static void
3736qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3737{
3738 struct device *dev = &adapter->pdev->dev;
3739
3740 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3741 if (device_create_file(dev, &dev_attr_bridged_mode))
3742 dev_warn(dev,
3743 "failed to create bridged_mode sysfs entry\n");
3744}
3745
3746static void
3747qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3748{
3749 struct device *dev = &adapter->pdev->dev;
3750
3751 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3752 device_remove_file(dev, &dev_attr_bridged_mode);
3753}
3754
3755static void
3756qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3757{
3758 struct device *dev = &adapter->pdev->dev;
3759
b6021212
AKS
3760 if (device_create_bin_file(dev, &bin_attr_port_stats))
3761 dev_info(dev, "failed to create port stats sysfs entry");
3762
132ff00a
AC
3763 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3764 return;
af19b491
AKS
3765 if (device_create_file(dev, &dev_attr_diag_mode))
3766 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3767 if (device_create_bin_file(dev, &bin_attr_crb))
3768 dev_info(dev, "failed to create crb sysfs entry\n");
3769 if (device_create_bin_file(dev, &bin_attr_mem))
3770 dev_info(dev, "failed to create mem sysfs entry\n");
4e8acb01
RB
3771 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3772 return;
3773 if (device_create_bin_file(dev, &bin_attr_esw_config))
3774 dev_info(dev, "failed to create esw config sysfs entry\n");
3775 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763
RB
3776 return;
3777 if (device_create_bin_file(dev, &bin_attr_pci_config))
3778 dev_info(dev, "failed to create pci config sysfs entry\n");
3779 if (device_create_bin_file(dev, &bin_attr_npar_config))
3780 dev_info(dev, "failed to create npar config sysfs entry\n");
346fe763
RB
3781 if (device_create_bin_file(dev, &bin_attr_pm_config))
3782 dev_info(dev, "failed to create pm config sysfs entry\n");
b6021212
AKS
3783 if (device_create_bin_file(dev, &bin_attr_esw_stats))
3784 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
af19b491
AKS
3785}
3786
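/*
 * Removal mirrors qlcnic_create_diag_entries(): it bails out at the same
 * privilege and eswitch checks, so only entries that were actually created
 * get removed.
 */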
af19b491
AKS
3787static void
3788qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3789{
3790 struct device *dev = &adapter->pdev->dev;
3791
b6021212
AKS
3792 device_remove_bin_file(dev, &bin_attr_port_stats);
3793
132ff00a
AC
3794 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3795 return;
af19b491
AKS
3796 device_remove_file(dev, &dev_attr_diag_mode);
3797 device_remove_bin_file(dev, &bin_attr_crb);
3798 device_remove_bin_file(dev, &bin_attr_mem);
4e8acb01
RB
3799 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3800 return;
3801 device_remove_bin_file(dev, &bin_attr_esw_config);
3802 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763
RB
3803 return;
3804 device_remove_bin_file(dev, &bin_attr_pci_config);
3805 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 3806 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 3807 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
3808}
3809
3810#ifdef CONFIG_INET
3811
3812#define is_qlcnic_netdev(dev) ((dev)->netdev_ops == &qlcnic_netdev_ops)
3813
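/*
 * Walk the IPv4 addresses configured on a qlcnic net_device and push each
 * one to the firmware via qlcnic_config_ipaddr(), either adding
 * (QLCNIC_IP_UP) or removing (QLCNIC_IP_DOWN) it depending on the event.
 */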
af19b491
AKS
3814static void
3815qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3816{
3817 struct in_device *indev;
3818 struct qlcnic_adapter *adapter = netdev_priv(dev);
3819
af19b491
AKS
3820 indev = in_dev_get(dev);
3821 if (!indev)
3822 return;
3823
3824 for_ifa(indev) {
3825 switch (event) {
3826 case NETDEV_UP:
3827 qlcnic_config_ipaddr(adapter,
3828 ifa->ifa_address, QLCNIC_IP_UP);
3829 break;
3830 case NETDEV_DOWN:
3831 qlcnic_config_ipaddr(adapter,
3832 ifa->ifa_address, QLCNIC_IP_DOWN);
3833 break;
3834 default:
3835 break;
3836 }
3837 } endfor_ifa(indev);
3838
3839 in_dev_put(indev);
af19b491
AKS
3840}
3841
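/*
 * NETDEV notifier: VLAN devices are unwrapped to their real device first,
 * non-qlcnic netdevs are ignored, and addresses are only reprogrammed while
 * the adapter is marked __QLCNIC_DEV_UP.
 */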
3842static int qlcnic_netdev_event(struct notifier_block *this,
3843 unsigned long event, void *ptr)
3844{
3845 struct qlcnic_adapter *adapter;
3846 struct net_device *dev = (struct net_device *)ptr;
3847
3848recheck:
3849 if (dev == NULL)
3850 goto done;
3851
3852 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3853 dev = vlan_dev_real_dev(dev);
3854 goto recheck;
3855 }
3856
3857 if (!is_qlcnic_netdev(dev))
3858 goto done;
3859
3860 adapter = netdev_priv(dev);
3861
3862 if (!adapter)
3863 goto done;
3864
8a15ad1f 3865 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3866 goto done;
3867
3868 qlcnic_config_indev_addr(dev, event);
3869done:
3870 return NOTIFY_DONE;
3871}
3872
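/*
 * inetaddr notifier: handles a single IPv4 address being added to or removed
 * from a running interface, using the same VLAN unwrapping and qlcnic
 * ownership checks as the netdev notifier above.
 */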
3873static int
3874qlcnic_inetaddr_event(struct notifier_block *this,
3875 unsigned long event, void *ptr)
3876{
3877 struct qlcnic_adapter *adapter;
3878 struct net_device *dev;
3879
3880 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3881
3882 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3883
3884recheck:
3885 if (dev == NULL || !netif_running(dev))
3886 goto done;
3887
3888 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3889 dev = vlan_dev_real_dev(dev);
3890 goto recheck;
3891 }
3892
3893 if (!is_qlcnic_netdev(dev))
3894 goto done;
3895
3896 adapter = netdev_priv(dev);
3897
251a84c9 3898 if (!adapter)
af19b491
AKS
3899 goto done;
3900
8a15ad1f 3901 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3902 goto done;
3903
3904 switch (event) {
3905 case NETDEV_UP:
3906 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
3907 break;
3908 case NETDEV_DOWN:
3909 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
3910 break;
3911 default:
3912 break;
3913 }
3914
3915done:
3916 return NOTIFY_DONE;
3917}
3918
3919static struct notifier_block qlcnic_netdev_cb = {
3920 .notifier_call = qlcnic_netdev_event,
3921};
3922
3923static struct notifier_block qlcnic_inetaddr_cb = {
3924 .notifier_call = qlcnic_inetaddr_event,
3925};
3926#else
3927static void
3928qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3929{ }
3930#endif
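/*
 * PCI error (AER) recovery callbacks: the PCI core calls error_detected,
 * slot_reset and resume around a detected bus error so the driver can
 * detach, reset and re-attach the device.
 */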
451724c8
SC
3931static struct pci_error_handlers qlcnic_err_handler = {
3932 .error_detected = qlcnic_io_error_detected,
3933 .slot_reset = qlcnic_io_slot_reset,
3934 .resume = qlcnic_io_resume,
3935};
af19b491
AKS
3936
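/*
 * Main pci_driver definition; suspend/resume are compiled in only under
 * CONFIG_PM, while shutdown and the AER error handlers are always wired up.
 */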
3937static struct pci_driver qlcnic_driver = {
3938 .name = qlcnic_driver_name,
3939 .id_table = qlcnic_pci_tbl,
3940 .probe = qlcnic_probe,
3941 .remove = __devexit_p(qlcnic_remove),
3942#ifdef CONFIG_PM
3943 .suspend = qlcnic_suspend,
3944 .resume = qlcnic_resume,
3945#endif
451724c8
SC
3946 .shutdown = qlcnic_shutdown,
3947 .err_handler = &qlcnic_err_handler
3948
af19b491
AKS
3949};
3950
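/*
 * Module init registers the netdev and inetaddr notifiers (when CONFIG_INET
 * is set) before the PCI driver, and unregisters them again if
 * pci_register_driver() fails, so a failed load leaves nothing behind.
 */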
3951static int __init qlcnic_init_module(void)
3952{
0cf3a14c 3953 int ret;
af19b491
AKS
3954
3955 printk(KERN_INFO "%s\n", qlcnic_driver_string);
3956
3957#ifdef CONFIG_INET
3958 register_netdevice_notifier(&qlcnic_netdev_cb);
3959 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3960#endif
3961
0cf3a14c
AKS
3962 ret = pci_register_driver(&qlcnic_driver);
3963 if (ret) {
3964#ifdef CONFIG_INET
3965 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3966 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3967#endif
3968 }
af19b491 3969
0cf3a14c 3970 return ret;
af19b491
AKS
3971}
3972
3973module_init(qlcnic_init_module);
3974
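/*
 * Module exit tears down in reverse order: unregister the PCI driver first
 * so all adapters are removed, then drop the notifiers.
 */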
3975static void __exit qlcnic_exit_module(void)
3976{
3977
3978 pci_unregister_driver(&qlcnic_driver);
3979
3980#ifdef CONFIG_INET
3981 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3982 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3983#endif
3984}
3985
3986module_exit(qlcnic_exit_module);