qlcnic: fix mac override capability
drivers/net/qlcnic/qlcnic_main.c (linux-2.6-block.git)
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37#include <linux/aer.h>
38
39MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
40MODULE_LICENSE("GPL");
41MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
43
44char qlcnic_driver_name[] = "qlcnic";
45static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
47
48static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
49
50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5;
52
53static int use_msi = 1;
54module_param(use_msi, int, 0644);
55MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
56
57static int use_msi_x = 1;
58module_param(use_msi_x, int, 0644);
59MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
60
61static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
62module_param(auto_fw_reset, int, 0644);
63MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
64
65static int load_fw_file;
66module_param(load_fw_file, int, 0644);
67MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
68
69static int qlcnic_config_npars;
70module_param(qlcnic_config_npars, int, 0644);
71MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
72
73static int __devinit qlcnic_probe(struct pci_dev *pdev,
74 const struct pci_device_id *ent);
75static void __devexit qlcnic_remove(struct pci_dev *pdev);
76static int qlcnic_open(struct net_device *netdev);
77static int qlcnic_close(struct net_device *netdev);
78static void qlcnic_tx_timeout(struct net_device *netdev);
79static void qlcnic_attach_work(struct work_struct *work);
80static void qlcnic_fwinit_work(struct work_struct *work);
81static void qlcnic_fw_poll_work(struct work_struct *work);
82static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
83 work_func_t func, int delay);
84static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
85static int qlcnic_poll(struct napi_struct *napi, int budget);
86static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
87#ifdef CONFIG_NET_POLL_CONTROLLER
88static void qlcnic_poll_controller(struct net_device *netdev);
89#endif
90
91static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
92static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
95
96static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
97static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
98static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
99
100static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
101static irqreturn_t qlcnic_intr(int irq, void *data);
102static irqreturn_t qlcnic_msi_intr(int irq, void *data);
103static irqreturn_t qlcnic_msix_intr(int irq, void *data);
104
105static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
107static int qlcnic_start_firmware(struct qlcnic_adapter *);
108
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
113static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
114 struct qlcnic_esw_func_cfg *);
115/* PCI Device ID Table */
116#define ENTRY(device) \
117 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
118 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
119
120#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
121
122static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
123 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
124 {0,}
125};
126
127MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
128
129
130void
131qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
132 struct qlcnic_host_tx_ring *tx_ring)
133{
134 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
135}
136
137static const u32 msi_tgt_status[8] = {
138 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
139 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
140 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
141 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
142};
143
144static const
145struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
146
147static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
148{
149 writel(0, sds_ring->crb_intr_mask);
150}
151
152static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
153{
154 struct qlcnic_adapter *adapter = sds_ring->adapter;
155
156 writel(0x1, sds_ring->crb_intr_mask);
157
158 if (!QLCNIC_IS_MSI_FAMILY(adapter))
159 writel(0xfbff, adapter->tgt_mask_reg);
160}
161
162static int
163qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
164{
165 int size = sizeof(struct qlcnic_host_sds_ring) * count;
166
167 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
168
169 return (recv_ctx->sds_rings == NULL);
170}
171
172static void
173qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
174{
175 if (recv_ctx->sds_rings != NULL)
176 kfree(recv_ctx->sds_rings);
177
178 recv_ctx->sds_rings = NULL;
179}
180
181static int
182qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
183{
184 int ring;
185 struct qlcnic_host_sds_ring *sds_ring;
186 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
187
188 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
189 return -ENOMEM;
190
191 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
192 sds_ring = &recv_ctx->sds_rings[ring];
193
194 if (ring == adapter->max_sds_rings - 1)
195 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
196 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
197 else
198 netif_napi_add(netdev, &sds_ring->napi,
199 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
200 }
201
202 return 0;
203}
204
205static void
206qlcnic_napi_del(struct qlcnic_adapter *adapter)
207{
208 int ring;
209 struct qlcnic_host_sds_ring *sds_ring;
210 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
211
212 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
213 sds_ring = &recv_ctx->sds_rings[ring];
214 netif_napi_del(&sds_ring->napi);
215 }
216
217 qlcnic_free_sds_rings(&adapter->recv_ctx);
218}
219
220static void
221qlcnic_napi_enable(struct qlcnic_adapter *adapter)
222{
223 int ring;
224 struct qlcnic_host_sds_ring *sds_ring;
225 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
226
227 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
228 return;
229
230 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
231 sds_ring = &recv_ctx->sds_rings[ring];
232 napi_enable(&sds_ring->napi);
233 qlcnic_enable_int(sds_ring);
234 }
235}
236
237static void
238qlcnic_napi_disable(struct qlcnic_adapter *adapter)
239{
240 int ring;
241 struct qlcnic_host_sds_ring *sds_ring;
242 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
243
244 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
245 return;
246
247 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
248 sds_ring = &recv_ctx->sds_rings[ring];
249 qlcnic_disable_int(sds_ring);
250 napi_synchronize(&sds_ring->napi);
251 napi_disable(&sds_ring->napi);
252 }
253}
254
255static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
256{
257 memset(&adapter->stats, 0, sizeof(adapter->stats));
258}
259
260static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
261{
262 u32 val, data;
263
264 val = adapter->ahw.board_type;
265 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
266 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
267 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
268 data = QLCNIC_PORT_MODE_802_3_AP;
269 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
270 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
271 data = QLCNIC_PORT_MODE_XG;
272 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
273 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
274 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
275 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
276 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
277 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
278 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
279 } else {
280 data = QLCNIC_PORT_MODE_AUTO_NEG;
281 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
282 }
283
284 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
286 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
287 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
288 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
289 }
290 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
291 }
292}
293
294static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
295{
296 u32 control;
297 int pos;
298
299 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
300 if (pos) {
301 pci_read_config_dword(pdev, pos, &control);
302 if (enable)
303 control |= PCI_MSIX_FLAGS_ENABLE;
304 else
305 control = 0;
306 pci_write_config_dword(pdev, pos, control);
307 }
308}
309
310static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
311{
312 int i;
313
314 for (i = 0; i < count; i++)
315 adapter->msix_entries[i].entry = i;
316}
317
318static int
319qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
320{
321 u8 mac_addr[ETH_ALEN];
322 struct net_device *netdev = adapter->netdev;
323 struct pci_dev *pdev = adapter->pdev;
324
325 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
326 return -EIO;
327
328 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
329 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
330 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
331
332 /* set station address */
333
334 if (!is_valid_ether_addr(netdev->perm_addr))
335 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
336 netdev->dev_addr);
337
338 return 0;
339}
340
341static int qlcnic_set_mac(struct net_device *netdev, void *p)
342{
343 struct qlcnic_adapter *adapter = netdev_priv(netdev);
344 struct sockaddr *addr = p;
345
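 /* Changing the MAC is rejected when the eswitch configuration has
  * disabled MAC override for this function.
  */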
346 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
347 return -EOPNOTSUPP;
348
349 if (!is_valid_ether_addr(addr->sa_data))
350 return -EINVAL;
351
352 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
353 netif_device_detach(netdev);
354 qlcnic_napi_disable(adapter);
355 }
356
357 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
358 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
359 qlcnic_set_multi(adapter->netdev);
360
361 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
362 netif_device_attach(netdev);
363 qlcnic_napi_enable(adapter);
364 }
365 return 0;
366}
367
368static const struct net_device_ops qlcnic_netdev_ops = {
369 .ndo_open = qlcnic_open,
370 .ndo_stop = qlcnic_close,
371 .ndo_start_xmit = qlcnic_xmit_frame,
372 .ndo_get_stats = qlcnic_get_stats,
373 .ndo_validate_addr = eth_validate_addr,
374 .ndo_set_multicast_list = qlcnic_set_multi,
375 .ndo_set_mac_address = qlcnic_set_mac,
376 .ndo_change_mtu = qlcnic_change_mtu,
377 .ndo_tx_timeout = qlcnic_tx_timeout,
378#ifdef CONFIG_NET_POLL_CONTROLLER
379 .ndo_poll_controller = qlcnic_poll_controller,
380#endif
381};
382
383static struct qlcnic_nic_template qlcnic_ops = {
384 .config_bridged_mode = qlcnic_config_bridged_mode,
385 .config_led = qlcnic_config_led,
386 .start_firmware = qlcnic_start_firmware
387};
388
389static struct qlcnic_nic_template qlcnic_vf_ops = {
390 .config_bridged_mode = qlcnicvf_config_bridged_mode,
391 .config_led = qlcnicvf_config_led,
392 .start_firmware = qlcnicvf_start_firmware
393};
394
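/* Interrupt setup: try MSI-X first (multiple SDS rings when RSS is
 * supported), fall back to MSI, and finally to legacy INTx.
 */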
395static void
396qlcnic_setup_intr(struct qlcnic_adapter *adapter)
397{
398 const struct qlcnic_legacy_intr_set *legacy_intrp;
399 struct pci_dev *pdev = adapter->pdev;
400 int err, num_msix;
401
402 if (adapter->rss_supported) {
403 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
404 MSIX_ENTRIES_PER_ADAPTER : 2;
405 } else
406 num_msix = 1;
407
408 adapter->max_sds_rings = 1;
409
410 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
411
412 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
413
414 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
415 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
416 legacy_intrp->tgt_status_reg);
417 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
418 legacy_intrp->tgt_mask_reg);
419 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
420
421 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
422 ISR_INT_STATE_REG);
423
424 qlcnic_set_msix_bit(pdev, 0);
425
426 if (adapter->msix_supported) {
427
428 qlcnic_init_msix_entries(adapter, num_msix);
429 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
430 if (err == 0) {
431 adapter->flags |= QLCNIC_MSIX_ENABLED;
432 qlcnic_set_msix_bit(pdev, 1);
433
434 if (adapter->rss_supported)
435 adapter->max_sds_rings = num_msix;
436
437 dev_info(&pdev->dev, "using msi-x interrupts\n");
438 return;
439 }
440
441 if (err > 0)
442 pci_disable_msix(pdev);
443
444 /* fall through for msi */
445 }
446
447 if (use_msi && !pci_enable_msi(pdev)) {
448 adapter->flags |= QLCNIC_MSI_ENABLED;
449 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
450 msi_tgt_status[adapter->ahw.pci_func]);
451 dev_info(&pdev->dev, "using msi interrupts\n");
452 adapter->msix_entries[0].vector = pdev->irq;
453 return;
454 }
455
456 dev_info(&pdev->dev, "using legacy interrupts\n");
457 adapter->msix_entries[0].vector = pdev->irq;
458}
459
460static void
461qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
462{
463 if (adapter->flags & QLCNIC_MSIX_ENABLED)
464 pci_disable_msix(adapter->pdev);
465 if (adapter->flags & QLCNIC_MSI_ENABLED)
466 pci_disable_msi(adapter->pdev);
467}
468
469static void
470qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
471{
472 if (adapter->ahw.pci_base0 != NULL)
473 iounmap(adapter->ahw.pci_base0);
474}
475
476static int
477qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
478{
479 struct qlcnic_pci_info *pci_info;
480 int i, ret = 0;
481 u8 pfn;
482
483 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
484 if (!pci_info)
485 return -ENOMEM;
486
487 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
488 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
489 if (!adapter->npars) {
490 ret = -ENOMEM;
491 goto err_pci_info;
492 }
493
494 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
495 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
496 if (!adapter->eswitch) {
497 ret = -ENOMEM;
498 goto err_npars;
499 }
500
501 ret = qlcnic_get_pci_info(adapter, pci_info);
502 if (ret)
503 goto err_eswitch;
504
505 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
506 pfn = pci_info[i].id;
507 if (pfn > QLCNIC_MAX_PCI_FUNC)
508 return QL_STATUS_INVALID_PARAM;
509 adapter->npars[pfn].active = pci_info[i].active;
510 adapter->npars[pfn].type = pci_info[i].type;
511 adapter->npars[pfn].phy_port = pci_info[i].default_port;
512 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
513 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
514 }
515
516 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
517 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
518
519 kfree(pci_info);
520 return 0;
521
522err_eswitch:
523 kfree(adapter->eswitch);
524 adapter->eswitch = NULL;
525err_npars:
526 kfree(adapter->npars);
527 adapter->npars = NULL;
528err_pci_info:
529 kfree(pci_info);
530
531 return ret;
532}
533
534static int
535qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
536{
537 u8 id;
538 u32 ref_count;
539 int i, ret = 1;
540 u32 data = QLCNIC_MGMT_FUNC;
541 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
542
543 /* If other drivers are not in use set their privilege level */
544 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
545 ret = qlcnic_api_lock(adapter);
546 if (ret)
547 goto err_lock;
548
549 if (qlcnic_config_npars) {
550 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
551 id = i;
552 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
553 id == adapter->ahw.pci_func)
554 continue;
555 data |= (qlcnic_config_npars &
556 QLC_DEV_SET_DRV(0xf, id));
557 }
558 } else {
559 data = readl(priv_op);
560 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
561 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
562 adapter->ahw.pci_func));
563 }
564 writel(data, priv_op);
565 qlcnic_api_unlock(adapter);
566err_lock:
567 return ret;
568}
569
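/* Determine the firmware HAL version and PCI function number from BAR 0,
 * and select the VF ops when this function has no management privileges.
 */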
570static void
571qlcnic_check_vf(struct qlcnic_adapter *adapter)
572{
573 void __iomem *msix_base_addr;
574 void __iomem *priv_op;
575 u32 func;
576 u32 msix_base;
577 u32 op_mode, priv_level;
578
579 /* Determine FW API version */
580 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
581
582 /* Find PCI function number */
583 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
584 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
585 msix_base = readl(msix_base_addr);
586 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
587 adapter->ahw.pci_func = func;
588
589 /* Determine function privilege level */
590 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
591 op_mode = readl(priv_op);
592 if (op_mode == QLC_DEV_DRV_DEFAULT)
593 priv_level = QLCNIC_MGMT_FUNC;
594 else
595 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
596
597 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
598 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
599 dev_info(&adapter->pdev->dev,
600 "HAL Version: %d Non Privileged function\n",
601 adapter->fw_hal_version);
602 adapter->nic_ops = &qlcnic_vf_ops;
603 } else
604 adapter->nic_ops = &qlcnic_ops;
605}
606
607static int
608qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
609{
610 void __iomem *mem_ptr0 = NULL;
611 resource_size_t mem_base;
612 unsigned long mem_len, pci_len0 = 0;
613
614 struct pci_dev *pdev = adapter->pdev;
615
616 /* remap phys address */
617 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
618 mem_len = pci_resource_len(pdev, 0);
619
620 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
621
622 mem_ptr0 = pci_ioremap_bar(pdev, 0);
623 if (mem_ptr0 == NULL) {
624 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
625 return -EIO;
626 }
627 pci_len0 = mem_len;
628 } else {
629 return -EIO;
630 }
631
632 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
633
634 adapter->ahw.pci_base0 = mem_ptr0;
635 adapter->ahw.pci_len0 = pci_len0;
636
637 qlcnic_check_vf(adapter);
638
639 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
640 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
641
642 return 0;
643}
644
645static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
646{
647 struct pci_dev *pdev = adapter->pdev;
648 int i, found = 0;
649
650 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
651 if (qlcnic_boards[i].vendor == pdev->vendor &&
652 qlcnic_boards[i].device == pdev->device &&
653 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
654 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
655 sprintf(name, "%pM: %s" ,
656 adapter->mac_addr,
657 qlcnic_boards[i].short_name);
658 found = 1;
659 break;
660 }
661
662 }
663
664 if (!found)
665 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
666}
667
668static void
669qlcnic_check_options(struct qlcnic_adapter *adapter)
670{
671 u32 fw_major, fw_minor, fw_build;
672 struct pci_dev *pdev = adapter->pdev;
673
674 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
675 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
676 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
677
678 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
679
680 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
681 fw_major, fw_minor, fw_build);
682
683 adapter->flags &= ~QLCNIC_LRO_ENABLED;
684
685 if (adapter->ahw.port_type == QLCNIC_XGBE) {
686 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
687 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
688 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
689 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
690 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
691 }
692
693 adapter->msix_supported = !!use_msi_x;
694 adapter->rss_supported = !!use_msi_x;
695
696 adapter->num_txd = MAX_CMD_DESCRIPTORS;
697
698 adapter->max_rds_rings = MAX_RDS_RINGS;
699}
700
701static int
702qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
703{
704 int err;
705 struct qlcnic_info nic_info;
706
707 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
708 if (err)
709 return err;
710
711 adapter->physical_port = nic_info.phys_port;
712 adapter->switch_mode = nic_info.switch_mode;
713 adapter->max_tx_ques = nic_info.max_tx_ques;
714 adapter->max_rx_ques = nic_info.max_rx_ques;
715 adapter->capabilities = nic_info.capabilities;
716 adapter->max_mac_filters = nic_info.max_mac_filters;
717 adapter->max_mtu = nic_info.max_mtu;
718
719 if (adapter->capabilities & BIT_6)
720 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
721 else
722 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
723
724 return err;
725}
726
727static void
728qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
729 struct qlcnic_esw_func_cfg *esw_cfg)
730{
731 if (esw_cfg->discard_tagged)
732 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
733 else
734 adapter->flags |= QLCNIC_TAGGING_ENABLED;
735
736 if (esw_cfg->vlan_id)
737 adapter->pvid = esw_cfg->vlan_id;
738 else
739 adapter->pvid = 0;
740}
741
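/* Apply the per-function eswitch settings: MAC anti-spoofing and whether
 * the function may override its MAC address, then update netdev features.
 */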
742static void
743qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
744 struct qlcnic_esw_func_cfg *esw_cfg)
745{
746 adapter->flags &= ~QLCNIC_MACSPOOF;
747 adapter->flags &= ~QLCNIC_MAC_OVERRIDE_DISABLED;
748
749 if (esw_cfg->mac_anti_spoof)
750 adapter->flags |= QLCNIC_MACSPOOF;
751
752 if (!esw_cfg->mac_override)
753 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
754
755 qlcnic_set_netdev_features(adapter, esw_cfg);
756}
757
758static int
759qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
760{
761 struct qlcnic_esw_func_cfg esw_cfg;
762
763 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
764 return 0;
765
766 esw_cfg.pci_func = adapter->ahw.pci_func;
767 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
768 return -EIO;
769 qlcnic_set_vlan_config(adapter, &esw_cfg);
770 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
771
772 return 0;
773}
774
775static void
776qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
777 struct qlcnic_esw_func_cfg *esw_cfg)
778{
779 struct net_device *netdev = adapter->netdev;
780 unsigned long features, vlan_features;
781
782 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
783 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
784 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
785 NETIF_F_IPV6_CSUM);
786
787 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
788 features |= (NETIF_F_TSO | NETIF_F_TSO6);
789 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
790 }
791 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
792 features |= NETIF_F_LRO;
793
794 if (esw_cfg->offload_flags & BIT_0) {
795 netdev->features |= features;
796 adapter->rx_csum = 1;
797 if (!(esw_cfg->offload_flags & BIT_1))
798 netdev->features &= ~NETIF_F_TSO;
799 if (!(esw_cfg->offload_flags & BIT_2))
800 netdev->features &= ~NETIF_F_TSO6;
801 } else {
802 netdev->features &= ~features;
803 adapter->rx_csum = 0;
804 }
805
806 netdev->vlan_features = (features & vlan_features);
807}
808
809static int
810qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
811{
812 void __iomem *priv_op;
813 u32 op_mode, priv_level;
814 int err = 0;
815
816 err = qlcnic_initialize_nic(adapter);
817 if (err)
818 return err;
819
820 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
821 return 0;
822
823 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
824 op_mode = readl(priv_op);
825 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
826
827 if (op_mode == QLC_DEV_DRV_DEFAULT)
828 priv_level = QLCNIC_MGMT_FUNC;
829 else
830 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
831
832 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
833 if (priv_level == QLCNIC_MGMT_FUNC) {
834 adapter->op_mode = QLCNIC_MGMT_FUNC;
835 err = qlcnic_init_pci_info(adapter);
836 if (err)
837 return err;
838 /* Set privilege level for other functions */
839 qlcnic_set_function_modes(adapter);
840 dev_info(&adapter->pdev->dev,
841 "HAL Version: %d, Management function\n",
842 adapter->fw_hal_version);
843 } else if (priv_level == QLCNIC_PRIV_FUNC) {
844 adapter->op_mode = QLCNIC_PRIV_FUNC;
845 dev_info(&adapter->pdev->dev,
846 "HAL Version: %d, Privileged function\n",
847 adapter->fw_hal_version);
848 }
849 }
850
851 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
852
853 return err;
854}
855
856static int
857qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
858{
859 struct qlcnic_esw_func_cfg esw_cfg;
860 struct qlcnic_npar_info *npar;
861 u8 i;
862
863 if (adapter->need_fw_reset)
864 return 0;
865
866 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
867 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
868 continue;
869 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
870 esw_cfg.pci_func = i;
871 esw_cfg.offload_flags = BIT_0;
872 esw_cfg.mac_override = BIT_0;
873 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
874 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
875 if (qlcnic_config_switch_port(adapter, &esw_cfg))
876 return -EIO;
877 npar = &adapter->npars[i];
878 npar->pvid = esw_cfg.vlan_id;
879 npar->mac_override = esw_cfg.mac_override;
880 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
881 npar->discard_tagged = esw_cfg.discard_tagged;
882 npar->promisc_mode = esw_cfg.promisc_mode;
883 npar->offload_flags = esw_cfg.offload_flags;
884 }
885
886 return 0;
887}
888
889static int
890qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
891 struct qlcnic_npar_info *npar, int pci_func)
892{
893 struct qlcnic_esw_func_cfg esw_cfg;
894 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
895 esw_cfg.pci_func = pci_func;
896 esw_cfg.vlan_id = npar->pvid;
897 esw_cfg.mac_override = npar->mac_override;
898 esw_cfg.discard_tagged = npar->discard_tagged;
899 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
900 esw_cfg.offload_flags = npar->offload_flags;
901 esw_cfg.promisc_mode = npar->promisc_mode;
902 if (qlcnic_config_switch_port(adapter, &esw_cfg))
903 return -EIO;
904
905 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
906 if (qlcnic_config_switch_port(adapter, &esw_cfg))
907 return -EIO;
908
909 return 0;
910}
911
912static int
913qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
914{
915 int i, err;
916 struct qlcnic_npar_info *npar;
917 struct qlcnic_info nic_info;
918
919 if (!adapter->need_fw_reset)
920 return 0;
921
922 /* Set the NPAR config data after FW reset */
923 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
924 npar = &adapter->npars[i];
925 if (npar->type != QLCNIC_TYPE_NIC)
926 continue;
927 err = qlcnic_get_nic_info(adapter, &nic_info, i);
928 if (err)
929 return err;
930 nic_info.min_tx_bw = npar->min_bw;
931 nic_info.max_tx_bw = npar->max_bw;
932 err = qlcnic_set_nic_info(adapter, &nic_info);
933 if (err)
934 return err;
935
936 if (npar->enable_pm) {
937 err = qlcnic_config_port_mirroring(adapter,
938 npar->dest_npar, 1, i);
939 if (err)
940 return err;
941 }
942 err = qlcnic_reset_eswitch_config(adapter, npar, i);
943 if (err)
944 return err;
945 }
946 return 0;
947}
948
949static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
950{
951 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
952 u32 npar_state;
953
954 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
955 return 0;
956
957 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
958 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
959 msleep(1000);
960 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
961 }
962 if (!npar_opt_timeo) {
963 dev_err(&adapter->pdev->dev,
964 "Timed out waiting for NPAR state to become operational\n");
965 return -EIO;
966 }
967 return 0;
968}
969
174240a8
RB
970static int
971qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
972{
973 int err;
974
975 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
976 adapter->op_mode != QLCNIC_MGMT_FUNC)
977 return 0;
978
979 err = qlcnic_set_default_offload_settings(adapter);
980 if (err)
981 return err;
982
983 err = qlcnic_reset_npar_config(adapter);
984 if (err)
985 return err;
986
987 qlcnic_dev_set_npar_ready(adapter);
988
989 return err;
990}
991
992static int
993qlcnic_start_firmware(struct qlcnic_adapter *adapter)
994{
995 int err;
996
997 err = qlcnic_can_start_firmware(adapter);
998 if (err < 0)
999 return err;
1000 else if (!err)
1001 goto check_fw_status;
1002
1003 if (load_fw_file)
1004 qlcnic_request_firmware(adapter);
1005 else {
1006 err = qlcnic_check_flash_fw_ver(adapter);
1007 if (err)
1008 goto err_out;
1009
1010 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
1011 }
1012
1013 err = qlcnic_need_fw_reset(adapter);
1014 if (err == 0)
1015 goto check_fw_status;
1016
1017 err = qlcnic_pinit_from_rom(adapter);
1018 if (err)
1019 goto err_out;
1020 qlcnic_set_port_mode(adapter);
1021
1022 err = qlcnic_load_firmware(adapter);
1023 if (err)
1024 goto err_out;
1025
1026 qlcnic_release_firmware(adapter);
1027 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
1028
1029check_fw_status:
1030 err = qlcnic_check_fw_status(adapter);
1031 if (err)
1032 goto err_out;
1033
1034 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
1035 qlcnic_idc_debug_info(adapter, 1);
1036
1037 err = qlcnic_check_eswitch_mode(adapter);
1038 if (err) {
1039 dev_err(&adapter->pdev->dev,
1040 "Memory allocation failed for eswitch\n");
1041 goto err_out;
1042 }
1043 err = qlcnic_set_mgmt_operations(adapter);
1044 if (err)
1045 goto err_out;
1046
1047 qlcnic_check_options(adapter);
1048 adapter->need_fw_reset = 0;
1049
1050 qlcnic_release_firmware(adapter);
1051 return 0;
1052
1053err_out:
1054 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1055 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
1056
1057 qlcnic_release_firmware(adapter);
1058 return err;
1059}
1060
1061static int
1062qlcnic_request_irq(struct qlcnic_adapter *adapter)
1063{
1064 irq_handler_t handler;
1065 struct qlcnic_host_sds_ring *sds_ring;
1066 int err, ring;
1067
1068 unsigned long flags = 0;
1069 struct net_device *netdev = adapter->netdev;
1070 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1071
1072 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1073 handler = qlcnic_tmp_intr;
1074 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1075 flags |= IRQF_SHARED;
1076
1077 } else {
1078 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1079 handler = qlcnic_msix_intr;
1080 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1081 handler = qlcnic_msi_intr;
1082 else {
1083 flags |= IRQF_SHARED;
1084 handler = qlcnic_intr;
1085 }
1086 }
1087 adapter->irq = netdev->irq;
1088
1089 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1090 sds_ring = &recv_ctx->sds_rings[ring];
1091 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1092 err = request_irq(sds_ring->irq, handler,
1093 flags, sds_ring->name, sds_ring);
1094 if (err)
1095 return err;
1096 }
1097
1098 return 0;
1099}
1100
1101static void
1102qlcnic_free_irq(struct qlcnic_adapter *adapter)
1103{
1104 int ring;
1105 struct qlcnic_host_sds_ring *sds_ring;
1106
1107 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1108
1109 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1110 sds_ring = &recv_ctx->sds_rings[ring];
1111 free_irq(sds_ring->irq, sds_ring);
1112 }
1113}
1114
1115static void
1116qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1117{
1118 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1119 adapter->coal.normal.data.rx_time_us =
1120 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1121 adapter->coal.normal.data.rx_packets =
1122 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1123 adapter->coal.normal.data.tx_time_us =
1124 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1125 adapter->coal.normal.data.tx_packets =
1126 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1127}
1128
1129static int
1130__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1131{
1132 int ring;
1133 struct qlcnic_host_rds_ring *rds_ring;
1134
1135 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1136 return -EIO;
1137
1138 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1139 return 0;
1140 if (qlcnic_set_eswitch_port_config(adapter))
1141 return -EIO;
1142
1143 if (qlcnic_fw_create_ctx(adapter))
1144 return -EIO;
1145
1146 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1147 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1148 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1149 }
1150
1151 qlcnic_set_multi(netdev);
1152 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1153
1154 adapter->ahw.linkup = 0;
1155
1156 if (adapter->max_sds_rings > 1)
1157 qlcnic_config_rss(adapter, 1);
1158
1159 qlcnic_config_intr_coalesce(adapter);
1160
1161 if (netdev->features & NETIF_F_LRO)
1162 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1163
1164 qlcnic_napi_enable(adapter);
1165
1166 qlcnic_linkevent_request(adapter, 1);
1167
1168 adapter->reset_context = 0;
1169 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1170 return 0;
1171}
1172
1173/* Used during resume and firmware recovery. */
1174
1175static int
1176qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1177{
1178 int err = 0;
1179
1180 rtnl_lock();
1181 if (netif_running(netdev))
1182 err = __qlcnic_up(adapter, netdev);
1183 rtnl_unlock();
1184
1185 return err;
1186}
1187
1188static void
1189__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1190{
1191 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1192 return;
1193
1194 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1195 return;
1196
1197 smp_mb();
1198 spin_lock(&adapter->tx_clean_lock);
1199 netif_carrier_off(netdev);
1200 netif_tx_disable(netdev);
1201
1202 qlcnic_free_mac_list(adapter);
1203
1204 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1205
1206 qlcnic_napi_disable(adapter);
1207
1208 qlcnic_fw_destroy_ctx(adapter);
1209
1210 qlcnic_reset_rx_buffers_list(adapter);
1211 qlcnic_release_tx_buffers(adapter);
1212 spin_unlock(&adapter->tx_clean_lock);
1213}
1214
1215/* Used during suspend and firmware recovery. */
1216
1217static void
1218qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1219{
1220 rtnl_lock();
1221 if (netif_running(netdev))
1222 __qlcnic_down(adapter, netdev);
1223 rtnl_unlock();
1224
1225}
1226
1227static int
1228qlcnic_attach(struct qlcnic_adapter *adapter)
1229{
1230 struct net_device *netdev = adapter->netdev;
1231 struct pci_dev *pdev = adapter->pdev;
1232 int err;
1233
1234 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1235 return 0;
1236
1237 err = qlcnic_napi_add(adapter, netdev);
1238 if (err)
1239 return err;
1240
1241 err = qlcnic_alloc_sw_resources(adapter);
1242 if (err) {
1243 dev_err(&pdev->dev, "Error in setting sw resources\n");
1244 goto err_out_napi_del;
1245 }
1246
1247 err = qlcnic_alloc_hw_resources(adapter);
1248 if (err) {
1249 dev_err(&pdev->dev, "Error in setting hw resources\n");
1250 goto err_out_free_sw;
1251 }
1252
1253 err = qlcnic_request_irq(adapter);
1254 if (err) {
1255 dev_err(&pdev->dev, "failed to setup interrupt\n");
1256 goto err_out_free_hw;
1257 }
1258
1259 qlcnic_init_coalesce_defaults(adapter);
1260
1261 qlcnic_create_sysfs_entries(adapter);
1262
1263 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1264 return 0;
1265
1266err_out_free_hw:
1267 qlcnic_free_hw_resources(adapter);
1268err_out_free_sw:
1269 qlcnic_free_sw_resources(adapter);
1270err_out_napi_del:
1271 qlcnic_napi_del(adapter);
1272 return err;
1273}
1274
1275static void
1276qlcnic_detach(struct qlcnic_adapter *adapter)
1277{
1278 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1279 return;
1280
1281 qlcnic_remove_sysfs_entries(adapter);
1282
1283 qlcnic_free_hw_resources(adapter);
1284 qlcnic_release_rx_buffers(adapter);
1285 qlcnic_free_irq(adapter);
1286 qlcnic_napi_del(adapter);
1287 qlcnic_free_sw_resources(adapter);
1288
1289 adapter->is_up = 0;
1290}
1291
1292void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1293{
1294 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1295 struct qlcnic_host_sds_ring *sds_ring;
1296 int ring;
1297
1298 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1299 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1300 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1301 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1302 qlcnic_disable_int(sds_ring);
1303 }
1304 }
1305
1306 qlcnic_fw_destroy_ctx(adapter);
1307
1308 qlcnic_detach(adapter);
1309
1310 adapter->diag_test = 0;
1311 adapter->max_sds_rings = max_sds_rings;
1312
1313 if (qlcnic_attach(adapter))
1314 goto out;
1315
1316 if (netif_running(netdev))
1317 __qlcnic_up(adapter, netdev);
1318out:
1319 netif_device_attach(netdev);
1320}
1321
1322int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1323{
1324 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1325 struct qlcnic_host_sds_ring *sds_ring;
1326 struct qlcnic_host_rds_ring *rds_ring;
1327 int ring;
1328 int ret;
1329
1330 netif_device_detach(netdev);
1331
1332 if (netif_running(netdev))
1333 __qlcnic_down(adapter, netdev);
1334
1335 qlcnic_detach(adapter);
1336
1337 adapter->max_sds_rings = 1;
1338 adapter->diag_test = test;
1339
1340 ret = qlcnic_attach(adapter);
1341 if (ret) {
1342 netif_device_attach(netdev);
1343 return ret;
1344 }
1345
1346 ret = qlcnic_fw_create_ctx(adapter);
1347 if (ret) {
1348 qlcnic_detach(adapter);
1349 netif_device_attach(netdev);
1350 return ret;
1351 }
1352
1353 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1354 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1355 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1356 }
1357
1358 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1359 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1360 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1361 qlcnic_enable_int(sds_ring);
1362 }
7eb9855d 1363 }
1364 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1365
1366 return 0;
1367}
1368
1369/* Reset context in hardware only */
1370static int
1371qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1372{
1373 struct net_device *netdev = adapter->netdev;
1374
1375 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1376 return -EBUSY;
1377
1378 netif_device_detach(netdev);
1379
1380 qlcnic_down(adapter, netdev);
1381
1382 qlcnic_up(adapter, netdev);
1383
1384 netif_device_attach(netdev);
1385
1386 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1387 return 0;
1388}
1389
af19b491
AKS
1390int
1391qlcnic_reset_context(struct qlcnic_adapter *adapter)
1392{
1393 int err = 0;
1394 struct net_device *netdev = adapter->netdev;
1395
1396 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1397 return -EBUSY;
1398
1399 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1400
1401 netif_device_detach(netdev);
1402
1403 if (netif_running(netdev))
1404 __qlcnic_down(adapter, netdev);
1405
1406 qlcnic_detach(adapter);
1407
1408 if (netif_running(netdev)) {
1409 err = qlcnic_attach(adapter);
1410 if (!err)
1411 __qlcnic_up(adapter, netdev);
1412 }
1413
1414 netif_device_attach(netdev);
1415 }
1416
1417 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1418 return err;
1419}
1420
1421static int
1422qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1423 struct net_device *netdev, u8 pci_using_dac)
1424{
1425 int err;
1426 struct pci_dev *pdev = adapter->pdev;
1427
1428 adapter->rx_csum = 1;
1429 adapter->mc_enabled = 0;
1430 adapter->max_mc_count = 38;
1431
1432 netdev->netdev_ops = &qlcnic_netdev_ops;
1433 netdev->watchdog_timeo = 5*HZ;
1434
1435 qlcnic_change_mtu(netdev, netdev->mtu);
1436
1437 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1438
1439 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1440 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
1441 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1442 NETIF_F_IPV6_CSUM);
1443
1444 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1445 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1446 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1447 }
1448
1449 if (pci_using_dac) {
1450 netdev->features |= NETIF_F_HIGHDMA;
1451 netdev->vlan_features |= NETIF_F_HIGHDMA;
1452 }
1453
1454 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1455 netdev->features |= (NETIF_F_HW_VLAN_TX);
1456
1457 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1458 netdev->features |= NETIF_F_LRO;
1459 netdev->irq = adapter->msix_entries[0].vector;
1460
1461 netif_carrier_off(netdev);
1462 netif_stop_queue(netdev);
1463
1464 err = register_netdev(netdev);
1465 if (err) {
1466 dev_err(&pdev->dev, "failed to register net device\n");
1467 return err;
1468 }
1469
1470 return 0;
1471}
1472
1473static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1474{
1475 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1476 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1477 *pci_using_dac = 1;
1478 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1479 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1480 *pci_using_dac = 0;
1481 else {
1482 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1483 return -EIO;
1484 }
1485
1486 return 0;
1487}
1488
1489static int __devinit
1490qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1491{
1492 struct net_device *netdev = NULL;
1493 struct qlcnic_adapter *adapter = NULL;
1494 int err;
1495 uint8_t revision_id;
1496 uint8_t pci_using_dac;
1497 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1498
1499 err = pci_enable_device(pdev);
1500 if (err)
1501 return err;
1502
1503 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1504 err = -ENODEV;
1505 goto err_out_disable_pdev;
1506 }
1507
1508 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1509 if (err)
1510 goto err_out_disable_pdev;
1511
1512 err = pci_request_regions(pdev, qlcnic_driver_name);
1513 if (err)
1514 goto err_out_disable_pdev;
1515
1516 pci_set_master(pdev);
1517 pci_enable_pcie_error_reporting(pdev);
1518
1519 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1520 if (!netdev) {
1521 dev_err(&pdev->dev, "failed to allocate net_device\n");
1522 err = -ENOMEM;
1523 goto err_out_free_res;
1524 }
1525
1526 SET_NETDEV_DEV(netdev, &pdev->dev);
1527
1528 adapter = netdev_priv(netdev);
1529 adapter->netdev = netdev;
1530 adapter->pdev = pdev;
1531 adapter->dev_rst_time = jiffies;
1532
1533 revision_id = pdev->revision;
1534 adapter->ahw.revision_id = revision_id;
1535
1536 rwlock_init(&adapter->ahw.crb_lock);
1537 mutex_init(&adapter->ahw.mem_lock);
1538
1539 spin_lock_init(&adapter->tx_clean_lock);
1540 INIT_LIST_HEAD(&adapter->mac_list);
1541
1542 err = qlcnic_setup_pci_map(adapter);
1543 if (err)
1544 goto err_out_free_netdev;
1545
1546 /* This will be reset for mezz cards */
1547 adapter->portnum = adapter->ahw.pci_func;
1548
1549 err = qlcnic_get_board_info(adapter);
1550 if (err) {
1551 dev_err(&pdev->dev, "Error getting board config info.\n");
1552 goto err_out_iounmap;
1553 }
1554
1555 err = qlcnic_setup_idc_param(adapter);
1556 if (err)
1557 goto err_out_iounmap;
1558
1559 err = adapter->nic_ops->start_firmware(adapter);
1560 if (err) {
1561 dev_err(&pdev->dev, "Loading firmware failed. Please reboot\n");
1562 goto err_out_decr_ref;
1563 }
1564
1565 if (qlcnic_read_mac_addr(adapter))
1566 dev_warn(&pdev->dev, "failed to read mac addr\n");
1567
1568 if (adapter->portnum == 0) {
1569 get_brd_name(adapter, brd_name);
1570
1571 pr_info("%s: %s Board Chip rev 0x%x\n",
1572 module_name(THIS_MODULE),
1573 brd_name, adapter->ahw.revision_id);
1574 }
1575
1576 qlcnic_clear_stats(adapter);
1577
1578 qlcnic_setup_intr(adapter);
1579
1580 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1581 if (err)
1582 goto err_out_disable_msi;
1583
1584 pci_set_drvdata(pdev, adapter);
1585
1586 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1587
1588 switch (adapter->ahw.port_type) {
1589 case QLCNIC_GBE:
1590 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1591 adapter->netdev->name);
1592 break;
1593 case QLCNIC_XGBE:
1594 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1595 adapter->netdev->name);
1596 break;
1597 }
1598
1599 qlcnic_create_diag_entries(adapter);
1600
1601 return 0;
1602
1603err_out_disable_msi:
1604 qlcnic_teardown_intr(adapter);
1605
1606err_out_decr_ref:
1607 qlcnic_clr_all_drv_state(adapter, 0);
1608
1609err_out_iounmap:
1610 qlcnic_cleanup_pci_map(adapter);
1611
1612err_out_free_netdev:
1613 free_netdev(netdev);
1614
1615err_out_free_res:
1616 pci_release_regions(pdev);
1617
1618err_out_disable_pdev:
1619 pci_set_drvdata(pdev, NULL);
1620 pci_disable_device(pdev);
1621 return err;
1622}
1623
1624static void __devexit qlcnic_remove(struct pci_dev *pdev)
1625{
1626 struct qlcnic_adapter *adapter;
1627 struct net_device *netdev;
1628
1629 adapter = pci_get_drvdata(pdev);
1630 if (adapter == NULL)
1631 return;
1632
1633 netdev = adapter->netdev;
1634
1635 qlcnic_cancel_fw_work(adapter);
1636
1637 unregister_netdev(netdev);
1638
1639 qlcnic_detach(adapter);
1640
1641 if (adapter->npars != NULL)
1642 kfree(adapter->npars);
1643 if (adapter->eswitch != NULL)
1644 kfree(adapter->eswitch);
1645
1646 qlcnic_clr_all_drv_state(adapter, 0);
1647
1648 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1649
1650 qlcnic_teardown_intr(adapter);
1651
1652 qlcnic_remove_diag_entries(adapter);
1653
1654 qlcnic_cleanup_pci_map(adapter);
1655
1656 qlcnic_release_firmware(adapter);
1657
1658 pci_disable_pcie_error_reporting(pdev);
1659 pci_release_regions(pdev);
1660 pci_disable_device(pdev);
1661 pci_set_drvdata(pdev, NULL);
1662
1663 free_netdev(netdev);
1664}
1665static int __qlcnic_shutdown(struct pci_dev *pdev)
1666{
1667 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1668 struct net_device *netdev = adapter->netdev;
1669 int retval;
1670
1671 netif_device_detach(netdev);
1672
1673 qlcnic_cancel_fw_work(adapter);
1674
1675 if (netif_running(netdev))
1676 qlcnic_down(adapter, netdev);
1677
1678 qlcnic_clr_all_drv_state(adapter, 0);
1679
1680 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1681
1682 retval = pci_save_state(pdev);
1683 if (retval)
1684 return retval;
1685
1686 if (qlcnic_wol_supported(adapter)) {
1687 pci_enable_wake(pdev, PCI_D3cold, 1);
1688 pci_enable_wake(pdev, PCI_D3hot, 1);
1689 }
1690
1691 return 0;
1692}
1693
1694static void qlcnic_shutdown(struct pci_dev *pdev)
1695{
1696 if (__qlcnic_shutdown(pdev))
1697 return;
1698
1699 pci_disable_device(pdev);
1700}
1701
1702#ifdef CONFIG_PM
1703static int
1704qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1705{
1706 int retval;
1707
1708 retval = __qlcnic_shutdown(pdev);
1709 if (retval)
1710 return retval;
1711
1712 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1713 return 0;
1714}
1715
1716static int
1717qlcnic_resume(struct pci_dev *pdev)
1718{
1719 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1720 struct net_device *netdev = adapter->netdev;
1721 int err;
1722
1723 err = pci_enable_device(pdev);
1724 if (err)
1725 return err;
1726
1727 pci_set_power_state(pdev, PCI_D0);
1728 pci_set_master(pdev);
1729 pci_restore_state(pdev);
1730
1731 err = adapter->nic_ops->start_firmware(adapter);
1732 if (err) {
1733 dev_err(&pdev->dev, "failed to start firmware\n");
1734 return err;
1735 }
1736
1737 if (netif_running(netdev)) {
1738 err = qlcnic_up(adapter, netdev);
1739 if (err)
1740 goto done;
1741
1742 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1743 }
1744done:
1745 netif_device_attach(netdev);
1746 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1747 return 0;
1748}
1749#endif
1750
1751static int qlcnic_open(struct net_device *netdev)
1752{
1753 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1754 int err;
1755
1756 err = qlcnic_attach(adapter);
1757 if (err)
1758 return err;
1759
1760 err = __qlcnic_up(adapter, netdev);
1761 if (err)
1762 goto err_out;
1763
1764 netif_start_queue(netdev);
1765
1766 return 0;
1767
1768err_out:
1769 qlcnic_detach(adapter);
1770 return err;
1771}
1772
1773/*
1774 * qlcnic_close - Disables a network interface entry point
1775 */
1776static int qlcnic_close(struct net_device *netdev)
1777{
1778 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1779
1780 __qlcnic_down(adapter, netdev);
1781 return 0;
1782}
1783
1784static void
1785qlcnic_tso_check(struct net_device *netdev,
1786 struct qlcnic_host_tx_ring *tx_ring,
1787 struct cmd_desc_type0 *first_desc,
1788 struct sk_buff *skb)
1789{
1790 u8 opcode = TX_ETHER_PKT;
1791 __be16 protocol = skb->protocol;
1792 u16 flags = 0;
1793 int copied, offset, copy_len, hdr_len = 0, tso = 0;
1794 struct cmd_desc_type0 *hwdesc;
1795 struct vlan_ethhdr *vh;
1796 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1797 u32 producer = tx_ring->producer;
1798 int vlan_oob = first_desc->flags_opcode & cpu_to_le16(FLAGS_VLAN_OOB);
1799
1800 if (*(skb->data) & BIT_0) {
1801 flags |= BIT_0;
1802 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1803 }
1804
1805 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1806 skb_shinfo(skb)->gso_size > 0) {
1807
1808 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1809
1810 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1811 first_desc->total_hdr_length = hdr_len;
1812 if (vlan_oob) {
1813 first_desc->total_hdr_length += VLAN_HLEN;
1814 first_desc->tcp_hdr_offset = VLAN_HLEN;
1815 first_desc->ip_hdr_offset = VLAN_HLEN;
1816 /* Only in case of TSO on vlan device */
1817 flags |= FLAGS_VLAN_TAGGED;
1818 }
1819
1820 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1821 TX_TCP_LSO6 : TX_TCP_LSO;
1822 tso = 1;
1823
1824 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1825 u8 l4proto;
1826
1827 if (protocol == cpu_to_be16(ETH_P_IP)) {
1828 l4proto = ip_hdr(skb)->protocol;
1829
1830 if (l4proto == IPPROTO_TCP)
1831 opcode = TX_TCP_PKT;
1832 else if (l4proto == IPPROTO_UDP)
1833 opcode = TX_UDP_PKT;
1834 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1835 l4proto = ipv6_hdr(skb)->nexthdr;
1836
1837 if (l4proto == IPPROTO_TCP)
1838 opcode = TX_TCPV6_PKT;
1839 else if (l4proto == IPPROTO_UDP)
1840 opcode = TX_UDPV6_PKT;
1841 }
1842 }
1843
1844 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1845 first_desc->ip_hdr_offset += skb_network_offset(skb);
1846 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1847
1848 if (!tso)
1849 return;
1850
1851 /* For LSO, we need to copy the MAC/IP/TCP headers into
1852 * the descriptor ring
1853 */
1854 copied = 0;
1855 offset = 2;
1856
1857 if (vlan_oob) {
1858 /* Create a TSO vlan header template for firmware */
1859
1860 hwdesc = &tx_ring->desc_head[producer];
1861 tx_ring->cmd_buf_arr[producer].skb = NULL;
1862
1863 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1864 hdr_len + VLAN_HLEN);
1865
1866 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1867 skb_copy_from_linear_data(skb, vh, 12);
1868 vh->h_vlan_proto = htons(ETH_P_8021Q);
1869 vh->h_vlan_TCI = htons(first_desc->vlan_TCI);
1870 skb_copy_from_linear_data_offset(skb, 12,
1871 (char *)vh + 16, copy_len - 16);
1872
1873 copied = copy_len - VLAN_HLEN;
1874 offset = 0;
1875
1876 producer = get_next_index(producer, tx_ring->num_desc);
1877 }
1878
1879 while (copied < hdr_len) {
1880
1881 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1882 (hdr_len - copied));
1883
1884 hwdesc = &tx_ring->desc_head[producer];
1885 tx_ring->cmd_buf_arr[producer].skb = NULL;
1886
1887 skb_copy_from_linear_data_offset(skb, copied,
1888 (char *)hwdesc + offset, copy_len);
1889
1890 copied += copy_len;
1891 offset = 0;
1892
1893 producer = get_next_index(producer, tx_ring->num_desc);
1894 }
1895
1896 tx_ring->producer = producer;
1897 barrier();
8bfe8b91 1898 adapter->stats.lso_frames++;
1899}
1900
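/* DMA-map the skb head and each page fragment into pbuf->frag_array;
 * on a mapping failure everything mapped so far is unwound and -ENOMEM
 * is returned.
 */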
1901static int
1902qlcnic_map_tx_skb(struct pci_dev *pdev,
1903 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1904{
1905 struct qlcnic_skb_frag *nf;
1906 struct skb_frag_struct *frag;
1907 int i, nr_frags;
1908 dma_addr_t map;
1909
1910 nr_frags = skb_shinfo(skb)->nr_frags;
1911 nf = &pbuf->frag_array[0];
1912
1913 map = pci_map_single(pdev, skb->data,
1914 skb_headlen(skb), PCI_DMA_TODEVICE);
1915 if (pci_dma_mapping_error(pdev, map))
1916 goto out_err;
1917
1918 nf->dma = map;
1919 nf->length = skb_headlen(skb);
1920
1921 for (i = 0; i < nr_frags; i++) {
1922 frag = &skb_shinfo(skb)->frags[i];
1923 nf = &pbuf->frag_array[i+1];
1924
1925 map = pci_map_page(pdev, frag->page, frag->page_offset,
1926 frag->size, PCI_DMA_TODEVICE);
1927 if (pci_dma_mapping_error(pdev, map))
1928 goto unwind;
1929
1930 nf->dma = map;
1931 nf->length = frag->size;
1932 }
1933
1934 return 0;
1935
1936unwind:
1937 while (--i >= 0) {
1938 nf = &pbuf->frag_array[i+1];
1939 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1940 }
1941
1942 nf = &pbuf->frag_array[0];
1943 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1944
1945out_err:
1946 return -ENOMEM;
1947}
1948
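/* Pick up the VLAN tag either from an in-band 802.1Q header or from the
 * out-of-band skb tag, store it in the first Tx descriptor and enforce
 * the port's PVID/tagging rules before the frame is queued.
 */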
1949static int
1950qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
1951 struct sk_buff *skb,
1952 struct cmd_desc_type0 *first_desc)
1953{
1954 u8 opcode = 0;
1955 u16 flags = 0;
1956 __be16 protocol = skb->protocol;
1957 struct vlan_ethhdr *vh;
1958
1959 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1960 vh = (struct vlan_ethhdr *)skb->data;
1961 protocol = vh->h_vlan_encapsulated_proto;
1962 flags = FLAGS_VLAN_TAGGED;
1963 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
1964 } else if (vlan_tx_tag_present(skb)) {
1965 flags = FLAGS_VLAN_OOB;
1966 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
1967 }
1968 if (unlikely(adapter->pvid)) {
1969 if (first_desc->vlan_TCI &&
1970 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1971 return -EIO;
1972 if (first_desc->vlan_TCI &&
1973 (adapter->flags & QLCNIC_TAGGING_ENABLED))
1974 goto set_flags;
1975
1976 flags = FLAGS_VLAN_OOB;
1977 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
1978 }
1979set_flags:
1980 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1981 return 0;
1982}
1983
1984static inline void
1985qlcnic_clear_cmddesc(u64 *desc)
1986{
1987 desc[0] = 0ULL;
1988 desc[2] = 0ULL;
8cf61f89 1989 desc[7] = 0ULL;
1990}
1991
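/* Main transmit entry point: reserve command descriptors, DMA-map the skb,
 * scatter the buffer addresses four per descriptor, run the VLAN/TSO/
 * checksum setup and finally advance the ring producer for the firmware.
 */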
cdaff185 1992netdev_tx_t
1993qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1994{
1995 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1996 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1997 struct qlcnic_cmd_buffer *pbuf;
1998 struct qlcnic_skb_frag *buffrag;
1999 struct cmd_desc_type0 *hwdesc, *first_desc;
2000 struct pci_dev *pdev;
dcb50aff 2001 struct ethhdr *phdr;
2002 int i, k;
2003
2004 u32 producer;
2005 int frag_count, no_of_desc;
2006 u32 num_txd = tx_ring->num_desc;
2007
2008 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2009 netif_stop_queue(netdev);
2010 return NETDEV_TX_BUSY;
2011 }
2012
fe4d434d 2013 if (adapter->flags & QLCNIC_MACSPOOF) {
2014 phdr = (struct ethhdr *)skb->data;
2015 if (compare_ether_addr(phdr->h_source,
2016 adapter->mac_addr))
2017 goto drop_packet;
2018 }
2019
2020 frag_count = skb_shinfo(skb)->nr_frags + 1;
2021
2022 /* 4 fragments per cmd desc */
2023 no_of_desc = (frag_count + 3) >> 2;
2024
ef71ff83 2025 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2026 netif_stop_queue(netdev);
2027 smp_mb();
2028 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2029 netif_start_queue(netdev);
2030 else {
2031 adapter->stats.xmit_off++;
2032 return NETDEV_TX_BUSY;
2033 }
2034 }
2035
2036 producer = tx_ring->producer;
2037 pbuf = &tx_ring->cmd_buf_arr[producer];
2038
2039 pdev = adapter->pdev;
2040
2041 first_desc = hwdesc = &tx_ring->desc_head[producer];
2042 qlcnic_clear_cmddesc((u64 *)hwdesc);
2043
2044 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2045 goto drop_packet;
2046
2047 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2048 adapter->stats.tx_dma_map_error++;
af19b491 2049 goto drop_packet;
8ae6df97 2050 }
2051
2052 pbuf->skb = skb;
2053 pbuf->frag_count = frag_count;
2054
2055 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2056 qlcnic_set_tx_port(first_desc, adapter->portnum);
2057
2058 for (i = 0; i < frag_count; i++) {
2059
2060 k = i % 4;
2061
2062 if ((k == 0) && (i > 0)) {
2063 /* move to next desc.*/
2064 producer = get_next_index(producer, num_txd);
2065 hwdesc = &tx_ring->desc_head[producer];
2066 qlcnic_clear_cmddesc((u64 *)hwdesc);
2067 tx_ring->cmd_buf_arr[producer].skb = NULL;
2068 }
2069
2070 buffrag = &pbuf->frag_array[i];
2071
2072 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2073 switch (k) {
2074 case 0:
2075 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2076 break;
2077 case 1:
2078 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2079 break;
2080 case 2:
2081 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2082 break;
2083 case 3:
2084 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2085 break;
2086 }
2087 }
2088
2089 tx_ring->producer = get_next_index(producer, num_txd);
2090
2091 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2092
2093 qlcnic_update_cmd_producer(adapter, tx_ring);
2094
2095 adapter->stats.txbytes += skb->len;
2096 adapter->stats.xmitcalled++;
2097
2098 return NETDEV_TX_OK;
2099
2100drop_packet:
2101 adapter->stats.txdropped++;
2102 dev_kfree_skb_any(skb);
2103 return NETDEV_TX_OK;
2104}
2105
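/* Read the board temperature state from CRB space; warn when the operating
 * range is exceeded and return 1 when the panic threshold has shut the
 * hardware down.
 */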
2106static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2107{
2108 struct net_device *netdev = adapter->netdev;
2109 u32 temp, temp_state, temp_val;
2110 int rv = 0;
2111
2112 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2113
2114 temp_state = qlcnic_get_temp_state(temp);
2115 temp_val = qlcnic_get_temp_val(temp);
2116
2117 if (temp_state == QLCNIC_TEMP_PANIC) {
2118 dev_err(&netdev->dev,
2119 "Device temperature %d degrees C exceeds"
2120 " maximum allowed. Hardware has been shut down.\n",
2121 temp_val);
2122 rv = 1;
2123 } else if (temp_state == QLCNIC_TEMP_WARN) {
2124 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2125 dev_err(&netdev->dev,
2126 "Device temperature %d degrees C "
2127 "exceeds operating range."
2128 " Immediate action needed.\n",
2129 temp_val);
2130 }
2131 } else {
2132 if (adapter->temp == QLCNIC_TEMP_WARN) {
2133 dev_info(&netdev->dev,
2134 "Device temperature is now %d degrees C"
2135 " in normal range.\n", temp_val);
2136 }
2137 }
2138 adapter->temp = temp_state;
2139 return rv;
2140}
2141
2142void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2143{
2144 struct net_device *netdev = adapter->netdev;
2145
2146 if (adapter->ahw.linkup && !linkup) {
69324275 2147 netdev_info(netdev, "NIC Link is down\n");
2148 adapter->ahw.linkup = 0;
2149 if (netif_running(netdev)) {
2150 netif_carrier_off(netdev);
2151 netif_stop_queue(netdev);
2152 }
2153 } else if (!adapter->ahw.linkup && linkup) {
69324275 2154 netdev_info(netdev, "NIC Link is up\n");
2155 adapter->ahw.linkup = 1;
2156 if (netif_running(netdev)) {
2157 netif_carrier_on(netdev);
2158 netif_wake_queue(netdev);
2159 }
2160 }
2161}
2162
2163static void qlcnic_tx_timeout(struct net_device *netdev)
2164{
2165 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2166
2167 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2168 return;
2169
2170 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
2171
2172 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
2173 adapter->need_fw_reset = 1;
2174 else
2175 adapter->reset_context = 1;
2176}
2177
2178static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2179{
2180 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2181 struct net_device_stats *stats = &netdev->stats;
2182
2183 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2184 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2185 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
2186 stats->tx_bytes = adapter->stats.txbytes;
2187 stats->rx_dropped = adapter->stats.rxdropped;
2188 stats->tx_dropped = adapter->stats.txdropped;
2189
2190 return stats;
2191}
2192
7eb9855d 2193static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2194{
2195 u32 status;
2196
2197 status = readl(adapter->isr_int_vec);
2198
2199 if (!(status & adapter->int_vec_bit))
2200 return IRQ_NONE;
2201
2202 /* check interrupt state machine, to be sure */
2203 status = readl(adapter->crb_int_state_reg);
2204 if (!ISR_LEGACY_INT_TRIGGERED(status))
2205 return IRQ_NONE;
2206
2207 writel(0xffffffff, adapter->tgt_status_reg);
2208 /* read twice to ensure write is flushed */
2209 readl(adapter->isr_int_vec);
2210 readl(adapter->isr_int_vec);
2211
2212 return IRQ_HANDLED;
2213}
2214
2215static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2216{
2217 struct qlcnic_host_sds_ring *sds_ring = data;
2218 struct qlcnic_adapter *adapter = sds_ring->adapter;
2219
2220 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2221 goto done;
2222 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2223 writel(0xffffffff, adapter->tgt_status_reg);
2224 goto done;
2225 }
2226
2227 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2228 return IRQ_NONE;
2229
2230done:
2231 adapter->diag_cnt++;
2232 qlcnic_enable_int(sds_ring);
2233 return IRQ_HANDLED;
2234}
2235
2236static irqreturn_t qlcnic_intr(int irq, void *data)
2237{
2238 struct qlcnic_host_sds_ring *sds_ring = data;
2239 struct qlcnic_adapter *adapter = sds_ring->adapter;
2240
2241 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2242 return IRQ_NONE;
2243
2244 napi_schedule(&sds_ring->napi);
2245
2246 return IRQ_HANDLED;
2247}
2248
2249static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2250{
2251 struct qlcnic_host_sds_ring *sds_ring = data;
2252 struct qlcnic_adapter *adapter = sds_ring->adapter;
2253
2254 /* clear interrupt */
2255 writel(0xffffffff, adapter->tgt_status_reg);
2256
2257 napi_schedule(&sds_ring->napi);
2258 return IRQ_HANDLED;
2259}
2260
2261static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2262{
2263 struct qlcnic_host_sds_ring *sds_ring = data;
2264
2265 napi_schedule(&sds_ring->napi);
2266 return IRQ_HANDLED;
2267}
2268
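/* Reclaim Tx descriptors completed by the firmware: unmap and free the
 * associated skbs, wake the queue if it was stopped and report whether the
 * ring has been fully drained.
 */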
2269static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2270{
2271 u32 sw_consumer, hw_consumer;
2272 int count = 0, i;
2273 struct qlcnic_cmd_buffer *buffer;
2274 struct pci_dev *pdev = adapter->pdev;
2275 struct net_device *netdev = adapter->netdev;
2276 struct qlcnic_skb_frag *frag;
2277 int done;
2278 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2279
2280 if (!spin_trylock(&adapter->tx_clean_lock))
2281 return 1;
2282
2283 sw_consumer = tx_ring->sw_consumer;
2284 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2285
2286 while (sw_consumer != hw_consumer) {
2287 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2288 if (buffer->skb) {
2289 frag = &buffer->frag_array[0];
2290 pci_unmap_single(pdev, frag->dma, frag->length,
2291 PCI_DMA_TODEVICE);
2292 frag->dma = 0ULL;
2293 for (i = 1; i < buffer->frag_count; i++) {
2294 frag++;
2295 pci_unmap_page(pdev, frag->dma, frag->length,
2296 PCI_DMA_TODEVICE);
2297 frag->dma = 0ULL;
2298 }
2299
2300 adapter->stats.xmitfinished++;
2301 dev_kfree_skb_any(buffer->skb);
2302 buffer->skb = NULL;
2303 }
2304
2305 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2306 if (++count >= MAX_STATUS_HANDLE)
2307 break;
2308 }
2309
2310 if (count && netif_running(netdev)) {
2311 tx_ring->sw_consumer = sw_consumer;
2312
2313 smp_mb();
2314
2315 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2316 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2317 netif_wake_queue(netdev);
8bfe8b91 2318 adapter->stats.xmit_on++;
af19b491 2319 }
af19b491 2320 }
ef71ff83 2321 adapter->tx_timeo_cnt = 0;
2322 }
2323 /*
2324 * If everything is freed up to consumer then check if the ring is full
2325 * If the ring is full then check if more needs to be freed and
2326 * schedule the call back again.
2327 *
2328 * This happens when there are 2 CPUs. One could be freeing and the
2329 * other filling it. If the ring is full when we get out of here and
2330 * the card has already interrupted the host then the host can miss the
2331 * interrupt.
2332 *
2333 * There is still a possible race condition and the host could miss an
2334 * interrupt. The card has to take care of this.
2335 */
2336 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2337 done = (sw_consumer == hw_consumer);
2338 spin_unlock(&adapter->tx_clean_lock);
2339
2340 return done;
2341}
2342
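/* NAPI handler: reclaims Tx completions, processes up to "budget" Rx
 * packets and re-enables the interrupt once both are done.
 */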
2343static int qlcnic_poll(struct napi_struct *napi, int budget)
2344{
2345 struct qlcnic_host_sds_ring *sds_ring =
2346 container_of(napi, struct qlcnic_host_sds_ring, napi);
2347
2348 struct qlcnic_adapter *adapter = sds_ring->adapter;
2349
2350 int tx_complete;
2351 int work_done;
2352
2353 tx_complete = qlcnic_process_cmd_ring(adapter);
2354
2355 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2356
2357 if ((work_done < budget) && tx_complete) {
2358 napi_complete(&sds_ring->napi);
2359 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2360 qlcnic_enable_int(sds_ring);
2361 }
2362
2363 return work_done;
2364}
2365
8f891387 2366static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2367{
2368 struct qlcnic_host_sds_ring *sds_ring =
2369 container_of(napi, struct qlcnic_host_sds_ring, napi);
2370
2371 struct qlcnic_adapter *adapter = sds_ring->adapter;
2372 int work_done;
2373
2374 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2375
2376 if (work_done < budget) {
2377 napi_complete(&sds_ring->napi);
2378 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2379 qlcnic_enable_int(sds_ring);
2380 }
2381
2382 return work_done;
2383}
2384
2385#ifdef CONFIG_NET_POLL_CONTROLLER
2386static void qlcnic_poll_controller(struct net_device *netdev)
2387{
2388 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2389 disable_irq(adapter->irq);
2390 qlcnic_intr(adapter->irq, adapter);
2391 enable_irq(adapter->irq);
2392}
2393#endif
2394
2395static void
2396qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2397{
2398 u32 val;
2399
2400 val = adapter->portnum & 0xf;
2401 val |= encoding << 7;
2402 val |= (jiffies - adapter->dev_rst_time) << 8;
2403
2404 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2405 adapter->dev_rst_time = jiffies;
2406}
2407
2408static int
2409qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
2410{
2411 u32 val;
2412
2413 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2414 state != QLCNIC_DEV_NEED_QUISCENT);
2415
2416 if (qlcnic_api_lock(adapter))
ade91f8e 2417 return -EIO;
2418
2419 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2420
2421 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2422 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2423 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2424 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
2425
2426 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2427
2428 qlcnic_api_unlock(adapter);
2429
2430 return 0;
2431}
2432
2433static int
2434qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2435{
2436 u32 val;
2437
2438 if (qlcnic_api_lock(adapter))
2439 return -EBUSY;
2440
2441 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2442 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2443 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2444
2445 qlcnic_api_unlock(adapter);
2446
2447 return 0;
2448}
2449
af19b491 2450static void
21854f02 2451qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
2452{
2453 u32 val;
2454
2455 if (qlcnic_api_lock(adapter))
2456 goto err;
2457
31018e06 2458 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2459 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2460 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2461
2462 if (failed) {
2463 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2464 dev_info(&adapter->pdev->dev,
2465 "Device state set to Failed. Please Reboot\n");
2466 } else if (!(val & 0x11111111))
2467 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2468
2469 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2470 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2471 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2472
2473 qlcnic_api_unlock(adapter);
2474err:
2475 adapter->fw_fail_cnt = 0;
2476 clear_bit(__QLCNIC_START_FW, &adapter->state);
2477 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2478}
2479
f73dfc50 2480/* Grab api lock, before checking state */
2481static int
2482qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2483{
2484 int act, state;
2485
2486 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2487 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2488
2489 if (((state & 0x11111111) == (act & 0x11111111)) ||
2490 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2491 return 0;
2492 else
2493 return 1;
2494}
2495
2496static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2497{
2498 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2499
2500 if (val != QLCNIC_DRV_IDC_VER) {
2501 dev_warn(&adapter->pdev->dev, "IDC version mismatch, driver's = %x,"
2502 " device's = %x\n", QLCNIC_DRV_IDC_VER, val);
2503 }
2504
2505 return 0;
2506}
2507
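/* IDC handshake used at probe/reset time: bump this function's reference
 * count, then either take ownership of the firmware load (return 1), wait
 * for another function to finish initializing, or fail if the device is in
 * the FAILED state.
 */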
2508static int
2509qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2510{
2511 u32 val, prev_state;
aa5e18c0 2512 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2513 u8 portnum = adapter->portnum;
96f8118c 2514 u8 ret;
af19b491 2515
2516 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2517 return 1;
2518
2519 if (qlcnic_api_lock(adapter))
2520 return -1;
2521
31018e06 2522 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2523 if (!(val & (1 << (portnum * 4)))) {
2524 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2525 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2526 }
2527
2528 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2529 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2530
2531 switch (prev_state) {
2532 case QLCNIC_DEV_COLD:
bbd8c6a4 2533 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2534 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2535 qlcnic_idc_debug_info(adapter, 0);
2536 qlcnic_api_unlock(adapter);
2537 return 1;
2538
2539 case QLCNIC_DEV_READY:
96f8118c 2540 ret = qlcnic_check_idc_ver(adapter);
af19b491 2541 qlcnic_api_unlock(adapter);
96f8118c 2542 return ret;
2543
2544 case QLCNIC_DEV_NEED_RESET:
2545 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2546 QLC_DEV_SET_RST_RDY(val, portnum);
2547 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2548 break;
2549
2550 case QLCNIC_DEV_NEED_QUISCENT:
2551 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2552 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2553 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2554 break;
2555
2556 case QLCNIC_DEV_FAILED:
a7fc948f 2557 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2558 qlcnic_api_unlock(adapter);
2559 return -1;
2560
2561 case QLCNIC_DEV_INITIALIZING:
2562 case QLCNIC_DEV_QUISCENT:
2563 break;
2564 }
2565
2566 qlcnic_api_unlock(adapter);
2567
2568 do {
af19b491 2569 msleep(1000);
2570 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2571
2572 if (prev_state == QLCNIC_DEV_QUISCENT)
2573 continue;
2574 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2575
2576 if (!dev_init_timeo) {
2577 dev_err(&adapter->pdev->dev,
2578 "Timed out waiting for device to initialize\n");
af19b491 2579 return -1;
65b5b420 2580 }
2581
2582 if (qlcnic_api_lock(adapter))
2583 return -1;
2584
2585 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2586 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2587 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2588
96f8118c 2589 ret = qlcnic_check_idc_ver(adapter);
2590 qlcnic_api_unlock(adapter);
2591
96f8118c 2592 return ret;
2593}
2594
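/* Delayed work run while a reset is pending: wait for all driver instances
 * to ack, quiesce or restart the firmware as required and schedule
 * qlcnic_attach_work() once the device reaches the READY state.
 */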
2595static void
2596qlcnic_fwinit_work(struct work_struct *work)
2597{
2598 struct qlcnic_adapter *adapter = container_of(work,
2599 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2600 u32 dev_state = 0xf;
af19b491 2601
2602 if (qlcnic_api_lock(adapter))
2603 goto err_ret;
af19b491 2604
2605 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2606 if (dev_state == QLCNIC_DEV_QUISCENT) {
2607 qlcnic_api_unlock(adapter);
2608 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2609 FW_POLL_DELAY * 2);
2610 return;
2611 }
2612
9f26f547 2613 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2614 qlcnic_api_unlock(adapter);
2615 goto wait_npar;
2616 }
2617
2618 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2619 dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2620 adapter->reset_ack_timeo);
2621 goto skip_ack_check;
2622 }
2623
2624 if (!qlcnic_check_drv_state(adapter)) {
2625skip_ack_check:
2626 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2627
2628 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2629 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2630 QLCNIC_DEV_QUISCENT);
2631 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2632 FW_POLL_DELAY * 2);
2633 QLCDB(adapter, DRV, "Quiescing the driver\n");
2634 qlcnic_idc_debug_info(adapter, 0);
2635
2636 qlcnic_api_unlock(adapter);
2637 return;
2638 }
2639
2640 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2641 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2642 QLCNIC_DEV_INITIALIZING);
2643 set_bit(__QLCNIC_START_FW, &adapter->state);
2644 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2645 qlcnic_idc_debug_info(adapter, 0);
2646 }
2647
2648 qlcnic_api_unlock(adapter);
2649
9f26f547 2650 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2651 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2652 adapter->fw_wait_cnt = 0;
2653 return;
2654 }
2655 goto err_ret;
2656 }
2657
f73dfc50 2658 qlcnic_api_unlock(adapter);
aa5e18c0 2659
9f26f547 2660wait_npar:
af19b491 2661 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2662 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2663
af19b491 2664 switch (dev_state) {
3c4b23b1 2665 case QLCNIC_DEV_READY:
9f26f547 2666 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2667 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2668 adapter->fw_wait_cnt = 0;
2669 return;
2670 }
2671 case QLCNIC_DEV_FAILED:
2672 break;
2673 default:
2674 qlcnic_schedule_work(adapter,
2675 qlcnic_fwinit_work, FW_POLL_DELAY);
2676 return;
2677 }
2678
2679err_ret:
2680 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2681 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2682 netif_device_attach(adapter->netdev);
21854f02 2683 qlcnic_clr_all_drv_state(adapter, 0);
2684}
2685
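/* First half of the recovery sequence: detach the netdev, bring the
 * interface down and hand over to qlcnic_fwinit_work() unless the firmware
 * reported a fatal error or a temperature panic.
 */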
2686static void
2687qlcnic_detach_work(struct work_struct *work)
2688{
2689 struct qlcnic_adapter *adapter = container_of(work,
2690 struct qlcnic_adapter, fw_work.work);
2691 struct net_device *netdev = adapter->netdev;
2692 u32 status;
2693
2694 netif_device_detach(netdev);
2695
2696 qlcnic_down(adapter, netdev);
2697
2698 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2699
2700 if (status & QLCNIC_RCODE_FATAL_ERROR)
2701 goto err_ret;
2702
2703 if (adapter->temp == QLCNIC_TEMP_PANIC)
2704 goto err_ret;
2705
2706 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2707 goto err_ret;
2708
2709 adapter->fw_wait_cnt = 0;
2710
2711 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2712
2713 return;
2714
2715err_ret:
2716 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2717 status, adapter->temp);
34ce3626 2718 netif_device_attach(netdev);
21854f02 2719 qlcnic_clr_all_drv_state(adapter, 1);
2720}
2721
2722/* Transition NPAR state to NON-operational */
2723static void
2724qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2725{
2726 u32 state;
2727
2728 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2729 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2730 return;
2731
2732 if (qlcnic_api_lock(adapter))
2733 return;
2734 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2735 qlcnic_api_unlock(adapter);
2736}
2737
f73dfc50 2738/* Transition to RESET state from READY state only */
2739static void
2740qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2741{
2742 u32 state;
2743
cea8975e 2744 adapter->need_fw_reset = 1;
2745 if (qlcnic_api_lock(adapter))
2746 return;
2747
2748 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2749
f73dfc50 2750 if (state == QLCNIC_DEV_READY) {
af19b491 2751 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2752 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2753 qlcnic_idc_debug_info(adapter, 0);
2754 }
2755
3c4b23b1 2756 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2757 qlcnic_api_unlock(adapter);
2758}
2759
2760/* Transition NPAR state to operational */
2761static void
2762qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2763{
2764 if (qlcnic_api_lock(adapter))
2765 return;
2766
2767 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2768 QLCDB(adapter, DRV, "NPAR operational state set\n");
2769
2770 qlcnic_api_unlock(adapter);
2771}
2772
2773static void
2774qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2775 work_func_t func, int delay)
2776{
2777 if (test_bit(__QLCNIC_AER, &adapter->state))
2778 return;
2779
2780 INIT_DELAYED_WORK(&adapter->fw_work, func);
2781 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2782}
2783
2784static void
2785qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2786{
2787 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2788 msleep(10);
2789
2790 cancel_delayed_work_sync(&adapter->fw_work);
2791}
2792
2793static void
2794qlcnic_attach_work(struct work_struct *work)
2795{
2796 struct qlcnic_adapter *adapter = container_of(work,
2797 struct qlcnic_adapter, fw_work.work);
2798 struct net_device *netdev = adapter->netdev;
b18971d1 2799 u32 npar_state;
af19b491 2800
2801 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2802 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2803 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2804 qlcnic_clr_all_drv_state(adapter, 0);
2805 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2806 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2807 FW_POLL_DELAY);
2808 else
2809 goto attach;
2810 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2811 return;
2812 }
2813attach:
af19b491 2814 if (netif_running(netdev)) {
52486a3a 2815 if (qlcnic_up(adapter, netdev))
af19b491 2816 goto done;
2817
2818 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2819 }
2820
af19b491 2821done:
34ce3626 2822 netif_device_attach(netdev);
2823 adapter->fw_fail_cnt = 0;
2824 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2825
2826 if (!qlcnic_clr_drv_state(adapter))
2827 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2828 FW_POLL_DELAY);
2829}
2830
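/* Periodic health check driven by qlcnic_fw_poll_work(): monitors the
 * temperature and the firmware heartbeat and schedules the detach/recovery
 * path when the firmware is declared hung.
 */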
2831static int
2832qlcnic_check_health(struct qlcnic_adapter *adapter)
2833{
4e70812b 2834 u32 state = 0, heartbeat;
2835 struct net_device *netdev = adapter->netdev;
2836
2837 if (qlcnic_check_temp(adapter))
2838 goto detach;
2839
2372a5f1 2840 if (adapter->need_fw_reset)
af19b491 2841 qlcnic_dev_request_reset(adapter);
2842
2843 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2844 if (state == QLCNIC_DEV_NEED_RESET ||
2845 state == QLCNIC_DEV_NEED_QUISCENT) {
2846 qlcnic_set_npar_non_operational(adapter);
af19b491 2847 adapter->need_fw_reset = 1;
3c4b23b1 2848 }
af19b491 2849
2850 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2851 if (heartbeat != adapter->heartbeat) {
2852 adapter->heartbeat = heartbeat;
2853 adapter->fw_fail_cnt = 0;
2854 if (adapter->need_fw_reset)
2855 goto detach;
68bf1c68 2856
2857 if (adapter->reset_context &&
2858 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
2859 qlcnic_reset_hw_context(adapter);
2860 adapter->netdev->trans_start = jiffies;
2861 }
2862
2863 return 0;
2864 }
2865
2866 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2867 return 0;
2868
2869 qlcnic_dev_request_reset(adapter);
2870
2871 if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
2872 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2873
2874 dev_info(&netdev->dev, "firmware hang detected\n");
2875
2876detach:
2877 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2878 QLCNIC_DEV_NEED_RESET;
2879
2880 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2881 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2882
af19b491 2883 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2884 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2885 }
2886
2887 return 1;
2888}
2889
2890static void
2891qlcnic_fw_poll_work(struct work_struct *work)
2892{
2893 struct qlcnic_adapter *adapter = container_of(work,
2894 struct qlcnic_adapter, fw_work.work);
2895
2896 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2897 goto reschedule;
2898
2899
2900 if (qlcnic_check_health(adapter))
2901 return;
2902
2903reschedule:
2904 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2905}
2906
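/* Returns 1 if every lower-numbered PCI function of this device is in
 * D3cold, i.e. this is the first function to come back after an AER event.
 */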
2907static int qlcnic_is_first_func(struct pci_dev *pdev)
2908{
2909 struct pci_dev *oth_pdev;
2910 int val = pdev->devfn;
2911
2912 while (val-- > 0) {
2913 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2914 (pdev->bus), pdev->bus->number,
2915 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
2916 if (!oth_pdev)
2917 continue;
451724c8 2918
2919 if (oth_pdev->current_state != PCI_D3cold) {
2920 pci_dev_put(oth_pdev);
451724c8 2921 return 0;
2922 }
2923 pci_dev_put(oth_pdev);
2924 }
2925 return 1;
2926}
2927
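/* Re-initialize a function after an AER slot reset: restore PCI state,
 * restart the firmware when this privileged function is the first one back,
 * then re-attach and bring the interface up again.
 */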
2928static int qlcnic_attach_func(struct pci_dev *pdev)
2929{
2930 int err, first_func;
2931 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2932 struct net_device *netdev = adapter->netdev;
2933
2934 pdev->error_state = pci_channel_io_normal;
2935
2936 err = pci_enable_device(pdev);
2937 if (err)
2938 return err;
2939
2940 pci_set_power_state(pdev, PCI_D0);
2941 pci_set_master(pdev);
2942 pci_restore_state(pdev);
2943
2944 first_func = qlcnic_is_first_func(pdev);
2945
2946 if (qlcnic_api_lock(adapter))
2947 return -EINVAL;
2948
933fce12 2949 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
2950 adapter->need_fw_reset = 1;
2951 set_bit(__QLCNIC_START_FW, &adapter->state);
2952 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2953 QLCDB(adapter, DRV, "Restarting fw\n");
2954 }
2955 qlcnic_api_unlock(adapter);
2956
2957 err = adapter->nic_ops->start_firmware(adapter);
2958 if (err)
2959 return err;
2960
2961 qlcnic_clr_drv_state(adapter);
2962 qlcnic_setup_intr(adapter);
2963
2964 if (netif_running(netdev)) {
2965 err = qlcnic_attach(adapter);
2966 if (err) {
21854f02 2967 qlcnic_clr_all_drv_state(adapter, 1);
2968 clear_bit(__QLCNIC_AER, &adapter->state);
2969 netif_device_attach(netdev);
2970 return err;
2971 }
2972
2973 err = qlcnic_up(adapter, netdev);
2974 if (err)
2975 goto done;
2976
2977 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2978 }
2979 done:
2980 netif_device_attach(netdev);
2981 return err;
2982}
2983
2984static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2985 pci_channel_state_t state)
2986{
2987 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2988 struct net_device *netdev = adapter->netdev;
2989
2990 if (state == pci_channel_io_perm_failure)
2991 return PCI_ERS_RESULT_DISCONNECT;
2992
2993 if (state == pci_channel_io_normal)
2994 return PCI_ERS_RESULT_RECOVERED;
2995
2996 set_bit(__QLCNIC_AER, &adapter->state);
2997 netif_device_detach(netdev);
2998
2999 cancel_delayed_work_sync(&adapter->fw_work);
3000
3001 if (netif_running(netdev))
3002 qlcnic_down(adapter, netdev);
3003
3004 qlcnic_detach(adapter);
3005 qlcnic_teardown_intr(adapter);
3006
3007 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3008
3009 pci_save_state(pdev);
3010 pci_disable_device(pdev);
3011
3012 return PCI_ERS_RESULT_NEED_RESET;
3013}
3014
3015static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3016{
3017 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3018 PCI_ERS_RESULT_RECOVERED;
3019}
3020
3021static void qlcnic_io_resume(struct pci_dev *pdev)
3022{
3023 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3024
3025 pci_cleanup_aer_uncorrect_error_status(pdev);
3026
3027 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3028 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3029 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3030 FW_POLL_DELAY);
3031}
3032
3033static int
3034qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3035{
3036 int err;
3037
3038 err = qlcnic_can_start_firmware(adapter);
3039 if (err)
3040 return err;
3041
3042 err = qlcnic_check_npar_opertional(adapter);
3043 if (err)
3044 return err;
3c4b23b1 3045
3046 err = qlcnic_initialize_nic(adapter);
3047 if (err)
3048 return err;
3049
3050 qlcnic_check_options(adapter);
3051
3052 err = qlcnic_set_eswitch_port_config(adapter);
3053 if (err)
3054 return err;
3055
3056 adapter->need_fw_reset = 0;
3057
3058 return err;
3059}
3060
3061static int
3062qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3063{
3064 return -EOPNOTSUPP;
3065}
3066
3067static int
3068qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3069{
3070 return -EOPNOTSUPP;
3071}
3072
3073static ssize_t
3074qlcnic_store_bridged_mode(struct device *dev,
3075 struct device_attribute *attr, const char *buf, size_t len)
3076{
3077 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3078 unsigned long new;
3079 int ret = -EINVAL;
3080
3081 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3082 goto err_out;
3083
8a15ad1f 3084 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3085 goto err_out;
3086
3087 if (strict_strtoul(buf, 2, &new))
3088 goto err_out;
3089
2e9d722d 3090 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
3091 ret = len;
3092
3093err_out:
3094 return ret;
3095}
3096
3097static ssize_t
3098qlcnic_show_bridged_mode(struct device *dev,
3099 struct device_attribute *attr, char *buf)
3100{
3101 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3102 int bridged_mode = 0;
3103
3104 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3105 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3106
3107 return sprintf(buf, "%d\n", bridged_mode);
3108}
3109
3110static struct device_attribute dev_attr_bridged_mode = {
3111 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3112 .show = qlcnic_show_bridged_mode,
3113 .store = qlcnic_store_bridged_mode,
3114};
3115
3116static ssize_t
3117qlcnic_store_diag_mode(struct device *dev,
3118 struct device_attribute *attr, const char *buf, size_t len)
3119{
3120 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3121 unsigned long new;
3122
3123 if (strict_strtoul(buf, 2, &new))
3124 return -EINVAL;
3125
3126 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3127 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3128
3129 return len;
3130}
3131
3132static ssize_t
3133qlcnic_show_diag_mode(struct device *dev,
3134 struct device_attribute *attr, char *buf)
3135{
3136 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3137
3138 return sprintf(buf, "%d\n",
3139 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3140}
3141
3142static struct device_attribute dev_attr_diag_mode = {
3143 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3144 .show = qlcnic_show_diag_mode,
3145 .store = qlcnic_store_diag_mode,
3146};
3147
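/* Sanity-check a CRB sysfs access: diagnostics mode must be enabled and the
 * offset/size must describe an aligned 4-byte register or an 8-byte CAMQM
 * access.
 */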
3148static int
3149qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3150 loff_t offset, size_t size)
3151{
3152 size_t crb_size = 4;
3153
3154 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3155 return -EIO;
3156
3157 if (offset < QLCNIC_PCI_CRBSPACE) {
3158 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3159 QLCNIC_PCI_CAMQM_END))
3160 crb_size = 8;
3161 else
3162 return -EINVAL;
3163 }
af19b491 3164
3165 if ((size != crb_size) || (offset & (crb_size-1)))
3166 return -EINVAL;
3167
3168 return 0;
3169}
3170
3171static ssize_t
3172qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3173 struct bin_attribute *attr,
3174 char *buf, loff_t offset, size_t size)
3175{
3176 struct device *dev = container_of(kobj, struct device, kobj);
3177 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3178 u32 data;
897e8c7c 3179 u64 qmdata;
3180 int ret;
3181
3182 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3183 if (ret != 0)
3184 return ret;
3185
3186 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3187 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3188 memcpy(buf, &qmdata, size);
3189 } else {
3190 data = QLCRD32(adapter, offset);
3191 memcpy(buf, &data, size);
3192 }
3193 return size;
3194}
3195
3196static ssize_t
3197qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3198 struct bin_attribute *attr,
3199 char *buf, loff_t offset, size_t size)
3200{
3201 struct device *dev = container_of(kobj, struct device, kobj);
3202 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3203 u32 data;
897e8c7c 3204 u64 qmdata;
3205 int ret;
3206
3207 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3208 if (ret != 0)
3209 return ret;
3210
3211 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3212 memcpy(&qmdata, buf, size);
3213 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3214 } else {
3215 memcpy(&data, buf, size);
3216 QLCWR32(adapter, offset, data);
3217 }
3218 return size;
3219}
3220
3221static int
3222qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3223 loff_t offset, size_t size)
3224{
3225 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3226 return -EIO;
3227
3228 if ((size != 8) || (offset & 0x7))
3229 return -EIO;
3230
3231 return 0;
3232}
3233
3234static ssize_t
3235qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3236 struct bin_attribute *attr,
3237 char *buf, loff_t offset, size_t size)
3238{
3239 struct device *dev = container_of(kobj, struct device, kobj);
3240 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3241 u64 data;
3242 int ret;
3243
3244 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3245 if (ret != 0)
3246 return ret;
3247
3248 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3249 return -EIO;
3250
3251 memcpy(buf, &data, size);
3252
3253 return size;
3254}
3255
3256static ssize_t
3257qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3258 struct bin_attribute *attr,
3259 char *buf, loff_t offset, size_t size)
3260{
3261 struct device *dev = container_of(kobj, struct device, kobj);
3262 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3263 u64 data;
3264 int ret;
3265
3266 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3267 if (ret != 0)
3268 return ret;
3269
3270 memcpy(&data, buf, size);
3271
3272 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3273 return -EIO;
3274
3275 return size;
3276}
3277
3278
3279static struct bin_attribute bin_attr_crb = {
3280 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3281 .size = 0,
3282 .read = qlcnic_sysfs_read_crb,
3283 .write = qlcnic_sysfs_write_crb,
3284};
3285
3286static struct bin_attribute bin_attr_mem = {
3287 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3288 .size = 0,
3289 .read = qlcnic_sysfs_read_mem,
3290 .write = qlcnic_sysfs_write_mem,
3291};
3292
cea8975e 3293static int
3294validate_pm_config(struct qlcnic_adapter *adapter,
3295 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3296{
3297
3298 u8 src_pci_func, s_esw_id, d_esw_id;
3299 u8 dest_pci_func;
3300 int i;
3301
3302 for (i = 0; i < count; i++) {
3303 src_pci_func = pm_cfg[i].pci_func;
3304 dest_pci_func = pm_cfg[i].dest_npar;
3305 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3306 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3307 return QL_STATUS_INVALID_PARAM;
3308
3309 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3310 return QL_STATUS_INVALID_PARAM;
3311
3312 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3313 return QL_STATUS_INVALID_PARAM;
3314
3315 s_esw_id = adapter->npars[src_pci_func].phy_port;
3316 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3317
3318 if (s_esw_id != d_esw_id)
3319 return QL_STATUS_INVALID_PARAM;
3320
3321 }
3322 return 0;
3323
3324}
3325
3326static ssize_t
3327qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3328 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3329{
3330 struct device *dev = container_of(kobj, struct device, kobj);
3331 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3332 struct qlcnic_pm_func_cfg *pm_cfg;
3333 u32 id, action, pci_func;
3334 int count, rem, i, ret;
3335
3336 count = size / sizeof(struct qlcnic_pm_func_cfg);
3337 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3338 if (rem)
3339 return QL_STATUS_INVALID_PARAM;
3340
3341 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3342
3343 ret = validate_pm_config(adapter, pm_cfg, count);
3344 if (ret)
3345 return ret;
3346 for (i = 0; i < count; i++) {
3347 pci_func = pm_cfg[i].pci_func;
4e8acb01 3348 action = !!pm_cfg[i].action;
3349 id = adapter->npars[pci_func].phy_port;
3350 ret = qlcnic_config_port_mirroring(adapter, id,
3351 action, pci_func);
3352 if (ret)
3353 return ret;
3354 }
3355
3356 for (i = 0; i < count; i++) {
3357 pci_func = pm_cfg[i].pci_func;
3358 id = adapter->npars[pci_func].phy_port;
4e8acb01 3359 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
3360 adapter->npars[pci_func].dest_npar = id;
3361 }
3362 return size;
3363}
3364
3365static ssize_t
3366qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3367 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3368{
3369 struct device *dev = container_of(kobj, struct device, kobj);
3370 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3371 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3372 int i;
3373
3374 if (size != sizeof(pm_cfg))
3375 return QL_STATUS_INVALID_PARAM;
3376
3377 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3378 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3379 continue;
3380 pm_cfg[i].action = adapter->npars[i].enable_pm;
3381 pm_cfg[i].dest_npar = 0;
3382 pm_cfg[i].pci_func = i;
3383 }
3384 memcpy(buf, &pm_cfg, size);
3385
3386 return size;
3387}
3388
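/* Validate a block of eSwitch configuration entries written through sysfs:
 * check the PCI function numbers, normalize the MAC override/anti-spoof
 * settings according to the function's privilege level and make sure VLAN
 * add/delete requests are well formed.
 */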
cea8975e 3389static int
346fe763 3390validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3391 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3392{
7613c87b 3393 u32 op_mode;
3394 u8 pci_func;
3395 int i;
3396
3397 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
3398
3399 for (i = 0; i < count; i++) {
3400 pci_func = esw_cfg[i].pci_func;
3401 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3402 return QL_STATUS_INVALID_PARAM;
3403
3404 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3405 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3406 return QL_STATUS_INVALID_PARAM;
346fe763 3407
3408 switch (esw_cfg[i].op_mode) {
3409 case QLCNIC_PORT_DEFAULTS:
7613c87b 3410 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3411 QLCNIC_NON_PRIV_FUNC) {
7613c87b 3412 esw_cfg[i].mac_anti_spoof = 0;
3413 esw_cfg[i].mac_override = 1;
3414 }
3415 break;
3416 case QLCNIC_ADD_VLAN:
3417 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3418 return QL_STATUS_INVALID_PARAM;
3419 if (!esw_cfg[i].op_type)
3420 return QL_STATUS_INVALID_PARAM;
3421 break;
3422 case QLCNIC_DEL_VLAN:
3423 if (!esw_cfg[i].op_type)
3424 return QL_STATUS_INVALID_PARAM;
3425 break;
3426 default:
346fe763 3427 return QL_STATUS_INVALID_PARAM;
4e8acb01 3428 }
346fe763 3429 }
3430 return 0;
3431}
3432
3433static ssize_t
3434qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3435 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3436{
3437 struct device *dev = container_of(kobj, struct device, kobj);
3438 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3439 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3440 struct qlcnic_npar_info *npar;
346fe763 3441 int count, rem, i, ret;
0325d69b 3442 u8 pci_func, op_mode = 0;
3443
3444 count = size / sizeof(struct qlcnic_esw_func_cfg);
3445 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3446 if (rem)
3447 return QL_STATUS_INVALID_PARAM;
3448
3449 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3450 ret = validate_esw_config(adapter, esw_cfg, count);
3451 if (ret)
3452 return ret;
3453
3454 for (i = 0; i < count; i++) {
3455 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3456 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3457 return QL_STATUS_INVALID_PARAM;
3458
3459 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3460 continue;
3461
3462 op_mode = esw_cfg[i].op_mode;
3463 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3464 esw_cfg[i].op_mode = op_mode;
3465 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3466
3467 switch (esw_cfg[i].op_mode) {
3468 case QLCNIC_PORT_DEFAULTS:
3469 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3470 break;
3471 case QLCNIC_ADD_VLAN:
3472 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3473 break;
3474 case QLCNIC_DEL_VLAN:
3475 esw_cfg[i].vlan_id = 0;
3476 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3477 break;
0325d69b 3478 }
3479 }
3480
3481 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3482 goto out;
e9a47700 3483
3484 for (i = 0; i < count; i++) {
3485 pci_func = esw_cfg[i].pci_func;
3486 npar = &adapter->npars[pci_func];
3487 switch (esw_cfg[i].op_mode) {
3488 case QLCNIC_PORT_DEFAULTS:
3489 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3490 npar->mac_override = esw_cfg[i].mac_override;
3491 npar->offload_flags = esw_cfg[i].offload_flags;
3492 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3493 npar->discard_tagged = esw_cfg[i].discard_tagged;
3494 break;
3495 case QLCNIC_ADD_VLAN:
3496 npar->pvid = esw_cfg[i].vlan_id;
3497 break;
3498 case QLCNIC_DEL_VLAN:
3499 npar->pvid = 0;
3500 break;
3501 }
346fe763 3502 }
0325d69b 3503out:
3504 return size;
3505}
3506
3507static ssize_t
3508qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3509 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3510{
3511 struct device *dev = container_of(kobj, struct device, kobj);
3512 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3513 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3514 u8 i;
3515
3516 if (size != sizeof(esw_cfg))
3517 return QL_STATUS_INVALID_PARAM;
3518
3519 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3520 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3521 continue;
3522 esw_cfg[i].pci_func = i;
3523 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3524 return QL_STATUS_INVALID_PARAM;
3525 }
3526 memcpy(buf, &esw_cfg, size);
3527
3528 return size;
3529}
3530
cea8975e 3531static int
3532validate_npar_config(struct qlcnic_adapter *adapter,
3533 struct qlcnic_npar_func_cfg *np_cfg, int count)
3534{
3535 u8 pci_func, i;
3536
3537 for (i = 0; i < count; i++) {
3538 pci_func = np_cfg[i].pci_func;
3539 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3540 return QL_STATUS_INVALID_PARAM;
3541
3542 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3543 return QL_STATUS_INVALID_PARAM;
3544
3545 if (!IS_VALID_BW(np_cfg[i].min_bw)
3546 || !IS_VALID_BW(np_cfg[i].max_bw)
3547 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3548 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3549 return QL_STATUS_INVALID_PARAM;
3550 }
3551 return 0;
3552}
3553
3554static ssize_t
3555qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3556 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3557{
3558 struct device *dev = container_of(kobj, struct device, kobj);
3559 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3560 struct qlcnic_info nic_info;
3561 struct qlcnic_npar_func_cfg *np_cfg;
3562 int i, count, rem, ret;
3563 u8 pci_func;
3564
3565 count = size / sizeof(struct qlcnic_npar_func_cfg);
3566 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3567 if (rem)
3568 return QL_STATUS_INVALID_PARAM;
3569
3570 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3571 ret = validate_npar_config(adapter, np_cfg, count);
3572 if (ret)
3573 return ret;
3574
3575 for (i = 0; i < count ; i++) {
3576 pci_func = np_cfg[i].pci_func;
3577 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3578 if (ret)
3579 return ret;
3580 nic_info.pci_func = pci_func;
3581 nic_info.min_tx_bw = np_cfg[i].min_bw;
3582 nic_info.max_tx_bw = np_cfg[i].max_bw;
3583 ret = qlcnic_set_nic_info(adapter, &nic_info);
3584 if (ret)
3585 return ret;
3586 adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
3587 adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
3588 }
3589
3590 return size;
3591
3592}
3593static ssize_t
3594qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3595 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3596{
3597 struct device *dev = container_of(kobj, struct device, kobj);
3598 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3599 struct qlcnic_info nic_info;
3600 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3601 int i, ret;
3602
3603 if (size != sizeof(np_cfg))
3604 return QL_STATUS_INVALID_PARAM;
3605
3606 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3607 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3608 continue;
3609 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3610 if (ret)
3611 return ret;
3612
3613 np_cfg[i].pci_func = i;
3614 np_cfg[i].op_mode = nic_info.op_mode;
3615 np_cfg[i].port_num = nic_info.phys_port;
3616 np_cfg[i].fw_capab = nic_info.capabilities;
3617 np_cfg[i].min_bw = nic_info.min_tx_bw;
3618 np_cfg[i].max_bw = nic_info.max_tx_bw;
3619 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3620 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3621 }
3622 memcpy(buf, &np_cfg, size);
3623 return size;
3624}
3625
3626static ssize_t
3627qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3628 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3629{
3630 struct device *dev = container_of(kobj, struct device, kobj);
3631 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3632 struct qlcnic_esw_statistics port_stats;
3633 int ret;
3634
3635 if (size != sizeof(struct qlcnic_esw_statistics))
3636 return QL_STATUS_INVALID_PARAM;
3637
3638 if (offset >= QLCNIC_MAX_PCI_FUNC)
3639 return QL_STATUS_INVALID_PARAM;
3640
3641 memset(&port_stats, 0, size);
3642 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3643 &port_stats.rx);
3644 if (ret)
3645 return ret;
3646
3647 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3648 &port_stats.tx);
3649 if (ret)
3650 return ret;
3651
3652 memcpy(buf, &port_stats, size);
3653 return size;
3654}
3655
3656static ssize_t
3657qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3658 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3659{
3660 struct device *dev = container_of(kobj, struct device, kobj);
3661 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3662 struct qlcnic_esw_statistics esw_stats;
3663 int ret;
3664
3665 if (size != sizeof(struct qlcnic_esw_statistics))
3666 return QL_STATUS_INVALID_PARAM;
3667
3668 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3669 return QL_STATUS_INVALID_PARAM;
3670
3671 memset(&esw_stats, 0, size);
3672 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3673 &esw_stats.rx);
3674 if (ret)
3675 return ret;
3676
3677 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3678 &esw_stats.tx);
3679 if (ret)
3680 return ret;
3681
3682 memcpy(buf, &esw_stats, size);
3683 return size;
3684}
3685
3686static ssize_t
3687qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3688 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3689{
3690 struct device *dev = container_of(kobj, struct device, kobj);
3691 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3692 int ret;
3693
3694 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3695 return QL_STATUS_INVALID_PARAM;
3696
3697 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3698 QLCNIC_QUERY_RX_COUNTER);
3699 if (ret)
3700 return ret;
3701
3702 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3703 QLCNIC_QUERY_TX_COUNTER);
3704 if (ret)
3705 return ret;
3706
3707 return size;
3708}
3709
3710static ssize_t
3711qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3712 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3713{
3714
3715 struct device *dev = container_of(kobj, struct device, kobj);
3716 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3717 int ret;
3718
3719 if (offset >= QLCNIC_MAX_PCI_FUNC)
3720 return QL_STATUS_INVALID_PARAM;
3721
3722 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3723 QLCNIC_QUERY_RX_COUNTER);
3724 if (ret)
3725 return ret;
3726
3727 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3728 QLCNIC_QUERY_TX_COUNTER);
3729 if (ret)
3730 return ret;
3731
3732 return size;
3733}
3734
3735static ssize_t
3736qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3737 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3738{
3739 struct device *dev = container_of(kobj, struct device, kobj);
3740 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3741 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3742 struct qlcnic_pci_info *pci_info;
3743 int i, ret;
3744
3745 if (size != sizeof(pci_cfg))
3746 return QL_STATUS_INVALID_PARAM;
3747
3748 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3749 if (!pci_info)
3750 return -ENOMEM;
3751
346fe763 3752 ret = qlcnic_get_pci_info(adapter, pci_info);
3753 if (ret) {
3754 kfree(pci_info);
346fe763 3755 return ret;
e88db3bd 3756 }
3757
3758 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3759 pci_cfg[i].pci_func = pci_info[i].id;
3760 pci_cfg[i].func_type = pci_info[i].type;
3761 pci_cfg[i].port_num = pci_info[i].default_port;
3762 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3763 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3764 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3765 }
3766 memcpy(buf, &pci_cfg, size);
e88db3bd 3767 kfree(pci_info);
346fe763 3768 return size;
3769}
3770static struct bin_attribute bin_attr_npar_config = {
3771 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3772 .size = 0,
3773 .read = qlcnic_sysfs_read_npar_config,
3774 .write = qlcnic_sysfs_write_npar_config,
3775};
3776
3777static struct bin_attribute bin_attr_pci_config = {
3778 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3779 .size = 0,
3780 .read = qlcnic_sysfs_read_pci_config,
3781 .write = NULL,
3782};
3783
3784static struct bin_attribute bin_attr_port_stats = {
3785 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3786 .size = 0,
3787 .read = qlcnic_sysfs_get_port_stats,
3788 .write = qlcnic_sysfs_clear_port_stats,
3789};
3790
3791static struct bin_attribute bin_attr_esw_stats = {
3792 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3793 .size = 0,
3794 .read = qlcnic_sysfs_get_esw_stats,
3795 .write = qlcnic_sysfs_clear_esw_stats,
3796};
3797
3798static struct bin_attribute bin_attr_esw_config = {
3799 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3800 .size = 0,
3801 .read = qlcnic_sysfs_read_esw_config,
3802 .write = qlcnic_sysfs_write_esw_config,
3803};
3804
3805static struct bin_attribute bin_attr_pm_config = {
3806 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3807 .size = 0,
3808 .read = qlcnic_sysfs_read_pm_config,
3809 .write = qlcnic_sysfs_write_pm_config,
3810};
3811
3812static void
3813qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3814{
3815 struct device *dev = &adapter->pdev->dev;
3816
3817 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3818 if (device_create_file(dev, &dev_attr_bridged_mode))
3819 dev_warn(dev,
3820 "failed to create bridged_mode sysfs entry\n");
3821}
3822
3823static void
3824qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3825{
3826 struct device *dev = &adapter->pdev->dev;
3827
3828 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3829 device_remove_file(dev, &dev_attr_bridged_mode);
3830}
3831
3832static void
3833qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3834{
3835 struct device *dev = &adapter->pdev->dev;
3836
3837 if (device_create_bin_file(dev, &bin_attr_port_stats))
3838 dev_info(dev, "failed to create port stats sysfs entry");
3839
3840 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3841 return;
3842 if (device_create_file(dev, &dev_attr_diag_mode))
3843 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3844 if (device_create_bin_file(dev, &bin_attr_crb))
3845 dev_info(dev, "failed to create crb sysfs entry\n");
3846 if (device_create_bin_file(dev, &bin_attr_mem))
3847 dev_info(dev, "failed to create mem sysfs entry\n");
3848 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3849 return;
3850 if (device_create_bin_file(dev, &bin_attr_esw_config))
3851 dev_info(dev, "failed to create esw config sysfs entry");
3852 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3853 return;
3854 if (device_create_bin_file(dev, &bin_attr_pci_config))
3855 dev_info(dev, "failed to create pci config sysfs entry");
3856 if (device_create_bin_file(dev, &bin_attr_npar_config))
3857 dev_info(dev, "failed to create npar config sysfs entry");
3858 if (device_create_bin_file(dev, &bin_attr_pm_config))
3859 dev_info(dev, "failed to create pm config sysfs entry");
3860 if (device_create_bin_file(dev, &bin_attr_esw_stats))
3861 dev_info(dev, "failed to create eswitch stats sysfs entry");
3862}
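The registration above is tiered: every function exposes port_stats; non-privileged functions stop there; privileged functions add diag_mode, crb and mem; eswitch-enabled adapters add esw_config; and only the management function gets pci_config, npar_config, pm_config and esw_stats. A small probe of the PCI device directory (path handling here is an assumption) can therefore reveal which role a given function is running in:

/* Hedged sketch: report which diagnostic attributes exist under the
 * adapter's PCI device directory, e.g. /sys/bus/pci/devices/0000:03:00.0
 * (the path is supplied by the caller and is an assumption here).
 */
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	static const char * const names[] = {
		"port_stats", "diag_mode", "esw_config", "npar_config",
	};
	char path[256];
	unsigned int i;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pci-device-sysfs-dir>\n", argv[0]);
		return 1;
	}
	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		snprintf(path, sizeof(path), "%s/%s", argv[1], names[i]);
		printf("%-12s %s\n", names[i],
		       access(path, F_OK) == 0 ? "present" : "absent");
	}
	return 0;
}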
3863
3864static void
3865qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3866{
3867 struct device *dev = &adapter->pdev->dev;
3868
3869 device_remove_bin_file(dev, &bin_attr_port_stats);
3870
3871 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3872 return;
3873 device_remove_file(dev, &dev_attr_diag_mode);
3874 device_remove_bin_file(dev, &bin_attr_crb);
3875 device_remove_bin_file(dev, &bin_attr_mem);
3876 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3877 return;
3878 device_remove_bin_file(dev, &bin_attr_esw_config);
3879 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3880 return;
3881 device_remove_bin_file(dev, &bin_attr_pci_config);
3882 device_remove_bin_file(dev, &bin_attr_npar_config);
3883	device_remove_bin_file(dev, &bin_attr_pm_config);
3884	device_remove_bin_file(dev, &bin_attr_esw_stats);
3885}
3886
3887#ifdef CONFIG_INET
3888
3889#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
3890
3891static void
3892qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3893{
3894 struct in_device *indev;
3895 struct qlcnic_adapter *adapter = netdev_priv(dev);
3896
3897 indev = in_dev_get(dev);
3898 if (!indev)
3899 return;
3900
3901 for_ifa(indev) {
3902 switch (event) {
3903 case NETDEV_UP:
3904 qlcnic_config_ipaddr(adapter,
3905 ifa->ifa_address, QLCNIC_IP_UP);
3906 break;
3907 case NETDEV_DOWN:
3908 qlcnic_config_ipaddr(adapter,
3909 ifa->ifa_address, QLCNIC_IP_DOWN);
3910 break;
3911 default:
3912 break;
3913 }
3914 } endfor_ifa(indev);
3915
3916 in_dev_put(indev);
3917}
3918
3919static int qlcnic_netdev_event(struct notifier_block *this,
3920 unsigned long event, void *ptr)
3921{
3922 struct qlcnic_adapter *adapter;
3923 struct net_device *dev = (struct net_device *)ptr;
3924
3925recheck:
3926 if (dev == NULL)
3927 goto done;
3928
3929 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3930 dev = vlan_dev_real_dev(dev);
3931 goto recheck;
3932 }
3933
3934 if (!is_qlcnic_netdev(dev))
3935 goto done;
3936
3937 adapter = netdev_priv(dev);
3938
3939 if (!adapter)
3940 goto done;
3941
3942	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3943 goto done;
3944
3945 qlcnic_config_indev_addr(dev, event);
3946done:
3947 return NOTIFY_DONE;
3948}
3949
3950static int
3951qlcnic_inetaddr_event(struct notifier_block *this,
3952 unsigned long event, void *ptr)
3953{
3954 struct qlcnic_adapter *adapter;
3955 struct net_device *dev;
3956
3957 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3958
3959 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3960
3961recheck:
3962 if (dev == NULL || !netif_running(dev))
3963 goto done;
3964
3965 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3966 dev = vlan_dev_real_dev(dev);
3967 goto recheck;
3968 }
3969
3970 if (!is_qlcnic_netdev(dev))
3971 goto done;
3972
3973 adapter = netdev_priv(dev);
3974
3975	if (!adapter)
3976 goto done;
3977
3978	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3979 goto done;
3980
3981 switch (event) {
3982 case NETDEV_UP:
3983 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
3984 break;
3985 case NETDEV_DOWN:
3986 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
3987 break;
3988 default:
3989 break;
3990 }
3991
3992done:
3993 return NOTIFY_DONE;
3994}
3995
3996static struct notifier_block qlcnic_netdev_cb = {
3997 .notifier_call = qlcnic_netdev_event,
3998};
3999
4000static struct notifier_block qlcnic_inetaddr_cb = {
4001 .notifier_call = qlcnic_inetaddr_event,
4002};
4003#else
4004static void
4005qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
4006{ }
4007#endif
4008static struct pci_error_handlers qlcnic_err_handler = {
4009 .error_detected = qlcnic_io_error_detected,
4010 .slot_reset = qlcnic_io_slot_reset,
4011 .resume = qlcnic_io_resume,
4012};
4013
4014static struct pci_driver qlcnic_driver = {
4015 .name = qlcnic_driver_name,
4016 .id_table = qlcnic_pci_tbl,
4017 .probe = qlcnic_probe,
4018 .remove = __devexit_p(qlcnic_remove),
4019#ifdef CONFIG_PM
4020 .suspend = qlcnic_suspend,
4021 .resume = qlcnic_resume,
4022#endif
4023 .shutdown = qlcnic_shutdown,
4024 .err_handler = &qlcnic_err_handler
4025
4026};
4027
4028static int __init qlcnic_init_module(void)
4029{
4030	int ret;
4031
4032 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4033
4034#ifdef CONFIG_INET
4035 register_netdevice_notifier(&qlcnic_netdev_cb);
4036 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4037#endif
4038
4039 ret = pci_register_driver(&qlcnic_driver);
4040 if (ret) {
4041#ifdef CONFIG_INET
4042 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4043 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4044#endif
4045 }
4046
4047	return ret;
4048}
4049
4050module_init(qlcnic_init_module);
4051
4052static void __exit qlcnic_exit_module(void)
4053{
4054
4055 pci_unregister_driver(&qlcnic_driver);
4056
4057#ifdef CONFIG_INET
4058 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4059 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4060#endif
4061}
4062
4063module_exit(qlcnic_exit_module);