qlcnic: Disable loopback support
[linux-2.6-block.git] drivers/net/qlcnic/qlcnic_main.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
5a0e3ad6 25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
7e56cac4 31#include <linux/swab.h>
32#include <linux/dma-mapping.h>
33#include <linux/if_vlan.h>
34#include <net/ip.h>
35#include <linux/ipv6.h>
36#include <linux/inetdevice.h>
37#include <linux/sysfs.h>
451724c8 38#include <linux/aer.h>
af19b491 39
7f9a0c34 40MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
41MODULE_LICENSE("GPL");
42MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
43MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
44
45char qlcnic_driver_name[] = "qlcnic";
46static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
47 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
af19b491 48
f7ec804a 49static struct workqueue_struct *qlcnic_wq;
50static int qlcnic_mac_learn;
51module_param(qlcnic_mac_learn, int, 0644);
52MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
53
54static int use_msi = 1;
55module_param(use_msi, int, 0644);
 56MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
57
58static int use_msi_x = 1;
59module_param(use_msi_x, int, 0644);
 60MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
61
62static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
63module_param(auto_fw_reset, int, 0644);
 64MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
65
66static int load_fw_file;
67module_param(load_fw_file, int, 0644);
 68MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
69
70static int qlcnic_config_npars;
71module_param(qlcnic_config_npars, int, 0644);
 72MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
73
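/*
 * The module parameters above can be given at load time, e.g.
 *     modprobe qlcnic use_msi_x=0 qlcnic_mac_learn=1
 * (example invocation). Because they are registered with mode 0644 they are
 * also visible under /sys/module/qlcnic/parameters/, but only code paths
 * that re-read the variables pick up changes made at runtime.
 */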
74static int __devinit qlcnic_probe(struct pci_dev *pdev,
75 const struct pci_device_id *ent);
76static void __devexit qlcnic_remove(struct pci_dev *pdev);
77static int qlcnic_open(struct net_device *netdev);
78static int qlcnic_close(struct net_device *netdev);
af19b491 79static void qlcnic_tx_timeout(struct net_device *netdev);
80static void qlcnic_attach_work(struct work_struct *work);
81static void qlcnic_fwinit_work(struct work_struct *work);
82static void qlcnic_fw_poll_work(struct work_struct *work);
83static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
84 work_func_t func, int delay);
85static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
86static int qlcnic_poll(struct napi_struct *napi, int budget);
8f891387 87static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
88#ifdef CONFIG_NET_POLL_CONTROLLER
89static void qlcnic_poll_controller(struct net_device *netdev);
90#endif
91
92static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
95static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
96
6df900e9 97static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
21854f02 98static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
99static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
100
7eb9855d 101static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
102static irqreturn_t qlcnic_intr(int irq, void *data);
103static irqreturn_t qlcnic_msi_intr(int irq, void *data);
104static irqreturn_t qlcnic_msix_intr(int irq, void *data);
105
106static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
aec1e845 107static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
108static int qlcnic_start_firmware(struct qlcnic_adapter *);
109
110static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
111static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
9f26f547 112static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
113static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
114static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
115static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
116static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
117 struct qlcnic_esw_func_cfg *);
118/* PCI Device ID Table */
119#define ENTRY(device) \
120 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
121 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
122
123#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
124
6a902881 125static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
126 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
127 {0,}
128};
129
130MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
131
132
133void
134qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
135 struct qlcnic_host_tx_ring *tx_ring)
136{
137 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
138}
139
140static const u32 msi_tgt_status[8] = {
141 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
142 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
143 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
144 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
145};
146
147static const
148struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
149
150static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
151{
152 writel(0, sds_ring->crb_intr_mask);
153}
154
155static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
156{
157 struct qlcnic_adapter *adapter = sds_ring->adapter;
158
159 writel(0x1, sds_ring->crb_intr_mask);
160
161 if (!QLCNIC_IS_MSI_FAMILY(adapter))
162 writel(0xfbff, adapter->tgt_mask_reg);
163}
164
165static int
166qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
167{
168 int size = sizeof(struct qlcnic_host_sds_ring) * count;
169
170 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
171
807540ba 172 return recv_ctx->sds_rings == NULL;
173}
174
175static void
176qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
177{
178 if (recv_ctx->sds_rings != NULL)
179 kfree(recv_ctx->sds_rings);
180
181 recv_ctx->sds_rings = NULL;
182}
183
184static int
185qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
186{
187 int ring;
188 struct qlcnic_host_sds_ring *sds_ring;
189 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
190
191 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
192 return -ENOMEM;
193
194 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
195 sds_ring = &recv_ctx->sds_rings[ring];
8f891387 196
197 if (ring == adapter->max_sds_rings - 1)
198 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
199 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
200 else
201 netif_napi_add(netdev, &sds_ring->napi,
202 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
203 }
204
205 return 0;
206}
207
208static void
209qlcnic_napi_del(struct qlcnic_adapter *adapter)
210{
211 int ring;
212 struct qlcnic_host_sds_ring *sds_ring;
213 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
214
215 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
216 sds_ring = &recv_ctx->sds_rings[ring];
217 netif_napi_del(&sds_ring->napi);
218 }
219
220 qlcnic_free_sds_rings(&adapter->recv_ctx);
221}
222
223static void
224qlcnic_napi_enable(struct qlcnic_adapter *adapter)
225{
226 int ring;
227 struct qlcnic_host_sds_ring *sds_ring;
228 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
229
230 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
231 return;
232
233 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
234 sds_ring = &recv_ctx->sds_rings[ring];
235 napi_enable(&sds_ring->napi);
236 qlcnic_enable_int(sds_ring);
237 }
238}
239
240static void
241qlcnic_napi_disable(struct qlcnic_adapter *adapter)
242{
243 int ring;
244 struct qlcnic_host_sds_ring *sds_ring;
245 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
246
247 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
248 return;
249
250 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
251 sds_ring = &recv_ctx->sds_rings[ring];
252 qlcnic_disable_int(sds_ring);
253 napi_synchronize(&sds_ring->napi);
254 napi_disable(&sds_ring->napi);
255 }
256}
257
258static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
259{
260 memset(&adapter->stats, 0, sizeof(adapter->stats));
261}
262
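/*
 * Enable or disable MSI-X by updating the Enable bit in the device's
 * MSI-X capability control word through PCI config space.
 */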
263static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
264{
265 u32 control;
266 int pos;
267
268 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
269 if (pos) {
270 pci_read_config_dword(pdev, pos, &control);
271 if (enable)
272 control |= PCI_MSIX_FLAGS_ENABLE;
273 else
274 control = 0;
275 pci_write_config_dword(pdev, pos, control);
276 }
277}
278
279static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
280{
281 int i;
282
283 for (i = 0; i < count; i++)
284 adapter->msix_entries[i].entry = i;
285}
286
287static int
288qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
289{
2e9d722d 290 u8 mac_addr[ETH_ALEN];
291 struct net_device *netdev = adapter->netdev;
292 struct pci_dev *pdev = adapter->pdev;
293
da48e6c3 294 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
295 return -EIO;
296
2e9d722d 297 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
298 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
299 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
300
301 /* set station address */
302
303 if (!is_valid_ether_addr(netdev->perm_addr))
304 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
305 netdev->dev_addr);
306
307 return 0;
308}
309
310static int qlcnic_set_mac(struct net_device *netdev, void *p)
311{
312 struct qlcnic_adapter *adapter = netdev_priv(netdev);
313 struct sockaddr *addr = p;
314
315 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
316 return -EOPNOTSUPP;
317
318 if (!is_valid_ether_addr(addr->sa_data))
319 return -EINVAL;
320
8a15ad1f 321 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
322 netif_device_detach(netdev);
323 qlcnic_napi_disable(adapter);
324 }
325
326 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
327 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
328 qlcnic_set_multi(adapter->netdev);
329
8a15ad1f 330 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
331 netif_device_attach(netdev);
332 qlcnic_napi_enable(adapter);
333 }
334 return 0;
335}
336
337static void qlcnic_vlan_rx_register(struct net_device *netdev,
338 struct vlan_group *grp)
339{
340 struct qlcnic_adapter *adapter = netdev_priv(netdev);
341 adapter->vlgrp = grp;
342}
343
344static const struct net_device_ops qlcnic_netdev_ops = {
345 .ndo_open = qlcnic_open,
346 .ndo_stop = qlcnic_close,
347 .ndo_start_xmit = qlcnic_xmit_frame,
348 .ndo_get_stats = qlcnic_get_stats,
349 .ndo_validate_addr = eth_validate_addr,
350 .ndo_set_multicast_list = qlcnic_set_multi,
351 .ndo_set_mac_address = qlcnic_set_mac,
352 .ndo_change_mtu = qlcnic_change_mtu,
353 .ndo_tx_timeout = qlcnic_tx_timeout,
d5790663 354 .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
355#ifdef CONFIG_NET_POLL_CONTROLLER
356 .ndo_poll_controller = qlcnic_poll_controller,
357#endif
358};
359
2e9d722d 360static struct qlcnic_nic_template qlcnic_ops = {
361 .config_bridged_mode = qlcnic_config_bridged_mode,
362 .config_led = qlcnic_config_led,
363 .start_firmware = qlcnic_start_firmware
364};
365
366static struct qlcnic_nic_template qlcnic_vf_ops = {
367 .config_bridged_mode = qlcnicvf_config_bridged_mode,
368 .config_led = qlcnicvf_config_led,
9f26f547 369 .start_firmware = qlcnicvf_start_firmware
370};
371
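/*
 * Select the interrupt mode: try MSI-X first (one SDS ring per vector when
 * RSS is supported), fall back to MSI, and finally to legacy INTx using the
 * per-function target status/mask registers.
 */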
372static void
373qlcnic_setup_intr(struct qlcnic_adapter *adapter)
374{
375 const struct qlcnic_legacy_intr_set *legacy_intrp;
376 struct pci_dev *pdev = adapter->pdev;
377 int err, num_msix;
378
379 if (adapter->rss_supported) {
380 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
381 MSIX_ENTRIES_PER_ADAPTER : 2;
382 } else
383 num_msix = 1;
384
385 adapter->max_sds_rings = 1;
386
387 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
388
389 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
390
391 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
392 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
393 legacy_intrp->tgt_status_reg);
394 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
395 legacy_intrp->tgt_mask_reg);
396 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
397
398 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
399 ISR_INT_STATE_REG);
400
401 qlcnic_set_msix_bit(pdev, 0);
402
403 if (adapter->msix_supported) {
404
405 qlcnic_init_msix_entries(adapter, num_msix);
406 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
407 if (err == 0) {
408 adapter->flags |= QLCNIC_MSIX_ENABLED;
409 qlcnic_set_msix_bit(pdev, 1);
410
411 if (adapter->rss_supported)
412 adapter->max_sds_rings = num_msix;
413
414 dev_info(&pdev->dev, "using msi-x interrupts\n");
415 return;
416 }
417
418 if (err > 0)
419 pci_disable_msix(pdev);
420
421 /* fall through for msi */
422 }
423
424 if (use_msi && !pci_enable_msi(pdev)) {
425 adapter->flags |= QLCNIC_MSI_ENABLED;
426 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
427 msi_tgt_status[adapter->ahw.pci_func]);
428 dev_info(&pdev->dev, "using msi interrupts\n");
429 adapter->msix_entries[0].vector = pdev->irq;
430 return;
431 }
432
433 dev_info(&pdev->dev, "using legacy interrupts\n");
434 adapter->msix_entries[0].vector = pdev->irq;
435}
436
437static void
438qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
439{
440 if (adapter->flags & QLCNIC_MSIX_ENABLED)
441 pci_disable_msix(adapter->pdev);
442 if (adapter->flags & QLCNIC_MSI_ENABLED)
443 pci_disable_msi(adapter->pdev);
444}
445
446static void
447qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
448{
449 if (adapter->ahw.pci_base0 != NULL)
450 iounmap(adapter->ahw.pci_base0);
451}
452
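/*
 * Build the per-PCI-function NPAR table and the eswitch table from the
 * firmware's PCI info; on failure the partially allocated arrays are freed
 * before returning.
 */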
453static int
454qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
455{
e88db3bd 456 struct qlcnic_pci_info *pci_info;
900853a4 457 int i, ret = 0;
458 u8 pfn;
459
460 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
461 if (!pci_info)
462 return -ENOMEM;
463
ca315ac2 464 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
346fe763 465 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
e88db3bd 466 if (!adapter->npars) {
900853a4 467 ret = -ENOMEM;
468 goto err_pci_info;
469 }
346fe763 470
ca315ac2 471 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
472 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
473 if (!adapter->eswitch) {
900853a4 474 ret = -ENOMEM;
ca315ac2 475 goto err_npars;
476 }
477
478 ret = qlcnic_get_pci_info(adapter, pci_info);
479 if (ret)
480 goto err_eswitch;
346fe763 481
482 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
483 pfn = pci_info[i].id;
484 if (pfn > QLCNIC_MAX_PCI_FUNC)
485 return QL_STATUS_INVALID_PARAM;
486 adapter->npars[pfn].active = (u8)pci_info[i].active;
487 adapter->npars[pfn].type = (u8)pci_info[i].type;
488 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
489 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
490 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
491 }
492
493 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
494 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
495
e88db3bd 496 kfree(pci_info);
497 return 0;
498
499err_eswitch:
500 kfree(adapter->eswitch);
501 adapter->eswitch = NULL;
ca315ac2 502err_npars:
346fe763 503 kfree(adapter->npars);
ca315ac2 504 adapter->npars = NULL;
505err_pci_info:
506 kfree(pci_info);
507
508 return ret;
509}
510
511static int
512qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
513{
514 u8 id;
515 u32 ref_count;
516 int i, ret = 1;
517 u32 data = QLCNIC_MGMT_FUNC;
518 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
519
520 /* If other drivers are not in use set their privilege level */
31018e06 521 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
522 ret = qlcnic_api_lock(adapter);
523 if (ret)
524 goto err_lock;
2e9d722d 525
526 if (qlcnic_config_npars) {
527 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
346fe763 528 id = i;
529 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
530 id == adapter->ahw.pci_func)
531 continue;
532 data |= (qlcnic_config_npars &
533 QLC_DEV_SET_DRV(0xf, id));
534 }
535 } else {
536 data = readl(priv_op);
537 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
538 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
539 adapter->ahw.pci_func));
540 }
541 writel(data, priv_op);
542 qlcnic_api_unlock(adapter);
543err_lock:
544 return ret;
545}
546
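/*
 * Read the firmware HAL version, derive the PCI function number from the
 * MSI-X table offset, and select the VF operations for non-privileged
 * functions based on the driver operating-mode register.
 */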
547static void
548qlcnic_check_vf(struct qlcnic_adapter *adapter)
549{
550 void __iomem *msix_base_addr;
551 void __iomem *priv_op;
552 u32 func;
553 u32 msix_base;
554 u32 op_mode, priv_level;
555
556 /* Determine FW API version */
557 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
558
559 /* Find PCI function number */
560 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
561 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
562 msix_base = readl(msix_base_addr);
563 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
564 adapter->ahw.pci_func = func;
565
566 /* Determine function privilege level */
567 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
568 op_mode = readl(priv_op);
0e33c664 569 if (op_mode == QLC_DEV_DRV_DEFAULT)
2e9d722d 570 priv_level = QLCNIC_MGMT_FUNC;
0e33c664 571 else
572 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
573
0866d96d 574 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
575 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
576 dev_info(&adapter->pdev->dev,
577 "HAL Version: %d Non Privileged function\n",
578 adapter->fw_hal_version);
579 adapter->nic_ops = &qlcnic_vf_ops;
580 } else
581 adapter->nic_ops = &qlcnic_ops;
582}
583
584static int
585qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
586{
587 void __iomem *mem_ptr0 = NULL;
588 resource_size_t mem_base;
589 unsigned long mem_len, pci_len0 = 0;
590
591 struct pci_dev *pdev = adapter->pdev;
af19b491 592
593 /* remap phys address */
594 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
595 mem_len = pci_resource_len(pdev, 0);
596
597 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
598
599 mem_ptr0 = pci_ioremap_bar(pdev, 0);
600 if (mem_ptr0 == NULL) {
601 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
602 return -EIO;
603 }
604 pci_len0 = mem_len;
605 } else {
606 return -EIO;
607 }
608
609 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
610
611 adapter->ahw.pci_base0 = mem_ptr0;
612 adapter->ahw.pci_len0 = pci_len0;
613
0866d96d 614 qlcnic_check_vf(adapter);
2e9d722d 615
af19b491 616 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
2e9d722d 617 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
618
619 return 0;
620}
621
622static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
623{
624 struct pci_dev *pdev = adapter->pdev;
625 int i, found = 0;
626
627 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
628 if (qlcnic_boards[i].vendor == pdev->vendor &&
629 qlcnic_boards[i].device == pdev->device &&
630 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
631 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
632 sprintf(name, "%pM: %s" ,
633 adapter->mac_addr,
634 qlcnic_boards[i].short_name);
635 found = 1;
636 break;
637 }
638
639 }
640
641 if (!found)
7f9a0c34 642 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
643}
644
645static void
646qlcnic_check_options(struct qlcnic_adapter *adapter)
647{
648 u32 fw_major, fw_minor, fw_build;
af19b491 649 struct pci_dev *pdev = adapter->pdev;
650
651 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
652 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
653 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
654
655 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
656
657 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
658 fw_major, fw_minor, fw_build);
af19b491 659 if (adapter->ahw.port_type == QLCNIC_XGBE) {
660 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
661 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
662 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
663 } else {
664 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
665 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
666 }
667
af19b491 668 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
669 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
670
671 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
672 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
673 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
674 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
675 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
676 }
677
678 adapter->msix_supported = !!use_msi_x;
679 adapter->rss_supported = !!use_msi_x;
680
681 adapter->num_txd = MAX_CMD_DESCRIPTORS;
682
251b036a 683 adapter->max_rds_rings = MAX_RDS_RINGS;
684}
685
686static int
687qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
688{
689 int err;
690 struct qlcnic_info nic_info;
691
692 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
693 if (err)
694 return err;
695
a1c0c459 696 adapter->physical_port = (u8)nic_info.phys_port;
697 adapter->switch_mode = nic_info.switch_mode;
698 adapter->max_tx_ques = nic_info.max_tx_ques;
699 adapter->max_rx_ques = nic_info.max_rx_ques;
700 adapter->capabilities = nic_info.capabilities;
701 adapter->max_mac_filters = nic_info.max_mac_filters;
702 adapter->max_mtu = nic_info.max_mtu;
703
704 if (adapter->capabilities & BIT_6)
705 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
706 else
707 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
708
709 return err;
710}
711
712static void
713qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
714 struct qlcnic_esw_func_cfg *esw_cfg)
715{
716 if (esw_cfg->discard_tagged)
717 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
718 else
719 adapter->flags |= QLCNIC_TAGGING_ENABLED;
720
721 if (esw_cfg->vlan_id)
722 adapter->pvid = esw_cfg->vlan_id;
723 else
724 adapter->pvid = 0;
725}
726
727static void
728qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
729 struct qlcnic_esw_func_cfg *esw_cfg)
730{
731 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
732 QLCNIC_PROMISC_DISABLED);
733
734 if (esw_cfg->mac_anti_spoof)
735 adapter->flags |= QLCNIC_MACSPOOF;
fe4d434d 736
737 if (!esw_cfg->mac_override)
738 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
739
740 if (!esw_cfg->promisc_mode)
741 adapter->flags |= QLCNIC_PROMISC_DISABLED;
742
743 qlcnic_set_netdev_features(adapter, esw_cfg);
744}
745
746static int
747qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
748{
749 struct qlcnic_esw_func_cfg esw_cfg;
750
751 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
752 return 0;
753
754 esw_cfg.pci_func = adapter->ahw.pci_func;
755 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
756 return -EIO;
8cf61f89 757 qlcnic_set_vlan_config(adapter, &esw_cfg);
758 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
759
760 return 0;
761}
762
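/*
 * Translate the eswitch offload flags into netdev feature bits: BIT_0
 * enables checksum/SG/GRO (plus TSO and LRO when the firmware advertises
 * them), while BIT_1 and BIT_2 gate TSO and TSO6 respectively.
 */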
763static void
764qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
765 struct qlcnic_esw_func_cfg *esw_cfg)
766{
767 struct net_device *netdev = adapter->netdev;
768 unsigned long features, vlan_features;
769
770 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
771 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
772 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
773 NETIF_F_IPV6_CSUM);
774
775 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
776 features |= (NETIF_F_TSO | NETIF_F_TSO6);
777 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
778 }
779 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
780 features |= NETIF_F_LRO;
781
782 if (esw_cfg->offload_flags & BIT_0) {
783 netdev->features |= features;
784 adapter->rx_csum = 1;
785 if (!(esw_cfg->offload_flags & BIT_1))
786 netdev->features &= ~NETIF_F_TSO;
787 if (!(esw_cfg->offload_flags & BIT_2))
788 netdev->features &= ~NETIF_F_TSO6;
789 } else {
790 netdev->features &= ~features;
791 adapter->rx_csum = 0;
792 }
793
794 netdev->vlan_features = (features & vlan_features);
795}
796
797static int
798qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
799{
800 void __iomem *priv_op;
801 u32 op_mode, priv_level;
802 int err = 0;
803
804 err = qlcnic_initialize_nic(adapter);
805 if (err)
806 return err;
807
808 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
809 return 0;
810
811 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
812 op_mode = readl(priv_op);
813 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
814
815 if (op_mode == QLC_DEV_DRV_DEFAULT)
816 priv_level = QLCNIC_MGMT_FUNC;
817 else
818 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
819
174240a8 820 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
821 if (priv_level == QLCNIC_MGMT_FUNC) {
822 adapter->op_mode = QLCNIC_MGMT_FUNC;
823 err = qlcnic_init_pci_info(adapter);
824 if (err)
825 return err;
826 /* Set privilege level for other functions */
827 qlcnic_set_function_modes(adapter);
828 dev_info(&adapter->pdev->dev,
829 "HAL Version: %d, Management function\n",
830 adapter->fw_hal_version);
831 } else if (priv_level == QLCNIC_PRIV_FUNC) {
832 adapter->op_mode = QLCNIC_PRIV_FUNC;
833 dev_info(&adapter->pdev->dev,
834 "HAL Version: %d, Privileged function\n",
835 adapter->fw_hal_version);
836 }
174240a8 837 }
838
839 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
840
841 return err;
842}
843
844static int
845qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
846{
847 struct qlcnic_esw_func_cfg esw_cfg;
848 struct qlcnic_npar_info *npar;
849 u8 i;
850
174240a8 851 if (adapter->need_fw_reset)
852 return 0;
853
854 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
855 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
856 continue;
857 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
858 esw_cfg.pci_func = i;
859 esw_cfg.offload_flags = BIT_0;
7373373d 860 esw_cfg.mac_override = BIT_0;
ee07c1a7 861 esw_cfg.promisc_mode = BIT_0;
862 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
863 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
864 if (qlcnic_config_switch_port(adapter, &esw_cfg))
865 return -EIO;
866 npar = &adapter->npars[i];
867 npar->pvid = esw_cfg.vlan_id;
7373373d 868 npar->mac_override = esw_cfg.mac_override;
869 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
870 npar->discard_tagged = esw_cfg.discard_tagged;
871 npar->promisc_mode = esw_cfg.promisc_mode;
872 npar->offload_flags = esw_cfg.offload_flags;
873 }
874
875 return 0;
876}
877
878static int
879qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
880 struct qlcnic_npar_info *npar, int pci_func)
881{
882 struct qlcnic_esw_func_cfg esw_cfg;
883 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
884 esw_cfg.pci_func = pci_func;
885 esw_cfg.vlan_id = npar->pvid;
7373373d 886 esw_cfg.mac_override = npar->mac_override;
887 esw_cfg.discard_tagged = npar->discard_tagged;
888 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
889 esw_cfg.offload_flags = npar->offload_flags;
890 esw_cfg.promisc_mode = npar->promisc_mode;
891 if (qlcnic_config_switch_port(adapter, &esw_cfg))
892 return -EIO;
893
894 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
895 if (qlcnic_config_switch_port(adapter, &esw_cfg))
896 return -EIO;
897
898 return 0;
899}
900
901static int
902qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
903{
4e8acb01 904 int i, err;
905 struct qlcnic_npar_info *npar;
906 struct qlcnic_info nic_info;
907
174240a8 908 if (!adapter->need_fw_reset)
909 return 0;
910
911 /* Set the NPAR config data after FW reset */
912 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
913 npar = &adapter->npars[i];
914 if (npar->type != QLCNIC_TYPE_NIC)
915 continue;
916 err = qlcnic_get_nic_info(adapter, &nic_info, i);
917 if (err)
918 return err;
919 nic_info.min_tx_bw = npar->min_bw;
920 nic_info.max_tx_bw = npar->max_bw;
921 err = qlcnic_set_nic_info(adapter, &nic_info);
922 if (err)
923 return err;
cea8975e 924
925 if (npar->enable_pm) {
926 err = qlcnic_config_port_mirroring(adapter,
927 npar->dest_npar, 1, i);
928 if (err)
929 return err;
cea8975e 930 }
931 err = qlcnic_reset_eswitch_config(adapter, npar, i);
932 if (err)
933 return err;
cea8975e 934 }
4e8acb01 935 return 0;
936}
937
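/*
 * For non-management functions, poll QLCNIC_CRB_DEV_NPAR_STATE once a
 * second until it reports QLCNIC_DEV_NPAR_OPER or the
 * QLCNIC_DEV_NPAR_OPER_TIMEO budget is exhausted.
 */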
938static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
939{
940 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
941 u32 npar_state;
942
943 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
944 return 0;
945
946 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
947 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
948 msleep(1000);
949 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
950 }
951 if (!npar_opt_timeo) {
952 dev_err(&adapter->pdev->dev,
953 "Waiting for NPAR state to opertional timeout\n");
954 return -EIO;
955 }
956 return 0;
957}
958
959static int
960qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
961{
962 int err;
963
964 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
965 adapter->op_mode != QLCNIC_MGMT_FUNC)
966 return 0;
967
968 err = qlcnic_set_default_offload_settings(adapter);
969 if (err)
970 return err;
971
972 err = qlcnic_reset_npar_config(adapter);
973 if (err)
974 return err;
975
976 qlcnic_dev_set_npar_ready(adapter);
977
978 return err;
979}
980
981static int
982qlcnic_start_firmware(struct qlcnic_adapter *adapter)
983{
d4066833 984 int err;
af19b491 985
986 err = qlcnic_can_start_firmware(adapter);
987 if (err < 0)
988 return err;
989 else if (!err)
d4066833 990 goto check_fw_status;
af19b491 991
992 if (load_fw_file)
993 qlcnic_request_firmware(adapter);
8f891387 994 else {
995 err = qlcnic_check_flash_fw_ver(adapter);
996 if (err)
8f891387 997 goto err_out;
998
4d5bdb38 999 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
8f891387 1000 }
1001
1002 err = qlcnic_need_fw_reset(adapter);
af19b491 1003 if (err == 0)
4e70812b 1004 goto check_fw_status;
af19b491 1005
1006 err = qlcnic_pinit_from_rom(adapter);
1007 if (err)
1008 goto err_out;
1009
1010 err = qlcnic_load_firmware(adapter);
1011 if (err)
1012 goto err_out;
1013
1014 qlcnic_release_firmware(adapter);
d4066833 1015 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
af19b491 1016
1017check_fw_status:
1018 err = qlcnic_check_fw_status(adapter);
1019 if (err)
1020 goto err_out;
1021
1022 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
6df900e9 1023 qlcnic_idc_debug_info(adapter, 1);
b18971d1 1024
1025 err = qlcnic_check_eswitch_mode(adapter);
1026 if (err) {
1027 dev_err(&adapter->pdev->dev,
1028 "Memory allocation failed for eswitch\n");
1029 goto err_out;
1030 }
1031 err = qlcnic_set_mgmt_operations(adapter);
1032 if (err)
1033 goto err_out;
1034
1035 qlcnic_check_options(adapter);
1036 adapter->need_fw_reset = 0;
1037
1038 qlcnic_release_firmware(adapter);
1039 return 0;
1040
1041err_out:
1042 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1043 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
0866d96d 1044
1045 qlcnic_release_firmware(adapter);
1046 return err;
1047}
1048
1049static int
1050qlcnic_request_irq(struct qlcnic_adapter *adapter)
1051{
1052 irq_handler_t handler;
1053 struct qlcnic_host_sds_ring *sds_ring;
1054 int err, ring;
1055
1056 unsigned long flags = 0;
1057 struct net_device *netdev = adapter->netdev;
1058 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1059
1060 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1061 handler = qlcnic_tmp_intr;
1062 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1063 flags |= IRQF_SHARED;
1064
1065 } else {
1066 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1067 handler = qlcnic_msix_intr;
1068 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1069 handler = qlcnic_msi_intr;
1070 else {
1071 flags |= IRQF_SHARED;
1072 handler = qlcnic_intr;
1073 }
1074 }
1075 adapter->irq = netdev->irq;
1076
1077 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1078 sds_ring = &recv_ctx->sds_rings[ring];
1079 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1080 err = request_irq(sds_ring->irq, handler,
1081 flags, sds_ring->name, sds_ring);
1082 if (err)
1083 return err;
1084 }
1085
1086 return 0;
1087}
1088
1089static void
1090qlcnic_free_irq(struct qlcnic_adapter *adapter)
1091{
1092 int ring;
1093 struct qlcnic_host_sds_ring *sds_ring;
1094
1095 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1096
1097 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1098 sds_ring = &recv_ctx->sds_rings[ring];
1099 free_irq(sds_ring->irq, sds_ring);
1100 }
1101}
1102
1103static void
1104qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1105{
1106 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1107 adapter->coal.normal.data.rx_time_us =
1108 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1109 adapter->coal.normal.data.rx_packets =
1110 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1111 adapter->coal.normal.data.tx_time_us =
1112 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1113 adapter->coal.normal.data.tx_packets =
1114 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1115}
1116
1117static int
1118__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1119{
1120 int ring;
1121 struct qlcnic_host_rds_ring *rds_ring;
1122
1123 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1124 return -EIO;
1125
1126 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1127 return 0;
0325d69b
RB
1128 if (qlcnic_set_eswitch_port_config(adapter))
1129 return -EIO;
1130
1131 if (qlcnic_fw_create_ctx(adapter))
1132 return -EIO;
1133
1134 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1135 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1136 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1137 }
1138
1139 qlcnic_set_multi(netdev);
1140 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1141
1142 adapter->ahw.linkup = 0;
1143
1144 if (adapter->max_sds_rings > 1)
1145 qlcnic_config_rss(adapter, 1);
1146
1147 qlcnic_config_intr_coalesce(adapter);
1148
24763d80 1149 if (netdev->features & NETIF_F_LRO)
1150 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1151
1152 qlcnic_napi_enable(adapter);
1153
1154 qlcnic_linkevent_request(adapter, 1);
1155
68bf1c68 1156 adapter->reset_context = 0;
1157 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1158 return 0;
1159}
1160
1161/* Usage: During resume and firmware recovery module.*/
1162
1163static int
1164qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1165{
1166 int err = 0;
1167
1168 rtnl_lock();
1169 if (netif_running(netdev))
1170 err = __qlcnic_up(adapter, netdev);
1171 rtnl_unlock();
1172
1173 return err;
1174}
1175
1176static void
1177__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1178{
1179 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1180 return;
1181
1182 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1183 return;
1184
1185 smp_mb();
1186 spin_lock(&adapter->tx_clean_lock);
1187 netif_carrier_off(netdev);
1188 netif_tx_disable(netdev);
1189
1190 qlcnic_free_mac_list(adapter);
1191
1192 if (adapter->fhash.fnum)
1193 qlcnic_delete_lb_filters(adapter);
1194
1195 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1196
1197 qlcnic_napi_disable(adapter);
1198
1199 qlcnic_fw_destroy_ctx(adapter);
1200
1201 qlcnic_reset_rx_buffers_list(adapter);
1202 qlcnic_release_tx_buffers(adapter);
1203 spin_unlock(&adapter->tx_clean_lock);
1204}
1205
1206/* Usage: During suspend and firmware recovery module */
1207
1208static void
1209qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1210{
1211 rtnl_lock();
1212 if (netif_running(netdev))
1213 __qlcnic_down(adapter, netdev);
1214 rtnl_unlock();
1215
1216}
1217
1218static int
1219qlcnic_attach(struct qlcnic_adapter *adapter)
1220{
1221 struct net_device *netdev = adapter->netdev;
1222 struct pci_dev *pdev = adapter->pdev;
8a15ad1f 1223 int err;
1224
1225 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1226 return 0;
1227
1228 err = qlcnic_napi_add(adapter, netdev);
1229 if (err)
1230 return err;
1231
1232 err = qlcnic_alloc_sw_resources(adapter);
1233 if (err) {
1234 dev_err(&pdev->dev, "Error in setting sw resources\n");
8a15ad1f 1235 goto err_out_napi_del;
1236 }
1237
1238 err = qlcnic_alloc_hw_resources(adapter);
1239 if (err) {
1240 dev_err(&pdev->dev, "Error in setting hw resources\n");
1241 goto err_out_free_sw;
1242 }
1243
1244 err = qlcnic_request_irq(adapter);
1245 if (err) {
1246 dev_err(&pdev->dev, "failed to setup interrupt\n");
8a15ad1f 1247 goto err_out_free_hw;
1248 }
1249
1250 qlcnic_init_coalesce_defaults(adapter);
1251
1252 qlcnic_create_sysfs_entries(adapter);
1253
1254 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1255 return 0;
1256
8a15ad1f 1257err_out_free_hw:
1258 qlcnic_free_hw_resources(adapter);
1259err_out_free_sw:
1260 qlcnic_free_sw_resources(adapter);
1261err_out_napi_del:
1262 qlcnic_napi_del(adapter);
1263 return err;
1264}
1265
1266static void
1267qlcnic_detach(struct qlcnic_adapter *adapter)
1268{
1269 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1270 return;
1271
1272 qlcnic_remove_sysfs_entries(adapter);
1273
1274 qlcnic_free_hw_resources(adapter);
1275 qlcnic_release_rx_buffers(adapter);
1276 qlcnic_free_irq(adapter);
1277 qlcnic_napi_del(adapter);
1278 qlcnic_free_sw_resources(adapter);
1279
1280 adapter->is_up = 0;
1281}
1282
1283void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1284{
1285 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1286 struct qlcnic_host_sds_ring *sds_ring;
1287 int ring;
1288
78ad3892 1289 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1290 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1291 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1292 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1293 qlcnic_disable_int(sds_ring);
1294 }
1295 }
1296
1297 qlcnic_fw_destroy_ctx(adapter);
1298
1299 qlcnic_detach(adapter);
1300
1301 adapter->diag_test = 0;
1302 adapter->max_sds_rings = max_sds_rings;
1303
1304 if (qlcnic_attach(adapter))
34ce3626 1305 goto out;
1306
1307 if (netif_running(netdev))
1308 __qlcnic_up(adapter, netdev);
34ce3626 1309out:
1310 netif_device_attach(netdev);
1311}
1312
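/*
 * Switch the interface into diagnostic mode: tear down the normal context,
 * re-attach with a single SDS ring, recreate the firmware context, repost
 * RX buffers and, for the interrupt test, re-enable status ring interrupts.
 */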
1313int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1314{
1315 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1316 struct qlcnic_host_sds_ring *sds_ring;
8a15ad1f 1317 struct qlcnic_host_rds_ring *rds_ring;
7eb9855d
AKS
1318 int ring;
1319 int ret;
1320
1321 netif_device_detach(netdev);
1322
1323 if (netif_running(netdev))
1324 __qlcnic_down(adapter, netdev);
1325
1326 qlcnic_detach(adapter);
1327
1328 adapter->max_sds_rings = 1;
1329 adapter->diag_test = test;
1330
1331 ret = qlcnic_attach(adapter);
1332 if (ret) {
1333 netif_device_attach(netdev);
7eb9855d 1334 return ret;
34ce3626 1335 }
7eb9855d 1336
1337 ret = qlcnic_fw_create_ctx(adapter);
1338 if (ret) {
1339 qlcnic_detach(adapter);
57e46248 1340 netif_device_attach(netdev);
1341 return ret;
1342 }
1343
1344 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1345 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1346 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1347 }
1348
1349 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1350 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1351 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1352 qlcnic_enable_int(sds_ring);
1353 }
7eb9855d 1354 }
78ad3892 1355 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1356
1357 return 0;
1358}
1359
1360/* Reset context in hardware only */
1361static int
1362qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1363{
1364 struct net_device *netdev = adapter->netdev;
1365
1366 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1367 return -EBUSY;
1368
1369 netif_device_detach(netdev);
1370
1371 qlcnic_down(adapter, netdev);
1372
1373 qlcnic_up(adapter, netdev);
1374
1375 netif_device_attach(netdev);
1376
1377 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1378 return 0;
1379}
1380
1381int
1382qlcnic_reset_context(struct qlcnic_adapter *adapter)
1383{
1384 int err = 0;
1385 struct net_device *netdev = adapter->netdev;
1386
1387 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1388 return -EBUSY;
1389
1390 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1391
1392 netif_device_detach(netdev);
1393
1394 if (netif_running(netdev))
1395 __qlcnic_down(adapter, netdev);
1396
1397 qlcnic_detach(adapter);
1398
1399 if (netif_running(netdev)) {
1400 err = qlcnic_attach(adapter);
1401 if (!err)
34ce3626 1402 __qlcnic_up(adapter, netdev);
1403 }
1404
1405 netif_device_attach(netdev);
1406 }
1407
1408 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1409 return err;
1410}
1411
1412static int
1413qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1bb09fb9 1414 struct net_device *netdev, u8 pci_using_dac)
1415{
1416 int err;
1417 struct pci_dev *pdev = adapter->pdev;
1418
1419 adapter->rx_csum = 1;
1420 adapter->mc_enabled = 0;
1421 adapter->max_mc_count = 38;
1422
1423 netdev->netdev_ops = &qlcnic_netdev_ops;
ef71ff83 1424 netdev->watchdog_timeo = 5*HZ;
1425
1426 qlcnic_change_mtu(netdev, netdev->mtu);
1427
1428 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1429
2e9d722d 1430 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
d5790663 1431 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
2e9d722d 1432 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1433 NETIF_F_IPV6_CSUM);
1434
1435 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1436 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1437 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1438 }
af19b491 1439
1bb09fb9 1440 if (pci_using_dac) {
1441 netdev->features |= NETIF_F_HIGHDMA;
1442 netdev->vlan_features |= NETIF_F_HIGHDMA;
1443 }
1444
1445 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1446 netdev->features |= (NETIF_F_HW_VLAN_TX);
1447
1448 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1449 netdev->features |= NETIF_F_LRO;
1450 netdev->irq = adapter->msix_entries[0].vector;
1451
af19b491 1452 netif_carrier_off(netdev);
1453
1454 err = register_netdev(netdev);
1455 if (err) {
1456 dev_err(&pdev->dev, "failed to register net device\n");
1457 return err;
1458 }
1459
1460 return 0;
1461}
1462
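/* Prefer a 64-bit DMA mask and fall back to 32-bit; fail if neither can be set */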
1463static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1464{
1465 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1466 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1467 *pci_using_dac = 1;
1468 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1469 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1470 *pci_using_dac = 0;
1471 else {
1472 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1473 return -EIO;
1474 }
1475
1476 return 0;
1477}
1478
1479static int __devinit
1480qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1481{
1482 struct net_device *netdev = NULL;
1483 struct qlcnic_adapter *adapter = NULL;
1484 int err;
af19b491 1485 uint8_t revision_id;
1bb09fb9 1486 uint8_t pci_using_dac;
da48e6c3 1487 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
b0044bcf 1488 u32 val;
1489
1490 err = pci_enable_device(pdev);
1491 if (err)
1492 return err;
1493
1494 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1495 err = -ENODEV;
1496 goto err_out_disable_pdev;
1497 }
1498
1499 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1500 if (err)
1501 goto err_out_disable_pdev;
1502
1503 err = pci_request_regions(pdev, qlcnic_driver_name);
1504 if (err)
1505 goto err_out_disable_pdev;
1506
1507 pci_set_master(pdev);
451724c8 1508 pci_enable_pcie_error_reporting(pdev);
1509
1510 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1511 if (!netdev) {
1512 dev_err(&pdev->dev, "failed to allocate net_device\n");
1513 err = -ENOMEM;
1514 goto err_out_free_res;
1515 }
1516
1517 SET_NETDEV_DEV(netdev, &pdev->dev);
1518
1519 adapter = netdev_priv(netdev);
1520 adapter->netdev = netdev;
1521 adapter->pdev = pdev;
6df900e9 1522 adapter->dev_rst_time = jiffies;
1523
1524 revision_id = pdev->revision;
1525 adapter->ahw.revision_id = revision_id;
1526
1527 rwlock_init(&adapter->ahw.crb_lock);
1528 mutex_init(&adapter->ahw.mem_lock);
1529
1530 spin_lock_init(&adapter->tx_clean_lock);
1531 INIT_LIST_HEAD(&adapter->mac_list);
1532
1533 err = qlcnic_setup_pci_map(adapter);
1534 if (err)
1535 goto err_out_free_netdev;
1536
1537 /* This will be reset for mezz cards */
2e9d722d 1538 adapter->portnum = adapter->ahw.pci_func;
1539
1540 err = qlcnic_get_board_info(adapter);
1541 if (err) {
1542 dev_err(&pdev->dev, "Error getting board config info.\n");
1543 goto err_out_iounmap;
1544 }
1545
1546 err = qlcnic_setup_idc_param(adapter);
1547 if (err)
b3a24649 1548 goto err_out_iounmap;
af19b491 1549
1550 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
1551 if (QLC_DEV_CHECK_ACTIVE(val, adapter->portnum))
1552 adapter->flags |= QLCNIC_NEED_FLR;
1553
9f26f547 1554 err = adapter->nic_ops->start_firmware(adapter);
1555 if (err) {
1556 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
af19b491 1557 goto err_out_decr_ref;
a7fc948f 1558 }
af19b491 1559
1560 if (qlcnic_read_mac_addr(adapter))
1561 dev_warn(&pdev->dev, "failed to read mac addr\n");
1562
1563 if (adapter->portnum == 0) {
1564 get_brd_name(adapter, brd_name);
1565
1566 pr_info("%s: %s Board Chip rev 0x%x\n",
1567 module_name(THIS_MODULE),
1568 brd_name, adapter->ahw.revision_id);
1569 }
1570
1571 qlcnic_clear_stats(adapter);
1572
1573 qlcnic_setup_intr(adapter);
1574
1bb09fb9 1575 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1576 if (err)
1577 goto err_out_disable_msi;
1578
1579 pci_set_drvdata(pdev, adapter);
1580
1581 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1582
1583 switch (adapter->ahw.port_type) {
1584 case QLCNIC_GBE:
1585 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1586 adapter->netdev->name);
1587 break;
1588 case QLCNIC_XGBE:
1589 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1590 adapter->netdev->name);
1591 break;
1592 }
1593
b5e5492c 1594 qlcnic_alloc_lb_filters_mem(adapter);
1595 qlcnic_create_diag_entries(adapter);
1596
1597 return 0;
1598
1599err_out_disable_msi:
1600 qlcnic_teardown_intr(adapter);
1601
1602err_out_decr_ref:
21854f02 1603 qlcnic_clr_all_drv_state(adapter, 0);
1604
1605err_out_iounmap:
1606 qlcnic_cleanup_pci_map(adapter);
1607
1608err_out_free_netdev:
1609 free_netdev(netdev);
1610
1611err_out_free_res:
1612 pci_release_regions(pdev);
1613
1614err_out_disable_pdev:
1615 pci_set_drvdata(pdev, NULL);
1616 pci_disable_device(pdev);
1617 return err;
1618}
1619
1620static void __devexit qlcnic_remove(struct pci_dev *pdev)
1621{
1622 struct qlcnic_adapter *adapter;
1623 struct net_device *netdev;
1624
1625 adapter = pci_get_drvdata(pdev);
1626 if (adapter == NULL)
1627 return;
1628
1629 netdev = adapter->netdev;
1630
1631 qlcnic_cancel_fw_work(adapter);
1632
1633 unregister_netdev(netdev);
1634
1635 qlcnic_detach(adapter);
1636
1637 if (adapter->npars != NULL)
1638 kfree(adapter->npars);
1639 if (adapter->eswitch != NULL)
1640 kfree(adapter->eswitch);
1641
21854f02 1642 qlcnic_clr_all_drv_state(adapter, 0);
1643
1644 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1645
1646 qlcnic_free_lb_filters_mem(adapter);
1647
1648 qlcnic_teardown_intr(adapter);
1649
1650 qlcnic_remove_diag_entries(adapter);
1651
1652 qlcnic_cleanup_pci_map(adapter);
1653
1654 qlcnic_release_firmware(adapter);
1655
451724c8 1656 pci_disable_pcie_error_reporting(pdev);
1657 pci_release_regions(pdev);
1658 pci_disable_device(pdev);
1659 pci_set_drvdata(pdev, NULL);
1660
1661 free_netdev(netdev);
1662}
1663static int __qlcnic_shutdown(struct pci_dev *pdev)
1664{
1665 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1666 struct net_device *netdev = adapter->netdev;
1667 int retval;
1668
1669 netif_device_detach(netdev);
1670
1671 qlcnic_cancel_fw_work(adapter);
1672
1673 if (netif_running(netdev))
1674 qlcnic_down(adapter, netdev);
1675
21854f02 1676 qlcnic_clr_all_drv_state(adapter, 0);
1677
1678 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1679
1680 retval = pci_save_state(pdev);
1681 if (retval)
1682 return retval;
1683
1684 if (qlcnic_wol_supported(adapter)) {
1685 pci_enable_wake(pdev, PCI_D3cold, 1);
1686 pci_enable_wake(pdev, PCI_D3hot, 1);
1687 }
1688
1689 return 0;
1690}
1691
1692static void qlcnic_shutdown(struct pci_dev *pdev)
1693{
1694 if (__qlcnic_shutdown(pdev))
1695 return;
1696
1697 pci_disable_device(pdev);
1698}
1699
1700#ifdef CONFIG_PM
1701static int
1702qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1703{
1704 int retval;
1705
1706 retval = __qlcnic_shutdown(pdev);
1707 if (retval)
1708 return retval;
1709
1710 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1711 return 0;
1712}
1713
1714static int
1715qlcnic_resume(struct pci_dev *pdev)
1716{
1717 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1718 struct net_device *netdev = adapter->netdev;
1719 int err;
1720
1721 err = pci_enable_device(pdev);
1722 if (err)
1723 return err;
1724
1725 pci_set_power_state(pdev, PCI_D0);
1726 pci_set_master(pdev);
1727 pci_restore_state(pdev);
1728
9f26f547 1729 err = adapter->nic_ops->start_firmware(adapter);
1730 if (err) {
1731 dev_err(&pdev->dev, "failed to start firmware\n");
1732 return err;
1733 }
1734
1735 if (netif_running(netdev)) {
1736 err = qlcnic_up(adapter, netdev);
1737 if (err)
52486a3a 1738 goto done;
af19b491 1739
aec1e845 1740 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491 1741 }
52486a3a 1742done:
1743 netif_device_attach(netdev);
1744 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1745 return 0;
1746}
1747#endif
1748
1749static int qlcnic_open(struct net_device *netdev)
1750{
1751 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1752 int err;
1753
1754 err = qlcnic_attach(adapter);
1755 if (err)
1756 return err;
1757
1758 err = __qlcnic_up(adapter, netdev);
1759 if (err)
1760 goto err_out;
1761
1762 netif_start_queue(netdev);
1763
1764 return 0;
1765
1766err_out:
1767 qlcnic_detach(adapter);
1768 return err;
1769}
1770
1771/*
1772 * qlcnic_close - Disables a network interface entry point
1773 */
1774static int qlcnic_close(struct net_device *netdev)
1775{
1776 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1777
1778 __qlcnic_down(adapter, netdev);
1779 return 0;
1780}
1781
1782static void
1783qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1784{
1785 void *head;
1786 int i;
1787
1788 if (!qlcnic_mac_learn)
1789 return;
1790
1791 spin_lock_init(&adapter->mac_learn_lock);
1792
1793 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1794 GFP_KERNEL);
1795 if (!head)
1796 return;
1797
1798 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1799 adapter->fhash.fhead = (struct hlist_head *)head;
1800
1801 for (i = 0; i < adapter->fhash.fmax; i++)
1802 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1803}
1804
1805static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1806{
1807 if (adapter->fhash.fmax && adapter->fhash.fhead)
1808 kfree(adapter->fhash.fhead);
1809
1810 adapter->fhash.fhead = NULL;
1811 adapter->fhash.fmax = 0;
1812}
1813
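/*
 * Queue a QLCNIC_MAC_EVENT request on the TX ring so firmware adds (or
 * re-adds) the learned source MAC, tagged with the VLAN id when one is in
 * use, to its filter table.
 */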
1814static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1815 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
1816{
1817 struct cmd_desc_type0 *hwdesc;
1818 struct qlcnic_nic_req *req;
1819 struct qlcnic_mac_req *mac_req;
7e56cac4 1820 struct qlcnic_vlan_req *vlan_req;
1821 u32 producer;
1822 u64 word;
1823
1824 producer = tx_ring->producer;
1825 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1826
1827 req = (struct qlcnic_nic_req *)hwdesc;
1828 memset(req, 0, sizeof(struct qlcnic_nic_req));
1829 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1830
1831 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1832 req->req_hdr = cpu_to_le64(word);
1833
1834 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1835 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
1836 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1837
1838 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1839 vlan_req->vlan_id = vlan_id;
03c5d770 1840
1841 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1842}
1843
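/*
 * MAC-learning fast path: hash a few bits of the source MAC into the
 * filter table, refresh an existing entry only after QLCNIC_READD_AGE
 * seconds, and otherwise allocate a new filter under mac_learn_lock.
 */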
1844#define QLCNIC_MAC_HASH(MAC)\
1845 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1846
1847static void
1848qlcnic_send_filter(struct qlcnic_adapter *adapter,
1849 struct qlcnic_host_tx_ring *tx_ring,
1850 struct cmd_desc_type0 *first_desc,
1851 struct sk_buff *skb)
1852{
1853 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1854 struct qlcnic_filter *fil, *tmp_fil;
1855 struct hlist_node *tmp_hnode, *n;
1856 struct hlist_head *head;
1857 u64 src_addr = 0;
7e56cac4 1858 __le16 vlan_id = 0;
1859 u8 hindex;
1860
1861 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1862 return;
1863
1864 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1865 return;
1866
 1867 /* Only NPAR capable devices support vlan based learning */
1868 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1869 vlan_id = first_desc->vlan_TCI;
1870 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1871 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1872 head = &(adapter->fhash.fhead[hindex]);
1873
1874 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
1875 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1876 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1877
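			/* Existing filter: re-program it in firmware only if
			 * more than QLCNIC_READD_AGE seconds have passed since
			 * it was last refreshed, then update its timestamp.
			 */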
1878 if (jiffies >
1879 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1880 qlcnic_change_filter(adapter, src_addr, vlan_id,
1881 tx_ring);
b5e5492c
AKS
1882 tmp_fil->ftime = jiffies;
1883 return;
1884 }
1885 }
1886
1887 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1888 if (!fil)
1889 return;
1890
03c5d770 1891 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1892
1893 fil->ftime = jiffies;
03c5d770 1894 fil->vlan_id = vlan_id;
b5e5492c
AKS
1895 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1896 spin_lock(&adapter->mac_learn_lock);
1897 hlist_add_head(&(fil->fnode), head);
1898 adapter->fhash.fnum++;
1899 spin_unlock(&adapter->mac_learn_lock);
1900}
1901
af19b491
AKS
1902static void
1903qlcnic_tso_check(struct net_device *netdev,
1904 struct qlcnic_host_tx_ring *tx_ring,
1905 struct cmd_desc_type0 *first_desc,
1906 struct sk_buff *skb)
1907{
1908 u8 opcode = TX_ETHER_PKT;
1909 __be16 protocol = skb->protocol;
8cf61f89
AKS
1910 u16 flags = 0;
1911 int copied, offset, copy_len, hdr_len = 0, tso = 0;
af19b491
AKS
1912 struct cmd_desc_type0 *hwdesc;
1913 struct vlan_ethhdr *vh;
8bfe8b91 1914 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2e9d722d 1915 u32 producer = tx_ring->producer;
7e56cac4
SC
1916 __le16 vlan_oob = first_desc->flags_opcode &
1917 cpu_to_le16(FLAGS_VLAN_OOB);
af19b491 1918
2e9d722d
AC
1919 if (*(skb->data) & BIT_0) {
1920 flags |= BIT_0;
1921 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1922 }
1923
af19b491
AKS
1924 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1925 skb_shinfo(skb)->gso_size > 0) {
1926
1927 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1928
1929 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1930 first_desc->total_hdr_length = hdr_len;
1931 if (vlan_oob) {
1932 first_desc->total_hdr_length += VLAN_HLEN;
1933 first_desc->tcp_hdr_offset = VLAN_HLEN;
1934 first_desc->ip_hdr_offset = VLAN_HLEN;
1935 /* Only in case of TSO on vlan device */
1936 flags |= FLAGS_VLAN_TAGGED;
1937 }
1938
1939 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1940 TX_TCP_LSO6 : TX_TCP_LSO;
1941 tso = 1;
1942
1943 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1944 u8 l4proto;
1945
1946 if (protocol == cpu_to_be16(ETH_P_IP)) {
1947 l4proto = ip_hdr(skb)->protocol;
1948
1949 if (l4proto == IPPROTO_TCP)
1950 opcode = TX_TCP_PKT;
1951 else if (l4proto == IPPROTO_UDP)
1952 opcode = TX_UDP_PKT;
1953 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1954 l4proto = ipv6_hdr(skb)->nexthdr;
1955
1956 if (l4proto == IPPROTO_TCP)
1957 opcode = TX_TCPV6_PKT;
1958 else if (l4proto == IPPROTO_UDP)
1959 opcode = TX_UDPV6_PKT;
1960 }
1961 }
1962
1963 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1964 first_desc->ip_hdr_offset += skb_network_offset(skb);
1965 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1966
1967 if (!tso)
1968 return;
1969
1970 /* For LSO, we need to copy the MAC/IP/TCP headers into
1971 * the descriptor ring
1972 */
af19b491
AKS
1973 copied = 0;
1974 offset = 2;
1975
1976 if (vlan_oob) {
1977 /* Create a TSO vlan header template for firmware */
1978
1979 hwdesc = &tx_ring->desc_head[producer];
1980 tx_ring->cmd_buf_arr[producer].skb = NULL;
1981
1982 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1983 hdr_len + VLAN_HLEN);
1984
1985 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1986 skb_copy_from_linear_data(skb, vh, 12);
1987 vh->h_vlan_proto = htons(ETH_P_8021Q);
7e56cac4
SC
1988 vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
1989
af19b491
AKS
1990 skb_copy_from_linear_data_offset(skb, 12,
1991 (char *)vh + 16, copy_len - 16);
1992
1993 copied = copy_len - VLAN_HLEN;
1994 offset = 0;
1995
1996 producer = get_next_index(producer, tx_ring->num_desc);
1997 }
1998
1999 while (copied < hdr_len) {
2000
2001 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
2002 (hdr_len - copied));
2003
2004 hwdesc = &tx_ring->desc_head[producer];
2005 tx_ring->cmd_buf_arr[producer].skb = NULL;
2006
2007 skb_copy_from_linear_data_offset(skb, copied,
2008 (char *)hwdesc + offset, copy_len);
2009
2010 copied += copy_len;
2011 offset = 0;
2012
2013 producer = get_next_index(producer, tx_ring->num_desc);
2014 }
2015
2016 tx_ring->producer = producer;
2017 barrier();
8bfe8b91 2018 adapter->stats.lso_frames++;
af19b491
AKS
2019}
2020
2021static int
2022qlcnic_map_tx_skb(struct pci_dev *pdev,
2023 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2024{
2025 struct qlcnic_skb_frag *nf;
2026 struct skb_frag_struct *frag;
2027 int i, nr_frags;
2028 dma_addr_t map;
2029
2030 nr_frags = skb_shinfo(skb)->nr_frags;
2031 nf = &pbuf->frag_array[0];
2032
2033 map = pci_map_single(pdev, skb->data,
2034 skb_headlen(skb), PCI_DMA_TODEVICE);
2035 if (pci_dma_mapping_error(pdev, map))
2036 goto out_err;
2037
2038 nf->dma = map;
2039 nf->length = skb_headlen(skb);
2040
2041 for (i = 0; i < nr_frags; i++) {
2042 frag = &skb_shinfo(skb)->frags[i];
2043 nf = &pbuf->frag_array[i+1];
2044
2045 map = pci_map_page(pdev, frag->page, frag->page_offset,
2046 frag->size, PCI_DMA_TODEVICE);
2047 if (pci_dma_mapping_error(pdev, map))
2048 goto unwind;
2049
2050 nf->dma = map;
2051 nf->length = frag->size;
2052 }
2053
2054 return 0;
2055
2056unwind:
2057 while (--i >= 0) {
2058 nf = &pbuf->frag_array[i+1];
2059 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2060 }
2061
2062 nf = &pbuf->frag_array[0];
2063 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2064
2065out_err:
2066 return -ENOMEM;
2067}
2068
8cf61f89
AKS
2069static int
2070qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
2071 struct sk_buff *skb,
2072 struct cmd_desc_type0 *first_desc)
2073{
2074 u8 opcode = 0;
2075 u16 flags = 0;
2076 __be16 protocol = skb->protocol;
2077 struct vlan_ethhdr *vh;
2078
2079 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
2080 vh = (struct vlan_ethhdr *)skb->data;
2081 protocol = vh->h_vlan_encapsulated_proto;
2082 flags = FLAGS_VLAN_TAGGED;
2083 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2084 } else if (vlan_tx_tag_present(skb)) {
2085 flags = FLAGS_VLAN_OOB;
2086 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2087 }
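	/* With a port VLAN (pvid) configured, already-tagged frames are
	 * dropped unless tagging is explicitly enabled; untagged frames get
	 * the pvid inserted via the out-of-band VLAN field.
	 */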
2088 if (unlikely(adapter->pvid)) {
2089 if (first_desc->vlan_TCI &&
2090 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2091 return -EIO;
2092 if (first_desc->vlan_TCI &&
2093 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2094 goto set_flags;
2095
2096 flags = FLAGS_VLAN_OOB;
2097 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
2098 }
2099set_flags:
2100 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2101 return 0;
2102}
2103
af19b491
AKS
2104static inline void
2105qlcnic_clear_cmddesc(u64 *desc)
2106{
2107 desc[0] = 0ULL;
2108 desc[2] = 0ULL;
8cf61f89 2109 desc[7] = 0ULL;
af19b491
AKS
2110}
2111
cdaff185 2112netdev_tx_t
af19b491
AKS
2113qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2114{
2115 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2116 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2117 struct qlcnic_cmd_buffer *pbuf;
2118 struct qlcnic_skb_frag *buffrag;
2119 struct cmd_desc_type0 *hwdesc, *first_desc;
2120 struct pci_dev *pdev;
dcb50aff 2121 struct ethhdr *phdr;
af19b491
AKS
2122 int i, k;
2123
2124 u32 producer;
2125 int frag_count, no_of_desc;
2126 u32 num_txd = tx_ring->num_desc;
2127
780ab790
AKS
2128 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2129 netif_stop_queue(netdev);
2130 return NETDEV_TX_BUSY;
2131 }
2132
fe4d434d 2133 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2134 phdr = (struct ethhdr *)skb->data;
2135 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2136 adapter->mac_addr))
2137 goto drop_packet;
2138 }
2139
af19b491
AKS
2140 frag_count = skb_shinfo(skb)->nr_frags + 1;
2141
2142 /* 4 fragments per command descriptor */
2143 no_of_desc = (frag_count + 3) >> 2;
2144
ef71ff83 2145 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2146 netif_stop_queue(netdev);
ef71ff83
RB
2147 smp_mb();
2148 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2149 netif_start_queue(netdev);
2150 else {
2151 adapter->stats.xmit_off++;
2152 return NETDEV_TX_BUSY;
2153 }
af19b491
AKS
2154 }
2155
2156 producer = tx_ring->producer;
2157 pbuf = &tx_ring->cmd_buf_arr[producer];
2158
2159 pdev = adapter->pdev;
2160
8cf61f89
AKS
2161 first_desc = hwdesc = &tx_ring->desc_head[producer];
2162 qlcnic_clear_cmddesc((u64 *)hwdesc);
2163
2164 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2165 goto drop_packet;
2166
8ae6df97
AKS
2167 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2168 adapter->stats.tx_dma_map_error++;
af19b491 2169 goto drop_packet;
8ae6df97 2170 }
af19b491
AKS
2171
2172 pbuf->skb = skb;
2173 pbuf->frag_count = frag_count;
2174
af19b491
AKS
2175 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2176 qlcnic_set_tx_port(first_desc, adapter->portnum);
2177
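	/* Pack buffer DMA addresses four per command descriptor, taking a
	 * fresh descriptor from the ring after every fourth fragment.
	 */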
2178 for (i = 0; i < frag_count; i++) {
2179
2180 k = i % 4;
2181
2182 if ((k == 0) && (i > 0)) {
2183 /* move to next desc.*/
2184 producer = get_next_index(producer, num_txd);
2185 hwdesc = &tx_ring->desc_head[producer];
2186 qlcnic_clear_cmddesc((u64 *)hwdesc);
2187 tx_ring->cmd_buf_arr[producer].skb = NULL;
2188 }
2189
2190 buffrag = &pbuf->frag_array[i];
2191
2192 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2193 switch (k) {
2194 case 0:
2195 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2196 break;
2197 case 1:
2198 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2199 break;
2200 case 2:
2201 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2202 break;
2203 case 3:
2204 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2205 break;
2206 }
2207 }
2208
2209 tx_ring->producer = get_next_index(producer, num_txd);
2210
2211 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2212
b5e5492c
AKS
2213 if (qlcnic_mac_learn)
2214 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2215
af19b491
AKS
2216 qlcnic_update_cmd_producer(adapter, tx_ring);
2217
2218 adapter->stats.txbytes += skb->len;
2219 adapter->stats.xmitcalled++;
2220
2221 return NETDEV_TX_OK;
2222
2223drop_packet:
2224 adapter->stats.txdropped++;
2225 dev_kfree_skb_any(skb);
2226 return NETDEV_TX_OK;
2227}
2228
2229static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2230{
2231 struct net_device *netdev = adapter->netdev;
2232 u32 temp, temp_state, temp_val;
2233 int rv = 0;
2234
2235 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2236
2237 temp_state = qlcnic_get_temp_state(temp);
2238 temp_val = qlcnic_get_temp_val(temp);
2239
2240 if (temp_state == QLCNIC_TEMP_PANIC) {
2241 dev_err(&netdev->dev,
2242 "Device temperature %d degrees C exceeds"
2243 " maximum allowed. Hardware has been shut down.\n",
2244 temp_val);
2245 rv = 1;
2246 } else if (temp_state == QLCNIC_TEMP_WARN) {
2247 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2248 dev_err(&netdev->dev,
2249 "Device temperature %d degrees C "
2250 "exceeds operating range."
2251 " Immediate action needed.\n",
2252 temp_val);
2253 }
2254 } else {
2255 if (adapter->temp == QLCNIC_TEMP_WARN) {
2256 dev_info(&netdev->dev,
2257 "Device temperature is now %d degrees C"
2258 " in normal range.\n", temp_val);
2259 }
2260 }
2261 adapter->temp = temp_state;
2262 return rv;
2263}
2264
2265void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2266{
2267 struct net_device *netdev = adapter->netdev;
2268
2269 if (adapter->ahw.linkup && !linkup) {
69324275 2270 netdev_info(netdev, "NIC Link is down\n");
af19b491
AKS
2271 adapter->ahw.linkup = 0;
2272 if (netif_running(netdev)) {
2273 netif_carrier_off(netdev);
2274 netif_stop_queue(netdev);
2275 }
2276 } else if (!adapter->ahw.linkup && linkup) {
69324275 2277 netdev_info(netdev, "NIC Link is up\n");
af19b491
AKS
2278 adapter->ahw.linkup = 1;
2279 if (netif_running(netdev)) {
2280 netif_carrier_on(netdev);
2281 netif_wake_queue(netdev);
2282 }
2283 }
2284}
2285
2286static void qlcnic_tx_timeout(struct net_device *netdev)
2287{
2288 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2289
2290 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2291 return;
2292
2293 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2294
2295 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2296 adapter->need_fw_reset = 1;
2297 else
2298 adapter->reset_context = 1;
af19b491
AKS
2299}
2300
2301static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2302{
2303 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2304 struct net_device_stats *stats = &netdev->stats;
2305
af19b491
AKS
2306 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2307 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2308 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2309 stats->tx_bytes = adapter->stats.txbytes;
2310 stats->rx_dropped = adapter->stats.rxdropped;
2311 stats->tx_dropped = adapter->stats.txdropped;
2312
2313 return stats;
2314}
2315
7eb9855d 2316static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2317{
af19b491
AKS
2318 u32 status;
2319
2320 status = readl(adapter->isr_int_vec);
2321
2322 if (!(status & adapter->int_vec_bit))
2323 return IRQ_NONE;
2324
2325 /* check interrupt state machine, to be sure */
2326 status = readl(adapter->crb_int_state_reg);
2327 if (!ISR_LEGACY_INT_TRIGGERED(status))
2328 return IRQ_NONE;
2329
2330 writel(0xffffffff, adapter->tgt_status_reg);
2331 /* read twice to ensure write is flushed */
2332 readl(adapter->isr_int_vec);
2333 readl(adapter->isr_int_vec);
2334
7eb9855d
AKS
2335 return IRQ_HANDLED;
2336}
2337
2338static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2339{
2340 struct qlcnic_host_sds_ring *sds_ring = data;
2341 struct qlcnic_adapter *adapter = sds_ring->adapter;
2342
2343 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2344 goto done;
2345 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2346 writel(0xffffffff, adapter->tgt_status_reg);
2347 goto done;
2348 }
2349
2350 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2351 return IRQ_NONE;
2352
2353done:
2354 adapter->diag_cnt++;
2355 qlcnic_enable_int(sds_ring);
2356 return IRQ_HANDLED;
2357}
2358
2359static irqreturn_t qlcnic_intr(int irq, void *data)
2360{
2361 struct qlcnic_host_sds_ring *sds_ring = data;
2362 struct qlcnic_adapter *adapter = sds_ring->adapter;
2363
2364 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2365 return IRQ_NONE;
2366
af19b491
AKS
2367 napi_schedule(&sds_ring->napi);
2368
2369 return IRQ_HANDLED;
2370}
2371
2372static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2373{
2374 struct qlcnic_host_sds_ring *sds_ring = data;
2375 struct qlcnic_adapter *adapter = sds_ring->adapter;
2376
2377 /* clear interrupt */
2378 writel(0xffffffff, adapter->tgt_status_reg);
2379
2380 napi_schedule(&sds_ring->napi);
2381 return IRQ_HANDLED;
2382}
2383
2384static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2385{
2386 struct qlcnic_host_sds_ring *sds_ring = data;
2387
2388 napi_schedule(&sds_ring->napi);
2389 return IRQ_HANDLED;
2390}
2391
2392static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2393{
2394 u32 sw_consumer, hw_consumer;
2395 int count = 0, i;
2396 struct qlcnic_cmd_buffer *buffer;
2397 struct pci_dev *pdev = adapter->pdev;
2398 struct net_device *netdev = adapter->netdev;
2399 struct qlcnic_skb_frag *frag;
2400 int done;
2401 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2402
2403 if (!spin_trylock(&adapter->tx_clean_lock))
2404 return 1;
2405
2406 sw_consumer = tx_ring->sw_consumer;
2407 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2408
2409 while (sw_consumer != hw_consumer) {
2410 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2411 if (buffer->skb) {
2412 frag = &buffer->frag_array[0];
2413 pci_unmap_single(pdev, frag->dma, frag->length,
2414 PCI_DMA_TODEVICE);
2415 frag->dma = 0ULL;
2416 for (i = 1; i < buffer->frag_count; i++) {
2417 frag++;
2418 pci_unmap_page(pdev, frag->dma, frag->length,
2419 PCI_DMA_TODEVICE);
2420 frag->dma = 0ULL;
2421 }
2422
2423 adapter->stats.xmitfinished++;
2424 dev_kfree_skb_any(buffer->skb);
2425 buffer->skb = NULL;
2426 }
2427
2428 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2429 if (++count >= MAX_STATUS_HANDLE)
2430 break;
2431 }
2432
2433 if (count && netif_running(netdev)) {
2434 tx_ring->sw_consumer = sw_consumer;
2435
2436 smp_mb();
2437
2438 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2439 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2440 netif_wake_queue(netdev);
8bfe8b91 2441 adapter->stats.xmit_on++;
af19b491 2442 }
af19b491 2443 }
ef71ff83 2444 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2445 }
2446 /*
2447 * If everything is freed up to the consumer, check whether the ring is
2448 * full. If it is, check whether more needs to be freed and schedule the
2449 * callback again.
2450 *
2451 * This happens when there are 2 CPUs. One could be freeing and the
2452 * other filling it. If the ring is full when we get out of here and
2453 * the card has already interrupted the host then the host can miss the
2454 * interrupt.
2455 *
2456 * There is still a possible race condition and the host could miss an
2457 * interrupt. The card has to take care of this.
2458 */
2459 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2460 done = (sw_consumer == hw_consumer);
2461 spin_unlock(&adapter->tx_clean_lock);
2462
2463 return done;
2464}
2465
2466static int qlcnic_poll(struct napi_struct *napi, int budget)
2467{
2468 struct qlcnic_host_sds_ring *sds_ring =
2469 container_of(napi, struct qlcnic_host_sds_ring, napi);
2470
2471 struct qlcnic_adapter *adapter = sds_ring->adapter;
2472
2473 int tx_complete;
2474 int work_done;
2475
2476 tx_complete = qlcnic_process_cmd_ring(adapter);
2477
2478 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2479
2480 if ((work_done < budget) && tx_complete) {
2481 napi_complete(&sds_ring->napi);
2482 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2483 qlcnic_enable_int(sds_ring);
2484 }
2485
2486 return work_done;
2487}
2488
8f891387 2489static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2490{
2491 struct qlcnic_host_sds_ring *sds_ring =
2492 container_of(napi, struct qlcnic_host_sds_ring, napi);
2493
2494 struct qlcnic_adapter *adapter = sds_ring->adapter;
2495 int work_done;
2496
2497 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2498
2499 if (work_done < budget) {
2500 napi_complete(&sds_ring->napi);
2501 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2502 qlcnic_enable_int(sds_ring);
2503 }
2504
2505 return work_done;
2506}
2507
af19b491
AKS
2508#ifdef CONFIG_NET_POLL_CONTROLLER
2509static void qlcnic_poll_controller(struct net_device *netdev)
2510{
bf82791e
YL
2511 int ring;
2512 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2513 struct qlcnic_adapter *adapter = netdev_priv(netdev);
bf82791e
YL
2514 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
2515
af19b491 2516 disable_irq(adapter->irq);
bf82791e
YL
2517 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2518 sds_ring = &recv_ctx->sds_rings[ring];
2519 qlcnic_intr(adapter->irq, sds_ring);
2520 }
af19b491
AKS
2521 enable_irq(adapter->irq);
2522}
2523#endif
2524
6df900e9
SC
2525static void
2526qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2527{
2528 u32 val;
2529
2530 val = adapter->portnum & 0xf;
2531 val |= encoding << 7;
2532 val |= (jiffies - adapter->dev_rst_time) << 8;
2533
2534 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2535 adapter->dev_rst_time = jiffies;
2536}
2537
ade91f8e
AKS
2538static int
2539qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2540{
2541 u32 val;
2542
2543 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2544 state != QLCNIC_DEV_NEED_QUISCENT);
2545
2546 if (qlcnic_api_lock(adapter))
ade91f8e 2547 return -EIO;
af19b491
AKS
2548
2549 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2550
2551 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2552 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2553 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2554 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2555
2556 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2557
2558 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2559
2560 return 0;
af19b491
AKS
2561}
2562
1b95a839
AKS
2563static int
2564qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2565{
2566 u32 val;
2567
2568 if (qlcnic_api_lock(adapter))
2569 return -EBUSY;
2570
2571 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2572 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2573 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2574
2575 qlcnic_api_unlock(adapter);
2576
2577 return 0;
2578}
2579
af19b491 2580static void
21854f02 2581qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2582{
2583 u32 val;
2584
2585 if (qlcnic_api_lock(adapter))
2586 goto err;
2587
31018e06 2588 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2589 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2590 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2591
21854f02
AKS
2592 if (failed) {
2593 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2594 dev_info(&adapter->pdev->dev,
2595 "Device state set to Failed. Please Reboot\n");
2596 } else if (!(val & 0x11111111))
af19b491
AKS
2597 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2598
2599 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2600 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2601 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2602
2603 qlcnic_api_unlock(adapter);
2604err:
2605 adapter->fw_fail_cnt = 0;
2606 clear_bit(__QLCNIC_START_FW, &adapter->state);
2607 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2608}
2609
f73dfc50 2610/* Grab api lock, before checking state */
af19b491
AKS
2611static int
2612qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2613{
2614 int act, state;
2615
2616 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2617 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2618
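	/* Each PCI function owns a nibble in the DRV_ACTIVE/DRV_STATE CRB
	 * registers: DRV_ACTIVE bit 0 marks the function present, DRV_STATE
	 * bit 0 is its reset ack and bit 1 (hence the >> 1) its quiescent
	 * ack.  Return 0 once the set of reset acks or the set of quiescent
	 * acks matches the set of active functions.
	 */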
2619 if (((state & 0x11111111) == (act & 0x11111111)) ||
2620 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2621 return 0;
2622 else
2623 return 1;
2624}
2625
96f8118c
SC
2626static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2627{
2628 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2629
2630 if (val != QLCNIC_DRV_IDC_VER) {
2631 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2632 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2633 }
2634
2635 return 0;
2636}
2637
af19b491
AKS
2638static int
2639qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2640{
2641 u32 val, prev_state;
aa5e18c0 2642 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2643 u8 portnum = adapter->portnum;
96f8118c 2644 u8 ret;
af19b491 2645
f73dfc50
AKS
2646 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2647 return 1;
2648
af19b491
AKS
2649 if (qlcnic_api_lock(adapter))
2650 return -1;
2651
31018e06 2652 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2653 if (!(val & (1 << (portnum * 4)))) {
2654 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2655 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2656 }
2657
2658 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2659 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2660
2661 switch (prev_state) {
2662 case QLCNIC_DEV_COLD:
bbd8c6a4 2663 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2664 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2665 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2666 qlcnic_api_unlock(adapter);
2667 return 1;
2668
2669 case QLCNIC_DEV_READY:
96f8118c 2670 ret = qlcnic_check_idc_ver(adapter);
af19b491 2671 qlcnic_api_unlock(adapter);
96f8118c 2672 return ret;
af19b491
AKS
2673
2674 case QLCNIC_DEV_NEED_RESET:
2675 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2676 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2677 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2678 break;
2679
2680 case QLCNIC_DEV_NEED_QUISCENT:
2681 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2682 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2683 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2684 break;
2685
2686 case QLCNIC_DEV_FAILED:
a7fc948f 2687 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2688 qlcnic_api_unlock(adapter);
2689 return -1;
bbd8c6a4
AKS
2690
2691 case QLCNIC_DEV_INITIALIZING:
2692 case QLCNIC_DEV_QUISCENT:
2693 break;
af19b491
AKS
2694 }
2695
2696 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2697
2698 do {
af19b491 2699 msleep(1000);
a5e463d0
SC
2700 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2701
2702 if (prev_state == QLCNIC_DEV_QUISCENT)
2703 continue;
2704 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2705
65b5b420
AKS
2706 if (!dev_init_timeo) {
2707 dev_err(&adapter->pdev->dev,
2708 "Waiting for device to initialize timeout\n");
af19b491 2709 return -1;
65b5b420 2710 }
af19b491
AKS
2711
2712 if (qlcnic_api_lock(adapter))
2713 return -1;
2714
2715 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2716 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2717 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2718
96f8118c 2719 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2720 qlcnic_api_unlock(adapter);
2721
96f8118c 2722 return ret;
af19b491
AKS
2723}
2724
2725static void
2726qlcnic_fwinit_work(struct work_struct *work)
2727{
2728 struct qlcnic_adapter *adapter = container_of(work,
2729 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2730 u32 dev_state = 0xf;
af19b491 2731
f73dfc50
AKS
2732 if (qlcnic_api_lock(adapter))
2733 goto err_ret;
af19b491 2734
a5e463d0 2735 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2736 if (dev_state == QLCNIC_DEV_QUISCENT ||
2737 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2738 qlcnic_api_unlock(adapter);
2739 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2740 FW_POLL_DELAY * 2);
2741 return;
2742 }
2743
9f26f547 2744 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2745 qlcnic_api_unlock(adapter);
2746 goto wait_npar;
9f26f547
AC
2747 }
2748
f73dfc50
AKS
2749 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2750 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2751 adapter->reset_ack_timeo);
2752 goto skip_ack_check;
2753 }
2754
2755 if (!qlcnic_check_drv_state(adapter)) {
2756skip_ack_check:
2757 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2758
f73dfc50
AKS
2759 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2760 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2761 QLCNIC_DEV_INITIALIZING);
2762 set_bit(__QLCNIC_START_FW, &adapter->state);
2763 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2764 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2765 }
2766
f73dfc50
AKS
2767 qlcnic_api_unlock(adapter);
2768
9f26f547 2769 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2770 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2771 adapter->fw_wait_cnt = 0;
af19b491
AKS
2772 return;
2773 }
af19b491
AKS
2774 goto err_ret;
2775 }
2776
f73dfc50 2777 qlcnic_api_unlock(adapter);
aa5e18c0 2778
9f26f547 2779wait_npar:
af19b491 2780 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2781 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2782
af19b491 2783 switch (dev_state) {
3c4b23b1 2784 case QLCNIC_DEV_READY:
9f26f547 2785 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2786 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2787 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2788 return;
2789 }
3c4b23b1
AKS
2790 case QLCNIC_DEV_FAILED:
2791 break;
2792 default:
2793 qlcnic_schedule_work(adapter,
2794 qlcnic_fwinit_work, FW_POLL_DELAY);
2795 return;
af19b491
AKS
2796 }
2797
2798err_ret:
f73dfc50
AKS
2799 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2800 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2801 netif_device_attach(adapter->netdev);
21854f02 2802 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2803}
2804
2805static void
2806qlcnic_detach_work(struct work_struct *work)
2807{
2808 struct qlcnic_adapter *adapter = container_of(work,
2809 struct qlcnic_adapter, fw_work.work);
2810 struct net_device *netdev = adapter->netdev;
2811 u32 status;
2812
2813 netif_device_detach(netdev);
2814
b8c17620
AKS
2815 /* Don't grab the rtnl lock during quiescent mode */
2816 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2817 if (netif_running(netdev))
2818 __qlcnic_down(adapter, netdev);
2819 } else
2820 qlcnic_down(adapter, netdev);
af19b491 2821
af19b491
AKS
2822 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2823
2824 if (status & QLCNIC_RCODE_FATAL_ERROR)
2825 goto err_ret;
2826
2827 if (adapter->temp == QLCNIC_TEMP_PANIC)
2828 goto err_ret;
2829
ade91f8e
AKS
2830 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2831 goto err_ret;
af19b491
AKS
2832
2833 adapter->fw_wait_cnt = 0;
2834
2835 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2836
2837 return;
2838
2839err_ret:
65b5b420
AKS
2840 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2841 status, adapter->temp);
34ce3626 2842 netif_device_attach(netdev);
21854f02 2843 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2844}
2845
3c4b23b1
AKS
2846/* Transition NPAR state to non-operational */
2847static void
2848qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2849{
2850 u32 state;
2851
2852 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2853 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2854 return;
2855
2856 if (qlcnic_api_lock(adapter))
2857 return;
2858 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2859 qlcnic_api_unlock(adapter);
2860}
2861
f73dfc50 2862/* Transition to RESET state from READY state only */
af19b491
AKS
2863static void
2864qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2865{
2866 u32 state;
2867
cea8975e 2868 adapter->need_fw_reset = 1;
af19b491
AKS
2869 if (qlcnic_api_lock(adapter))
2870 return;
2871
2872 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2873
f73dfc50 2874 if (state == QLCNIC_DEV_READY) {
af19b491 2875 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2876 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2877 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2878 }
2879
3c4b23b1 2880 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2881 qlcnic_api_unlock(adapter);
2882}
2883
9f26f547
AC
2884/* Transition to NPAR READY state from NPAR NOT READY state */
2885static void
2886qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2887{
9f26f547
AC
2888 if (qlcnic_api_lock(adapter))
2889 return;
2890
3c4b23b1
AKS
2891 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2892 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2893
2894 qlcnic_api_unlock(adapter);
2895}
2896
af19b491
AKS
2897static void
2898qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2899 work_func_t func, int delay)
2900{
451724c8
SC
2901 if (test_bit(__QLCNIC_AER, &adapter->state))
2902 return;
2903
af19b491 2904 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
2905 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
2906 round_jiffies_relative(delay));
af19b491
AKS
2907}
2908
2909static void
2910qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2911{
2912 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2913 msleep(10);
2914
2915 cancel_delayed_work_sync(&adapter->fw_work);
2916}
2917
2918static void
2919qlcnic_attach_work(struct work_struct *work)
2920{
2921 struct qlcnic_adapter *adapter = container_of(work,
2922 struct qlcnic_adapter, fw_work.work);
2923 struct net_device *netdev = adapter->netdev;
b18971d1 2924 u32 npar_state;
af19b491 2925
b18971d1
AKS
2926 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2927 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2928 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2929 qlcnic_clr_all_drv_state(adapter, 0);
2930 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2931 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2932 FW_POLL_DELAY);
2933 else
2934 goto attach;
2935 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2936 return;
2937 }
2938attach:
af19b491 2939 if (netif_running(netdev)) {
52486a3a 2940 if (qlcnic_up(adapter, netdev))
af19b491 2941 goto done;
af19b491 2942
aec1e845 2943 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
2944 }
2945
af19b491 2946done:
34ce3626 2947 netif_device_attach(netdev);
af19b491
AKS
2948 adapter->fw_fail_cnt = 0;
2949 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2950
2951 if (!qlcnic_clr_drv_state(adapter))
2952 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2953 FW_POLL_DELAY);
af19b491
AKS
2954}
2955
2956static int
2957qlcnic_check_health(struct qlcnic_adapter *adapter)
2958{
4e70812b 2959 u32 state = 0, heartbeat;
af19b491
AKS
2960 struct net_device *netdev = adapter->netdev;
2961
2962 if (qlcnic_check_temp(adapter))
2963 goto detach;
2964
2372a5f1 2965 if (adapter->need_fw_reset)
af19b491 2966 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2967
2968 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 2969 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 2970 qlcnic_set_npar_non_operational(adapter);
af19b491 2971 adapter->need_fw_reset = 1;
b8c17620
AKS
2972 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
2973 goto detach;
af19b491 2974
4e70812b
SC
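	/* Firmware bumps the PEG alive counter while healthy.  A changed
	 * counter clears the failure count; if it stays frozen for
	 * FW_FAIL_THRESH consecutive polls, a firmware hang is assumed.
	 */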
2975 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2976 if (heartbeat != adapter->heartbeat) {
2977 adapter->heartbeat = heartbeat;
af19b491
AKS
2978 adapter->fw_fail_cnt = 0;
2979 if (adapter->need_fw_reset)
2980 goto detach;
68bf1c68 2981
0df170b6
AKS
2982 if (adapter->reset_context &&
2983 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
68bf1c68
AKS
2984 qlcnic_reset_hw_context(adapter);
2985 adapter->netdev->trans_start = jiffies;
2986 }
2987
af19b491
AKS
2988 return 0;
2989 }
2990
2991 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2992 return 0;
2993
2994 qlcnic_dev_request_reset(adapter);
2995
0df170b6
AKS
2996 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
2997 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
2998
2999 dev_info(&netdev->dev, "firmware hang detected\n");
3000
3001detach:
3002 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3003 QLCNIC_DEV_NEED_RESET;
3004
3005 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
65b5b420
AKS
3006 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3007
af19b491 3008 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3009 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3010 }
af19b491
AKS
3011
3012 return 1;
3013}
3014
3015static void
3016qlcnic_fw_poll_work(struct work_struct *work)
3017{
3018 struct qlcnic_adapter *adapter = container_of(work,
3019 struct qlcnic_adapter, fw_work.work);
3020
3021 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3022 goto reschedule;
3023
3024
3025 if (qlcnic_check_health(adapter))
3026 return;
3027
b5e5492c
AKS
3028 if (adapter->fhash.fnum)
3029 qlcnic_prune_lb_filters(adapter);
3030
af19b491
AKS
3031reschedule:
3032 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3033}
3034
451724c8
SC
3035static int qlcnic_is_first_func(struct pci_dev *pdev)
3036{
3037 struct pci_dev *oth_pdev;
3038 int val = pdev->devfn;
3039
3040 while (val-- > 0) {
3041 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3042 (pdev->bus), pdev->bus->number,
3043 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3044 if (!oth_pdev)
3045 continue;
451724c8 3046
bfc978fa
AKS
3047 if (oth_pdev->current_state != PCI_D3cold) {
3048 pci_dev_put(oth_pdev);
451724c8 3049 return 0;
bfc978fa
AKS
3050 }
3051 pci_dev_put(oth_pdev);
451724c8
SC
3052 }
3053 return 1;
3054}
3055
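/* PCI AER slot-reset path: re-enable the device and restore its PCI
 * state, restart the firmware (done only by a privileged function that
 * is the first active one on the slot), then re-attach the interface.
 */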
3056static int qlcnic_attach_func(struct pci_dev *pdev)
3057{
3058 int err, first_func;
3059 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3060 struct net_device *netdev = adapter->netdev;
3061
3062 pdev->error_state = pci_channel_io_normal;
3063
3064 err = pci_enable_device(pdev);
3065 if (err)
3066 return err;
3067
3068 pci_set_power_state(pdev, PCI_D0);
3069 pci_set_master(pdev);
3070 pci_restore_state(pdev);
3071
3072 first_func = qlcnic_is_first_func(pdev);
3073
3074 if (qlcnic_api_lock(adapter))
3075 return -EINVAL;
3076
933fce12 3077 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3078 adapter->need_fw_reset = 1;
3079 set_bit(__QLCNIC_START_FW, &adapter->state);
3080 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3081 QLCDB(adapter, DRV, "Restarting fw\n");
3082 }
3083 qlcnic_api_unlock(adapter);
3084
3085 err = adapter->nic_ops->start_firmware(adapter);
3086 if (err)
3087 return err;
3088
3089 qlcnic_clr_drv_state(adapter);
3090 qlcnic_setup_intr(adapter);
3091
3092 if (netif_running(netdev)) {
3093 err = qlcnic_attach(adapter);
3094 if (err) {
21854f02 3095 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3096 clear_bit(__QLCNIC_AER, &adapter->state);
3097 netif_device_attach(netdev);
3098 return err;
3099 }
3100
3101 err = qlcnic_up(adapter, netdev);
3102 if (err)
3103 goto done;
3104
aec1e845 3105 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3106 }
3107 done:
3108 netif_device_attach(netdev);
3109 return err;
3110}
3111
3112static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3113 pci_channel_state_t state)
3114{
3115 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3116 struct net_device *netdev = adapter->netdev;
3117
3118 if (state == pci_channel_io_perm_failure)
3119 return PCI_ERS_RESULT_DISCONNECT;
3120
3121 if (state == pci_channel_io_normal)
3122 return PCI_ERS_RESULT_RECOVERED;
3123
3124 set_bit(__QLCNIC_AER, &adapter->state);
3125 netif_device_detach(netdev);
3126
3127 cancel_delayed_work_sync(&adapter->fw_work);
3128
3129 if (netif_running(netdev))
3130 qlcnic_down(adapter, netdev);
3131
3132 qlcnic_detach(adapter);
3133 qlcnic_teardown_intr(adapter);
3134
3135 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3136
3137 pci_save_state(pdev);
3138 pci_disable_device(pdev);
3139
3140 return PCI_ERS_RESULT_NEED_RESET;
3141}
3142
3143static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3144{
3145 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3146 PCI_ERS_RESULT_RECOVERED;
3147}
3148
3149static void qlcnic_io_resume(struct pci_dev *pdev)
3150{
3151 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3152
3153 pci_cleanup_aer_uncorrect_error_status(pdev);
3154
3155 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3156 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3157 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3158 FW_POLL_DELAY);
3159}
3160
87eb743b
AC
3161static int
3162qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3163{
3164 int err;
3165
3166 err = qlcnic_can_start_firmware(adapter);
3167 if (err)
3168 return err;
3169
78f84e1a
AKS
3170 err = qlcnic_check_npar_opertional(adapter);
3171 if (err)
3172 return err;
3c4b23b1 3173
174240a8
RB
3174 err = qlcnic_initialize_nic(adapter);
3175 if (err)
3176 return err;
3177
87eb743b
AC
3178 qlcnic_check_options(adapter);
3179
7373373d
RB
3180 err = qlcnic_set_eswitch_port_config(adapter);
3181 if (err)
3182 return err;
3183
87eb743b
AC
3184 adapter->need_fw_reset = 0;
3185
3186 return err;
3187}
3188
3189static int
3190qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3191{
3192 return -EOPNOTSUPP;
3193}
3194
3195static int
3196qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3197{
3198 return -EOPNOTSUPP;
3199}
3200
af19b491
AKS
3201static ssize_t
3202qlcnic_store_bridged_mode(struct device *dev,
3203 struct device_attribute *attr, const char *buf, size_t len)
3204{
3205 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3206 unsigned long new;
3207 int ret = -EINVAL;
3208
3209 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3210 goto err_out;
3211
8a15ad1f 3212 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3213 goto err_out;
3214
3215 if (strict_strtoul(buf, 2, &new))
3216 goto err_out;
3217
2e9d722d 3218 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3219 ret = len;
3220
3221err_out:
3222 return ret;
3223}
3224
3225static ssize_t
3226qlcnic_show_bridged_mode(struct device *dev,
3227 struct device_attribute *attr, char *buf)
3228{
3229 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3230 int bridged_mode = 0;
3231
3232 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3233 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3234
3235 return sprintf(buf, "%d\n", bridged_mode);
3236}
3237
3238static struct device_attribute dev_attr_bridged_mode = {
3239 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3240 .show = qlcnic_show_bridged_mode,
3241 .store = qlcnic_store_bridged_mode,
3242};
3243
3244static ssize_t
3245qlcnic_store_diag_mode(struct device *dev,
3246 struct device_attribute *attr, const char *buf, size_t len)
3247{
3248 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3249 unsigned long new;
3250
3251 if (strict_strtoul(buf, 2, &new))
3252 return -EINVAL;
3253
3254 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3255 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3256
3257 return len;
3258}
3259
3260static ssize_t
3261qlcnic_show_diag_mode(struct device *dev,
3262 struct device_attribute *attr, char *buf)
3263{
3264 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3265
3266 return sprintf(buf, "%d\n",
3267 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3268}
3269
3270static struct device_attribute dev_attr_diag_mode = {
3271 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3272 .show = qlcnic_show_diag_mode,
3273 .store = qlcnic_store_diag_mode,
3274};
3275
3276static int
3277qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3278 loff_t offset, size_t size)
3279{
897e8c7c
DP
3280 size_t crb_size = 4;
3281
af19b491
AKS
3282 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3283 return -EIO;
3284
897e8c7c
DP
3285 if (offset < QLCNIC_PCI_CRBSPACE) {
3286 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3287 QLCNIC_PCI_CAMQM_END))
3288 crb_size = 8;
3289 else
3290 return -EINVAL;
3291 }
af19b491 3292
897e8c7c
DP
3293 if ((size != crb_size) || (offset & (crb_size-1)))
3294 return -EINVAL;
af19b491
AKS
3295
3296 return 0;
3297}
3298
3299static ssize_t
2c3c8bea
CW
3300qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3301 struct bin_attribute *attr,
af19b491
AKS
3302 char *buf, loff_t offset, size_t size)
3303{
3304 struct device *dev = container_of(kobj, struct device, kobj);
3305 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3306 u32 data;
897e8c7c 3307 u64 qmdata;
af19b491
AKS
3308 int ret;
3309
3310 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3311 if (ret != 0)
3312 return ret;
3313
897e8c7c
DP
3314 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3315 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3316 memcpy(buf, &qmdata, size);
3317 } else {
3318 data = QLCRD32(adapter, offset);
3319 memcpy(buf, &data, size);
3320 }
af19b491
AKS
3321 return size;
3322}
3323
3324static ssize_t
2c3c8bea
CW
3325qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3326 struct bin_attribute *attr,
af19b491
AKS
3327 char *buf, loff_t offset, size_t size)
3328{
3329 struct device *dev = container_of(kobj, struct device, kobj);
3330 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3331 u32 data;
897e8c7c 3332 u64 qmdata;
af19b491
AKS
3333 int ret;
3334
3335 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3336 if (ret != 0)
3337 return ret;
3338
897e8c7c
DP
3339 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3340 memcpy(&qmdata, buf, size);
3341 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3342 } else {
3343 memcpy(&data, buf, size);
3344 QLCWR32(adapter, offset, data);
3345 }
af19b491
AKS
3346 return size;
3347}
3348
3349static int
3350qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3351 loff_t offset, size_t size)
3352{
3353 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3354 return -EIO;
3355
3356 if ((size != 8) || (offset & 0x7))
3357 return -EIO;
3358
3359 return 0;
3360}
3361
3362static ssize_t
2c3c8bea
CW
3363qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3364 struct bin_attribute *attr,
af19b491
AKS
3365 char *buf, loff_t offset, size_t size)
3366{
3367 struct device *dev = container_of(kobj, struct device, kobj);
3368 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3369 u64 data;
3370 int ret;
3371
3372 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3373 if (ret != 0)
3374 return ret;
3375
3376 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3377 return -EIO;
3378
3379 memcpy(buf, &data, size);
3380
3381 return size;
3382}
3383
3384static ssize_t
2c3c8bea
CW
3385qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3386 struct bin_attribute *attr,
af19b491
AKS
3387 char *buf, loff_t offset, size_t size)
3388{
3389 struct device *dev = container_of(kobj, struct device, kobj);
3390 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3391 u64 data;
3392 int ret;
3393
3394 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3395 if (ret != 0)
3396 return ret;
3397
3398 memcpy(&data, buf, size);
3399
3400 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3401 return -EIO;
3402
3403 return size;
3404}
3405
3406
3407static struct bin_attribute bin_attr_crb = {
3408 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3409 .size = 0,
3410 .read = qlcnic_sysfs_read_crb,
3411 .write = qlcnic_sysfs_write_crb,
3412};
3413
3414static struct bin_attribute bin_attr_mem = {
3415 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3416 .size = 0,
3417 .read = qlcnic_sysfs_read_mem,
3418 .write = qlcnic_sysfs_write_mem,
3419};
3420
cea8975e 3421static int
346fe763
RB
3422validate_pm_config(struct qlcnic_adapter *adapter,
3423 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3424{
3425
3426 u8 src_pci_func, s_esw_id, d_esw_id;
3427 u8 dest_pci_func;
3428 int i;
3429
3430 for (i = 0; i < count; i++) {
3431 src_pci_func = pm_cfg[i].pci_func;
3432 dest_pci_func = pm_cfg[i].dest_npar;
3433 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3434 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3435 return QL_STATUS_INVALID_PARAM;
3436
3437 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3438 return QL_STATUS_INVALID_PARAM;
3439
3440 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3441 return QL_STATUS_INVALID_PARAM;
3442
346fe763
RB
3443 s_esw_id = adapter->npars[src_pci_func].phy_port;
3444 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3445
3446 if (s_esw_id != d_esw_id)
3447 return QL_STATUS_INVALID_PARAM;
3448
3449 }
3450 return 0;
3451
3452}
3453
3454static ssize_t
3455qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3456 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3457{
3458 struct device *dev = container_of(kobj, struct device, kobj);
3459 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3460 struct qlcnic_pm_func_cfg *pm_cfg;
3461 u32 id, action, pci_func;
3462 int count, rem, i, ret;
3463
3464 count = size / sizeof(struct qlcnic_pm_func_cfg);
3465 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3466 if (rem)
3467 return QL_STATUS_INVALID_PARAM;
3468
3469 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3470
3471 ret = validate_pm_config(adapter, pm_cfg, count);
3472 if (ret)
3473 return ret;
3474 for (i = 0; i < count; i++) {
3475 pci_func = pm_cfg[i].pci_func;
4e8acb01 3476 action = !!pm_cfg[i].action;
346fe763
RB
3477 id = adapter->npars[pci_func].phy_port;
3478 ret = qlcnic_config_port_mirroring(adapter, id,
3479 action, pci_func);
3480 if (ret)
3481 return ret;
3482 }
3483
3484 for (i = 0; i < count; i++) {
3485 pci_func = pm_cfg[i].pci_func;
3486 id = adapter->npars[pci_func].phy_port;
4e8acb01 3487 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3488 adapter->npars[pci_func].dest_npar = id;
3489 }
3490 return size;
3491}
3492
3493static ssize_t
3494qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3495 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3496{
3497 struct device *dev = container_of(kobj, struct device, kobj);
3498 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3499 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3500 int i;
3501
3502 if (size != sizeof(pm_cfg))
3503 return QL_STATUS_INVALID_PARAM;
3504
3505 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3506 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3507 continue;
3508 pm_cfg[i].action = adapter->npars[i].enable_pm;
3509 pm_cfg[i].dest_npar = 0;
3510 pm_cfg[i].pci_func = i;
3511 }
3512 memcpy(buf, &pm_cfg, size);
3513
3514 return size;
3515}
3516
cea8975e 3517static int
346fe763 3518validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3519 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3520{
7613c87b 3521 u32 op_mode;
346fe763
RB
3522 u8 pci_func;
3523 int i;
7613c87b
RB
3524
3525 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
3526
346fe763
RB
3527 for (i = 0; i < count; i++) {
3528 pci_func = esw_cfg[i].pci_func;
3529 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3530 return QL_STATUS_INVALID_PARAM;
3531
4e8acb01
RB
3532 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3533 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3534 return QL_STATUS_INVALID_PARAM;
346fe763 3535
4e8acb01
RB
3536 switch (esw_cfg[i].op_mode) {
3537 case QLCNIC_PORT_DEFAULTS:
7613c87b 3538 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3539 QLCNIC_NON_PRIV_FUNC) {
7613c87b 3540 esw_cfg[i].mac_anti_spoof = 0;
7373373d 3541 esw_cfg[i].mac_override = 1;
ee07c1a7 3542 esw_cfg[i].promisc_mode = 1;
7373373d 3543 }
4e8acb01
RB
3544 break;
3545 case QLCNIC_ADD_VLAN:
346fe763
RB
3546 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3547 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3548 if (!esw_cfg[i].op_type)
3549 return QL_STATUS_INVALID_PARAM;
3550 break;
3551 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3552 if (!esw_cfg[i].op_type)
3553 return QL_STATUS_INVALID_PARAM;
3554 break;
3555 default:
346fe763 3556 return QL_STATUS_INVALID_PARAM;
4e8acb01 3557 }
346fe763 3558 }
346fe763
RB
3559 return 0;
3560}
3561
3562static ssize_t
3563qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3564 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3565{
3566 struct device *dev = container_of(kobj, struct device, kobj);
3567 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3568 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3569 struct qlcnic_npar_info *npar;
346fe763 3570 int count, rem, i, ret;
0325d69b 3571 u8 pci_func, op_mode = 0;
346fe763
RB
3572
3573 count = size / sizeof(struct qlcnic_esw_func_cfg);
3574 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3575 if (rem)
3576 return QL_STATUS_INVALID_PARAM;
3577
3578 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3579 ret = validate_esw_config(adapter, esw_cfg, count);
3580 if (ret)
3581 return ret;
3582
3583 for (i = 0; i < count; i++) {
0325d69b
RB
3584 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3585 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3586 return QL_STATUS_INVALID_PARAM;
e9a47700
RB
3587
3588 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3589 continue;
3590
3591 op_mode = esw_cfg[i].op_mode;
3592 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3593 esw_cfg[i].op_mode = op_mode;
3594 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3595
3596 switch (esw_cfg[i].op_mode) {
3597 case QLCNIC_PORT_DEFAULTS:
3598 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3599 break;
8cf61f89
AKS
3600 case QLCNIC_ADD_VLAN:
3601 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3602 break;
3603 case QLCNIC_DEL_VLAN:
3604 esw_cfg[i].vlan_id = 0;
3605 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3606 break;
0325d69b 3607 }
346fe763
RB
3608 }
3609
0325d69b
RB
3610 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3611 goto out;
e9a47700 3612
346fe763
RB
3613 for (i = 0; i < count; i++) {
3614 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3615 npar = &adapter->npars[pci_func];
3616 switch (esw_cfg[i].op_mode) {
3617 case QLCNIC_PORT_DEFAULTS:
3618 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3619 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3620 npar->offload_flags = esw_cfg[i].offload_flags;
3621 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3622 npar->discard_tagged = esw_cfg[i].discard_tagged;
3623 break;
3624 case QLCNIC_ADD_VLAN:
3625 npar->pvid = esw_cfg[i].vlan_id;
3626 break;
3627 case QLCNIC_DEL_VLAN:
3628 npar->pvid = 0;
3629 break;
3630 }
346fe763 3631 }
0325d69b 3632out:
346fe763
RB
3633 return size;
3634}
3635
3636static ssize_t
3637qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3638 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3639{
3640 struct device *dev = container_of(kobj, struct device, kobj);
3641 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3642 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3643 u8 i;
346fe763
RB
3644
3645 if (size != sizeof(esw_cfg))
3646 return QL_STATUS_INVALID_PARAM;
3647
3648 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3649 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3650 continue;
4e8acb01
RB
3651 esw_cfg[i].pci_func = i;
3652 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3653 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3654 }
3655 memcpy(buf, &esw_cfg, size);
3656
3657 return size;
3658}
3659
cea8975e 3660static int
346fe763
RB
3661validate_npar_config(struct qlcnic_adapter *adapter,
3662 struct qlcnic_npar_func_cfg *np_cfg, int count)
3663{
3664 u8 pci_func, i;
3665
3666 for (i = 0; i < count; i++) {
3667 pci_func = np_cfg[i].pci_func;
3668 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3669 return QL_STATUS_INVALID_PARAM;
3670
3671 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3672 return QL_STATUS_INVALID_PARAM;
3673
3674 if (!IS_VALID_BW(np_cfg[i].min_bw)
3675 || !IS_VALID_BW(np_cfg[i].max_bw)
3676 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3677 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3678 return QL_STATUS_INVALID_PARAM;
3679 }
3680 return 0;
3681}
3682
3683static ssize_t
3684qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3685 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3686{
3687 struct device *dev = container_of(kobj, struct device, kobj);
3688 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3689 struct qlcnic_info nic_info;
3690 struct qlcnic_npar_func_cfg *np_cfg;
3691 int i, count, rem, ret;
3692 u8 pci_func;
3693
3694 count = size / sizeof(struct qlcnic_npar_func_cfg);
3695 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3696 if (rem)
3697 return QL_STATUS_INVALID_PARAM;
3698
3699 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3700 ret = validate_npar_config(adapter, np_cfg, count);
3701 if (ret)
3702 return ret;
3703
3704 for (i = 0; i < count ; i++) {
3705 pci_func = np_cfg[i].pci_func;
3706 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3707 if (ret)
3708 return ret;
3709 nic_info.pci_func = pci_func;
3710 nic_info.min_tx_bw = np_cfg[i].min_bw;
3711 nic_info.max_tx_bw = np_cfg[i].max_bw;
3712 ret = qlcnic_set_nic_info(adapter, &nic_info);
3713 if (ret)
3714 return ret;
cea8975e
AC
3715 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3716 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3717 }
3718
3719 return size;
3720
3721}
3722static ssize_t
3723qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3724 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3725{
3726 struct device *dev = container_of(kobj, struct device, kobj);
3727 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3728 struct qlcnic_info nic_info;
3729 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3730 int i, ret;
3731
3732 if (size != sizeof(np_cfg))
3733 return QL_STATUS_INVALID_PARAM;
3734
3735 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3736 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3737 continue;
3738 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3739 if (ret)
3740 return ret;
3741
3742 np_cfg[i].pci_func = i;
a1c0c459 3743 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
3744 np_cfg[i].port_num = nic_info.phys_port;
3745 np_cfg[i].fw_capab = nic_info.capabilities;
3746 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3747 np_cfg[i].max_bw = nic_info.max_tx_bw;
3748 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3749 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3750 }
3751 memcpy(buf, &np_cfg, size);
3752 return size;
3753}
3754
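/*
 * sysfs read handler for "port_stats": the file offset selects the PCI
 * function, and its RX and TX eswitch counters are returned as one
 * struct qlcnic_esw_statistics.
 */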
b6021212
AKS
3755static ssize_t
3756qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3757 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3758{
3759 struct device *dev = container_of(kobj, struct device, kobj);
3760 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3761 struct qlcnic_esw_statistics port_stats;
3762 int ret;
3763
3764 if (size != sizeof(struct qlcnic_esw_statistics))
3765 return QL_STATUS_INVALID_PARAM;
3766
3767 if (offset >= QLCNIC_MAX_PCI_FUNC)
3768 return QL_STATUS_INVALID_PARAM;
3769
3770 memset(&port_stats, 0, size);
3771 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3772 &port_stats.rx);
3773 if (ret)
3774 return ret;
3775
3776 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3777 &port_stats.tx);
3778 if (ret)
3779 return ret;
3780
3781 memcpy(buf, &port_stats, size);
3782 return size;
3783}
3784
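/*
 * sysfs read handler for "esw_stats": same layout as "port_stats", but
 * the offset selects an eswitch (XG port) rather than a PCI function.
 */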
3785static ssize_t
3786qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3787 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3788{
3789 struct device *dev = container_of(kobj, struct device, kobj);
3790 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3791 struct qlcnic_esw_statistics esw_stats;
3792 int ret;
3793
3794 if (size != sizeof(struct qlcnic_esw_statistics))
3795 return QL_STATUS_INVALID_PARAM;
3796
3797 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3798 return QL_STATUS_INVALID_PARAM;
3799
3800 memset(&esw_stats, 0, size);
3801 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3802 &esw_stats.rx);
3803 if (ret)
3804 return ret;
3805
3806 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3807 &esw_stats.tx);
3808 if (ret)
3809 return ret;
3810
3811 memcpy(buf, &esw_stats, size);
3812 return size;
3813}
3814
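/*
 * sysfs write handler for "esw_stats": clears the RX and TX counters of
 * the eswitch selected by the file offset; the buffer contents are
 * ignored.
 */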
3815static ssize_t
3816qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3817 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3818{
3819 struct device *dev = container_of(kobj, struct device, kobj);
3820 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3821 int ret;
3822
3823 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3824 return QL_STATUS_INVALID_PARAM;
3825
3826 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3827 QLCNIC_QUERY_RX_COUNTER);
3828 if (ret)
3829 return ret;
3830
3831 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3832 QLCNIC_QUERY_TX_COUNTER);
3833 if (ret)
3834 return ret;
3835
3836 return size;
3837}
3838
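/*
 * sysfs write handler for "port_stats": clears the RX and TX counters
 * of the PCI function selected by the file offset.
 */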
3839static ssize_t
3840qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3841 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3842{
3843
3844 struct device *dev = container_of(kobj, struct device, kobj);
3845 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3846 int ret;
3847
3848 if (offset >= QLCNIC_MAX_PCI_FUNC)
3849 return QL_STATUS_INVALID_PARAM;
3850
3851 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3852 QLCNIC_QUERY_RX_COUNTER);
3853 if (ret)
3854 return ret;
3855
3856 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3857 QLCNIC_QUERY_TX_COUNTER);
3858 if (ret)
3859 return ret;
3860
3861 return size;
3862}
3863
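/*
 * sysfs read handler for "pci_config": queries firmware for per-function
 * PCI info and returns one struct qlcnic_pci_func_cfg per function
 * (type, port, TX bandwidth limits and default MAC address).
 */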
346fe763
RB
3864static ssize_t
3865qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3866 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3867{
3868 struct device *dev = container_of(kobj, struct device, kobj);
3869 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3870 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3871 struct qlcnic_pci_info *pci_info;
346fe763
RB
3872 int i, ret;
3873
3874 if (size != sizeof(pci_cfg))
3875 return QL_STATUS_INVALID_PARAM;
3876
e88db3bd
DC
3877 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3878 if (!pci_info)
3879 return -ENOMEM;
3880
346fe763 3881 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
3882 if (ret) {
3883 kfree(pci_info);
346fe763 3884 return ret;
e88db3bd 3885 }
346fe763
RB
3886
3887	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3888 pci_cfg[i].pci_func = pci_info[i].id;
3889 pci_cfg[i].func_type = pci_info[i].type;
3890 pci_cfg[i].port_num = pci_info[i].default_port;
3891 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3892 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3893 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3894 }
3895 memcpy(buf, &pci_cfg, size);
e88db3bd 3896 kfree(pci_info);
346fe763 3897 return size;
346fe763
RB
3898}
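/*
 * Every bin attribute below is created with .size = 0, so sysfs does not
 * enforce a length; each handler validates the requested size itself and
 * data is exchanged as raw driver structures. Illustrative read (the PCI
 * address, output file and block size are examples only; the request
 * must cover the whole table, i.e. QLCNIC_MAX_PCI_FUNC *
 * sizeof(struct qlcnic_npar_func_cfg) bytes):
 *
 *   dd if=/sys/bus/pci/devices/0000:03:00.0/npar_config of=npar.bin \
 *      bs=<table size> count=1
 */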
3899static struct bin_attribute bin_attr_npar_config = {
3900 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3901 .size = 0,
3902 .read = qlcnic_sysfs_read_npar_config,
3903 .write = qlcnic_sysfs_write_npar_config,
3904};
3905
3906static struct bin_attribute bin_attr_pci_config = {
3907 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3908 .size = 0,
3909 .read = qlcnic_sysfs_read_pci_config,
3910 .write = NULL,
3911};
3912
b6021212
AKS
3913static struct bin_attribute bin_attr_port_stats = {
3914 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3915 .size = 0,
3916 .read = qlcnic_sysfs_get_port_stats,
3917 .write = qlcnic_sysfs_clear_port_stats,
3918};
3919
3920static struct bin_attribute bin_attr_esw_stats = {
3921 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3922 .size = 0,
3923 .read = qlcnic_sysfs_get_esw_stats,
3924 .write = qlcnic_sysfs_clear_esw_stats,
3925};
3926
346fe763
RB
3927static struct bin_attribute bin_attr_esw_config = {
3928 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3929 .size = 0,
3930 .read = qlcnic_sysfs_read_esw_config,
3931 .write = qlcnic_sysfs_write_esw_config,
3932};
3933
3934static struct bin_attribute bin_attr_pm_config = {
3935 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3936 .size = 0,
3937 .read = qlcnic_sysfs_read_pm_config,
3938 .write = qlcnic_sysfs_write_pm_config,
3939};
3940
af19b491
AKS
3941static void
3942qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3943{
3944 struct device *dev = &adapter->pdev->dev;
3945
3946 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3947 if (device_create_file(dev, &dev_attr_bridged_mode))
3948 dev_warn(dev,
3949 "failed to create bridged_mode sysfs entry\n");
3950}
3951
3952static void
3953qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3954{
3955 struct device *dev = &adapter->pdev->dev;
3956
3957 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3958 device_remove_file(dev, &dev_attr_bridged_mode);
3959}
3960
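/*
 * Diagnostic sysfs entries are tiered by privilege: every function gets
 * "port_stats"; privileged functions also get "diag_mode", "crb" and
 * "mem"; eswitch-enabled adapters add "esw_config"; and only the
 * management function exposes "pci_config", "npar_config", "pm_config"
 * and "esw_stats".
 */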
3961static void
3962qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3963{
3964 struct device *dev = &adapter->pdev->dev;
3965
b6021212
AKS
3966 if (device_create_bin_file(dev, &bin_attr_port_stats))
3967 dev_info(dev, "failed to create port stats sysfs entry");
3968
132ff00a
AC
3969 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3970 return;
af19b491
AKS
3971 if (device_create_file(dev, &dev_attr_diag_mode))
3972 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3973 if (device_create_bin_file(dev, &bin_attr_crb))
3974 dev_info(dev, "failed to create crb sysfs entry\n");
3975 if (device_create_bin_file(dev, &bin_attr_mem))
3976 dev_info(dev, "failed to create mem sysfs entry\n");
4e8acb01
RB
3977 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3978 return;
3979 if (device_create_bin_file(dev, &bin_attr_esw_config))
3980 dev_info(dev, "failed to create esw config sysfs entry");
3981 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763
RB
3982 return;
3983 if (device_create_bin_file(dev, &bin_attr_pci_config))
3984 dev_info(dev, "failed to create pci config sysfs entry");
3985 if (device_create_bin_file(dev, &bin_attr_npar_config))
3986 dev_info(dev, "failed to create npar config sysfs entry");
346fe763
RB
3987 if (device_create_bin_file(dev, &bin_attr_pm_config))
3988 dev_info(dev, "failed to create pm config sysfs entry");
b6021212
AKS
3989 if (device_create_bin_file(dev, &bin_attr_esw_stats))
3990 dev_info(dev, "failed to create eswitch stats sysfs entry");
af19b491
AKS
3991}
3992
af19b491
AKS
3993static void
3994qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3995{
3996 struct device *dev = &adapter->pdev->dev;
3997
b6021212
AKS
3998 device_remove_bin_file(dev, &bin_attr_port_stats);
3999
132ff00a
AC
4000 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4001 return;
af19b491
AKS
4002 device_remove_file(dev, &dev_attr_diag_mode);
4003 device_remove_bin_file(dev, &bin_attr_crb);
4004 device_remove_bin_file(dev, &bin_attr_mem);
4e8acb01
RB
4005 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4006 return;
4007 device_remove_bin_file(dev, &bin_attr_esw_config);
4008 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763
RB
4009 return;
4010 device_remove_bin_file(dev, &bin_attr_pci_config);
4011 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4012 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4013 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
4014}
4015
4016#ifdef CONFIG_INET
4017
4018#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4019
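/*
 * Walk the IPv4 addresses of @dev and ask the firmware to add
 * (NETDEV_UP) or remove (NETDEV_DOWN) each one via qlcnic_config_ipaddr().
 */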
af19b491 4020static void
aec1e845
AKS
4021qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4022 struct net_device *dev, unsigned long event)
af19b491
AKS
4023{
4024 struct in_device *indev;
af19b491 4025
af19b491
AKS
4026 indev = in_dev_get(dev);
4027 if (!indev)
4028 return;
4029
4030 for_ifa(indev) {
4031 switch (event) {
4032 case NETDEV_UP:
4033 qlcnic_config_ipaddr(adapter,
4034 ifa->ifa_address, QLCNIC_IP_UP);
4035 break;
4036 case NETDEV_DOWN:
4037 qlcnic_config_ipaddr(adapter,
4038 ifa->ifa_address, QLCNIC_IP_DOWN);
4039 break;
4040 default:
4041 break;
4042 }
4043 } endfor_ifa(indev);
4044
4045 in_dev_put(indev);
af19b491
AKS
4046}
4047
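/*
 * Apply @event (NETDEV_UP/NETDEV_DOWN) to the firmware IP address table
 * for the adapter's own netdev and for every device registered in its
 * VLAN group.
 */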
aec1e845
AKS
4048static void
4049qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4050{
4051 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4052 struct net_device *dev;
4053 u16 vid;
4054
4055 qlcnic_config_indev_addr(adapter, netdev, event);
4056
4057 if (!adapter->vlgrp)
4058 return;
4059
b738127d 4060 for (vid = 0; vid < VLAN_N_VID; vid++) {
aec1e845
AKS
4061 dev = vlan_group_get_device(adapter->vlgrp, vid);
4062 if (!dev)
4063 continue;
4064
4065 qlcnic_config_indev_addr(adapter, dev, event);
4066 }
4067}
4068
af19b491
AKS
4069static int qlcnic_netdev_event(struct notifier_block *this,
4070 unsigned long event, void *ptr)
4071{
4072 struct qlcnic_adapter *adapter;
4073 struct net_device *dev = (struct net_device *)ptr;
4074
4075recheck:
4076 if (dev == NULL)
4077 goto done;
4078
4079 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4080 dev = vlan_dev_real_dev(dev);
4081 goto recheck;
4082 }
4083
4084 if (!is_qlcnic_netdev(dev))
4085 goto done;
4086
4087 adapter = netdev_priv(dev);
4088
4089 if (!adapter)
4090 goto done;
4091
8a15ad1f 4092 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4093 goto done;
4094
aec1e845 4095 qlcnic_config_indev_addr(adapter, dev, event);
af19b491
AKS
4096done:
4097 return NOTIFY_DONE;
4098}
4099
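/*
 * inetaddr notifier: locate the owning net_device (resolving VLAN
 * devices), verify it is an active qlcnic netdev, and add or remove the
 * single address that triggered the event.
 */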
4100static int
4101qlcnic_inetaddr_event(struct notifier_block *this,
4102 unsigned long event, void *ptr)
4103{
4104 struct qlcnic_adapter *adapter;
4105 struct net_device *dev;
4106
4107 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4108
4109 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4110
4111recheck:
aec1e845 4112 if (dev == NULL)
af19b491
AKS
4113 goto done;
4114
4115 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4116 dev = vlan_dev_real_dev(dev);
4117 goto recheck;
4118 }
4119
4120 if (!is_qlcnic_netdev(dev))
4121 goto done;
4122
4123 adapter = netdev_priv(dev);
4124
251a84c9 4125 if (!adapter)
af19b491
AKS
4126 goto done;
4127
8a15ad1f 4128 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4129 goto done;
4130
4131 switch (event) {
4132 case NETDEV_UP:
4133 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4134 break;
4135 case NETDEV_DOWN:
4136 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4137 break;
4138 default:
4139 break;
4140 }
4141
4142done:
4143 return NOTIFY_DONE;
4144}
4145
4146static struct notifier_block qlcnic_netdev_cb = {
4147 .notifier_call = qlcnic_netdev_event,
4148};
4149
4150static struct notifier_block qlcnic_inetaddr_cb = {
4151 .notifier_call = qlcnic_inetaddr_event,
4152};
4153#else
4154static void
aec1e845 4155qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
af19b491
AKS
4156{ }
4157#endif
451724c8
SC
4158static struct pci_error_handlers qlcnic_err_handler = {
4159 .error_detected = qlcnic_io_error_detected,
4160 .slot_reset = qlcnic_io_slot_reset,
4161 .resume = qlcnic_io_resume,
4162};
af19b491
AKS
4163
4164static struct pci_driver qlcnic_driver = {
4165 .name = qlcnic_driver_name,
4166 .id_table = qlcnic_pci_tbl,
4167 .probe = qlcnic_probe,
4168 .remove = __devexit_p(qlcnic_remove),
4169#ifdef CONFIG_PM
4170 .suspend = qlcnic_suspend,
4171 .resume = qlcnic_resume,
4172#endif
451724c8
SC
4173 .shutdown = qlcnic_shutdown,
4174 .err_handler = &qlcnic_err_handler
4175
af19b491
AKS
4176};
4177
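/*
 * Module init: create the driver workqueue, register the netdevice and
 * inetaddr notifiers (CONFIG_INET), then register the PCI driver,
 * unwinding the notifiers and workqueue if registration fails.
 */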
4178static int __init qlcnic_init_module(void)
4179{
0cf3a14c 4180 int ret;
af19b491
AKS
4181
4182 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4183
f7ec804a
AKS
4184 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4185 if (qlcnic_wq == NULL) {
4186 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4187 return -ENOMEM;
4188 }
4189
af19b491
AKS
4190#ifdef CONFIG_INET
4191 register_netdevice_notifier(&qlcnic_netdev_cb);
4192 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4193#endif
4194
0cf3a14c
AKS
4195 ret = pci_register_driver(&qlcnic_driver);
4196 if (ret) {
4197#ifdef CONFIG_INET
4198 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4199 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4200#endif
f7ec804a 4201 destroy_workqueue(qlcnic_wq);
0cf3a14c 4202 }
af19b491 4203
0cf3a14c 4204 return ret;
af19b491
AKS
4205}
4206
4207module_init(qlcnic_init_module);
4208
4209static void __exit qlcnic_exit_module(void)
4210{
4211
4212 pci_unregister_driver(&qlcnic_driver);
4213
4214#ifdef CONFIG_INET
4215 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4216 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4217#endif
f7ec804a 4218 destroy_workqueue(qlcnic_wq);
af19b491
AKS
4219}
4220
4221module_exit(qlcnic_exit_module);