/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
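
/* Usage example: "modprobe cxgb3 msi=0" forces legacy pin interrupts. */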

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 * link_report - show link status and link speed/duplex
 * @dev: the net device whose link status is to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes.  The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 * t3_os_phymod_changed - handle PHY module changes
 * @adap: the adapter whose PHY reported the module change
 * @port_id: the port index of the PHY reporting the module change
 *
 * This is the OS-dependent handler for PHY module changes.  It is
 * invoked when a PHY module is removed or inserted for any OS-specific
 * processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
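
/*
 * For illustration (hypothetical interface names): on a two-port adapter
 * with two queue sets per port, vector 0 takes the adapter's name and the
 * data vectors become "eth0-0", "eth0-1", "eth1-2", "eth1-3", the numeric
 * suffix being the absolute queue set index (first_qset + i).
 */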

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
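
/*
 * init_tp_parity() below primes the T3's on-chip SMT, L2T, and routing
 * tables so that their parity bits become valid: it writes every entry
 * once via management messages, then sends a final CPL_SET_TCB_FIELD and
 * waits for all the replies.  await_mgmt_replies() above bounds each wait
 * to a handful of 10 ms polls (about 50 ms) before reporting -ETIMEDOUT.
 */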

static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
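
/*
 * Worked example of the mapping above (hypothetical queue counts): with
 * nq0 = 4 and nq1 = 2, the first half of rspq_map cycles 0,1,2,3,0,...
 * for port 0 and the second half cycles 4,5,4,5,... for port 1, i.e.
 * port 1's hash buckets land on queue sets nq0 .. nq0 + nq1 - 1.
 */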

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 * set_qset_lro - Turn a queue set's LRO capability on and off
 * @dev: the device the qset is attached to
 * @qset_idx: the queue set index
 * @val: the LRO switch
 *
 * Sets LRO on or off for a particular queue set, updating both the
 * queue set's own state and the SGE parameters kept for it.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
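
/*
 * Rate reconstruction above, for reference: each 16-bit scheduler word
 * packs bpt (bytes per tick) in bits 15:8 and cpt (core-clock cycles per
 * tick) in bits 7:0.  Assuming cclk is kept in kHz (hence the * 1000),
 * v = clock-Hz / cpt gives ticks per second, v * bpt gives bytes per
 * second, and dividing by 125 converts bytes per second to Kbps.
 */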

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
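
/*
 * The two ports' MTUs are packed into a single 32-bit register word:
 * port 0 in bits 15:0 and port 1 in bits 31:16, so two 1500-byte MTUs
 * would be written as 0x05dc05dc.
 */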

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}
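
/*
 * bind_qsets() below issues one PKTSCHED_SET work request per queue set:
 * scheduler 1 with lo = hi = -1 (leaving the firmware defaults) and each
 * queue set bound to its owning port for TX scheduling.
 */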

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
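
/*
 * For illustration (hypothetical version numbers): with FW_VERSION_MAJOR
 * 7, FW_VERSION_MINOR 10 and FW_VERSION_MICRO 0, FW_FNAME expands to
 * "cxgb3/t3fw-7.10.0.bin".  TPSRAM_NAME keeps a %c conversion that
 * update_tpsram() later fills with the chip revision from t3rev2char().
 */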
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret;

	/* "%s" keeps the firmware name from being parsed as a format string */
	snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, taking the trailing checksum word into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}
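
/*
 * Note the cache layout built above: each big-endian 32-bit firmware word
 * becomes two consecutive u16 entries, high half first, which is
 * presumably the form the EDC programming code in the PHY drivers
 * consumes from phy->phy_cache.
 */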

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_scheduled_work();

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
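
/*
 * reg_block_dump() copies the register range [start, end] into the
 * snapshot buffer at the same byte offsets, so any gaps between the
 * blocks dumped by get_regs() below stay zeroed by its initial memset().
 */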

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
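
/*
 * Example: speed_duplex_to_caps(SPEED_100, DUPLEX_HALF) returns
 * SUPPORTED_100baseT_Half, which set_settings() below checks against
 * lc->supported before accepting a fixed speed/duplex request.
 */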

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
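
/*
 * A usage sketch: forcing 100 Mb/s full duplex from user space
 * ("ethtool -s eth0 speed 100 duplex full autoneg off") takes the
 * AUTONEG_DISABLE branch above.  That branch deliberately rejects
 * SPEED_1000, the usual reason being that 1000BASE-T link-up depends on
 * autonegotiation for master/slave resolution.
 */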

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
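
/*
 * Example flow: with autoneg on, "ethtool -A eth0 rx on tx off" only updates
 * the requested pause bits and renegotiates via t3_link_start(); with
 * autoneg off the MAC is reprogrammed directly, leaving speed and duplex
 * untouched (the -1 arguments above).
 */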

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		int i;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
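
/*
 * Note for illustration: the new sizes apply to every queue set owned by
 * this port and take effect only on the next full init -- with the adapter
 * already up, a resize such as "ethtool -G eth0 tx 2048" returns -EBUSY
 * from the FULL_INIT_DONE check above.
 */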

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
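
/*
 * Both paths above touch only qset 0, so "ethtool -C eth0 rx-usecs 50"
 * adjusts (and "ethtool -c eth0" reports) the first queue set only;
 * per-qset interrupt latency is instead set through the
 * CHELSIO_SET_QSET_PARAMS ioctl below.
 */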

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;
	u8 *buf;

	buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
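
/*
 * Worked example of the alignment logic above: a write of len 3 at offset 2
 * is widened to aligned_offset 0, aligned_len 8; the first and last words of
 * that window are read back, the three payload bytes are merged in at byte
 * offset 2, and both words are rewritten with write protection lifted around
 * the update.
 */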

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};

/* A negative value means "leave this parameter unchanged" and is accepted. */
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
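
/*
 * The extension ioctls below use negative fields as "leave unchanged"
 * sentinels, so in_range(-1, lo, hi) deliberately returns true: e.g.
 * in_range(t.intr_lat, 0, M_NEWTIMER) passes for an intr_lat of -1, and the
 * later "if (t.intr_lat >= 0)" test then skips the actual update.
 */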

static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))
					return -EINVAL;
			}

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0)
			set_qset_lro(dev, t.qset_idx, t.lro);

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Should we sanity-check t.len here? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;	/* not a power of 4 in [16KB, 16MB] */
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/*
		 * Convert phy_id from the older packed PRTAD/DEVAD format,
		 * e.g. 0x0105 (prtad 1, devad 5), to a clause-45 phy_id.
		 */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	ret = t3_mac_set_mtu(&pi->mac, new_mtu);
	if (ret)
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
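
/*
 * Illustrative note: Rx handling for a response queue runs under q->lock,
 * so taking and immediately dropping each lock acts as a barrier -- any
 * handler that was mid-flight when we were called must have completed by
 * the time the loop finishes.
 */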

static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}

static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}

static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMACs to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	t3_phy_intr_handler(adapter);

	/* Re-enable link fault interrupts */
	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}

static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * Processes a fatal error: bring the ports down, reset the chip, then bring
 * the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
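
/*
 * Worked example (values assumed for illustration, with SGE_QSETS taken to
 * be 8): with 2 ports, 8 online CPUs and 9 MSI-X vectors, nqsets starts at
 * 8, is halved to 4 because 2 * 8 exceeds SGE_QSETS, and each port then
 * gets 4 queue sets, with first_qset 0 and 4 respectively.
 */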

static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i, err;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0)
		pci_disable_msix(adap->pdev);

	if (!err && vectors < (adap->params.nports + 1)) {
		pci_disable_msix(adap->pdev);
		err = -1;
	}

	if (!err) {
		for (i = 0; i < vectors; ++i)
			adap->msix_info[i].vec = entries[i].vector;
		adap->msix_nvectors = vectors;
	}

	return err;
}
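
/*
 * The retry loop relies on the old pci_enable_msix() contract: a positive
 * return is the number of vectors the platform could actually provide, so
 * e.g. asking for 9 on a system with only 4 free vectors returns 4, and the
 * next iteration retries with exactly that many.
 */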

static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};

static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}
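
/*
 * Example (addresses hypothetical): a port MAC of 00:07:43:12:34:56 yields
 * the iSCSI MAC 00:07:43:92:34:56 -- setting the top bit of byte 3 gives
 * the offload a distinct station address outside the OUI octets while
 * staying derivable from the port's own MAC.
 */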

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready.  Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);