/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
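/*
 * Example (editor's illustration, not driver code): loading the module with
 * "modprobe cxgb3 msi=0" forces legacy pin interrupts; the current value is
 * visible under /sys/module/cxgb3/parameters/msi thanks to the 0644 mode
 * passed to module_param() above.
 */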

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 * link_report - show link status and link speed/duplex
 * @dev: the port whose settings are to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                            pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes.  The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 * t3_os_phymod_changed - handle PHY module changes
 * @adap: the adapter whose PHY reported the module change
 * @port_id: the port index of the PHY reporting the module change
 *
 * This is the OS-dependent handler for PHY module changes.  It is
 * invoked when a PHY module is removed or inserted for any OS-specific
 * processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}
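/*
 * Illustrative example (editor's note, not driver code): for a port named
 * "eth0" with two queue sets starting at qset 0, the loop above produces the
 * vector names "eth0-0" and "eth0-1", while msix_info[0] keeps the bare
 * adapter name for the asynchronous-event vector.
 */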

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
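/*
 * Worked example (editor's illustration): with two ports of two queue sets
 * each, nq0 = nq1 = 2, so the first half of rspq_map cycles 0,1,0,1,... and
 * the second half cycles 2,3,2,3,..., i.e. port 1's hash values land on its
 * own response queues (offset by nq0) while port 0 uses queues 0 and 1.
 */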

static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 * set_qset_lro - Turn a queue set's LRO capability on and off
 * @dev: the device the qset is attached to
 * @qset_idx: the queue set index
 * @val: the LRO switch
 *
 * Sets LRO on or off for a particular queue set.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t (*format)(struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format)(to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set)(to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
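/*
 * Illustrative usage (editor's note, not driver code): once this group is
 * registered against a port's net device (sysfs_create_group() in the probe
 * path), the attributes appear in that netdev's sysfs directory, e.g.
 *   cat /sys/class/net/eth0/cam_size
 *   echo 8192 > /sys/class/net/eth0/nfilters    (root only, before full init)
 */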

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
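        /*
         * Editor's note on the math below (assuming vpd.cclk is in kHz): the
         * scheduler emits bpt bytes every cpt core-clock ticks, so
         * v = (cclk * 1000) / cpt is the number of scheduling intervals per
         * second and the rate is v * bpt * 8 / 1000 = (v * bpt) / 125 Kbps.
         */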
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
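/*
 * Illustrative usage (editor's note, not driver code): offload_attr_group is
 * registered in offload_open() below, so with offload active a TX scheduler
 * can be capped from user space, e.g.
 *   echo 100000 > /sys/class/net/eth0/sched0    # cap scheduler 0 at ~100 Mbps
 * (values are in Kbps; anything above 10000000 is rejected by tm_attr_store).
 */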

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
        __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
        __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
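/*
 * Editor's illustration (hypothetical numbers; the real ones come from
 * version.h): if FW_VERSION_MAJOR/MINOR/MICRO were 7/10/0, FW_FNAME would
 * expand to "cxgb3/t3fw-7.10.0.bin".  TPSRAM_NAME keeps a %c so that the
 * chip revision character from t3rev2char() can be substituted, e.g.
 * "cxgb3/t3c_psram-<TP version>.bin".
 */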
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        char buf[64];
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret;

        snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx));

        ret = request_firmware(&fw, buf, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }

        /* check the size, taking the trailing checksum word into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4 ; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                      0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                if (err) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_scheduled_work();

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        dev->real_num_tx_queues = pi->nqsets;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
                tot += adapter->sge.qs[i].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        /* the three Lro* counters are no longer maintained; report zeroes */
        *data++ = 0;
        *data++ = 0;
        *data++ = 0;
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;

        *data++ = s->link_faults;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
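        /*
         * Editor's illustration: a revision-2 chip on a PCIe card yields
         * 3 | (2 << 10) | (1 << 31) = 0x80000803.
         */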

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i;

        if (data == 0)
                data = 2;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}
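/*
 * Illustrative usage (editor's note, not driver code): cxgb3_phys_id backs
 * the ethtool identify operation, so "ethtool -p eth0 5" toggles the
 * GPIO-driven LED every 500 ms for roughly 5 seconds (data * 2 half-periods).
 */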

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.mdio.prtad;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}
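/*
 * Editor's illustration: speed_duplex_to_caps(SPEED_1000, DUPLEX_FULL)
 * returns SUPPORTED_1000baseT_Full, while an unsupported combination such as
 * SPEED_10000 with half duplex falls through and returns 0.
 */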
1812
1813#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1814 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1815 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1816 ADVERTISED_10000baseT_Full)
1817
1818static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1819{
1820 struct port_info *p = netdev_priv(dev);
1821 struct link_config *lc = &p->link_config;
1822
9b1e3656
DLR
1823 if (!(lc->supported & SUPPORTED_Autoneg)) {
1824 /*
1825 * PHY offers a single speed/duplex. See if that's what's
1826 * being requested.
1827 */
1828 if (cmd->autoneg == AUTONEG_DISABLE) {
1829 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1830 if (lc->supported & cap)
1831 return 0;
1832 }
1833 return -EINVAL;
1834 }
1835
1836 if (cmd->autoneg == AUTONEG_DISABLE) {
1837 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1838
1839 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1840 return -EINVAL;
1841 lc->requested_speed = cmd->speed;
1842 lc->requested_duplex = cmd->duplex;
1843 lc->advertising = 0;
1844 } else {
1845 cmd->advertising &= ADVERTISED_MASK;
1846 cmd->advertising &= lc->supported;
1847 if (!cmd->advertising)
1848 return -EINVAL;
1849 lc->requested_speed = SPEED_INVALID;
1850 lc->requested_duplex = DUPLEX_INVALID;
1851 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1852 }
1853 lc->autoneg = cmd->autoneg;
1854 if (netif_running(dev))
1855 t3_link_start(&p->phy, &p->mac, lc);
1856 return 0;
1857}
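/*
 * For reference (standard ethtool CLI semantics assumed), the two
 * branches above correspond to the usual invocations:
 *
 *	ethtool -s eth0 speed 100 duplex full autoneg off  # forced branch
 *	ethtool -s eth0 advertise 0x020 autoneg on         # autoneg branch
 *
 * Note the forced branch rejects SPEED_1000: 1Gb/s can only be
 * requested via autonegotiation here.
 */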
1858
1859static void get_pauseparam(struct net_device *dev,
1860 struct ethtool_pauseparam *epause)
1861{
1862 struct port_info *p = netdev_priv(dev);
1863
1864 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1865 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1866 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1867}
1868
1869static int set_pauseparam(struct net_device *dev,
1870 struct ethtool_pauseparam *epause)
1871{
1872 struct port_info *p = netdev_priv(dev);
1873 struct link_config *lc = &p->link_config;
1874
1875 if (epause->autoneg == AUTONEG_DISABLE)
1876 lc->requested_fc = 0;
1877 else if (lc->supported & SUPPORTED_Autoneg)
1878 lc->requested_fc = PAUSE_AUTONEG;
1879 else
1880 return -EINVAL;
1881
1882 if (epause->rx_pause)
1883 lc->requested_fc |= PAUSE_RX;
1884 if (epause->tx_pause)
1885 lc->requested_fc |= PAUSE_TX;
1886 if (lc->autoneg == AUTONEG_ENABLE) {
1887 if (netif_running(dev))
1888 t3_link_start(&p->phy, &p->mac, lc);
1889 } else {
1890 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1891 if (netif_running(dev))
1892 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1893 }
1894 return 0;
1895}
1896
1897static u32 get_rx_csum(struct net_device *dev)
1898{
1899 struct port_info *p = netdev_priv(dev);
1900
1901 return p->rx_offload & T3_RX_CSUM;
1902}
1903
1904static int set_rx_csum(struct net_device *dev, u32 data)
1905{
1906 struct port_info *p = netdev_priv(dev);
1907
1908 if (data) {
1909 p->rx_offload |= T3_RX_CSUM;
1910 } else {
1911 int i;
1912
1913 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1914 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1915 set_qset_lro(dev, i, 0);
1916 }
1917 return 0;
1918}
1919
1920static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1921{
1922 struct port_info *pi = netdev_priv(dev);
1923 struct adapter *adapter = pi->adapter;
1924 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1925
1926 e->rx_max_pending = MAX_RX_BUFFERS;
1927 e->rx_mini_max_pending = 0;
1928 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1929 e->tx_max_pending = MAX_TXQ_ENTRIES;
1930
1931 e->rx_pending = q->fl_size;
1932 e->rx_mini_pending = q->rspq_size;
1933 e->rx_jumbo_pending = q->jumbo_size;
1934 e->tx_pending = q->txq_size[0];
1935}
1936
1937static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1938{
1939 struct port_info *pi = netdev_priv(dev);
1940 struct adapter *adapter = pi->adapter;
1941 struct qset_params *q;
1942 int i;
1943
1944 if (e->rx_pending > MAX_RX_BUFFERS ||
1945 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1946 e->tx_pending > MAX_TXQ_ENTRIES ||
1947 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1948 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1949 e->rx_pending < MIN_FL_ENTRIES ||
1950 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1951 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1952 return -EINVAL;
1953
1954 if (adapter->flags & FULL_INIT_DONE)
1955 return -EBUSY;
1956
1957 q = &adapter->params.sge.qset[pi->first_qset];
1958 for (i = 0; i < pi->nqsets; ++i, ++q) {
1959 q->rspq_size = e->rx_mini_pending;
1960 q->fl_size = e->rx_pending;
1961 q->jumbo_size = e->rx_jumbo_pending;
1962 q->txq_size[0] = e->tx_pending;
1963 q->txq_size[1] = e->tx_pending;
1964 q->txq_size[2] = e->tx_pending;
1965 }
1966 return 0;
1967}
1968
1969static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1970{
1971 struct port_info *pi = netdev_priv(dev);
1972 struct adapter *adapter = pi->adapter;
1973 struct qset_params *qsp = &adapter->params.sge.qset[0];
1974 struct sge_qset *qs = &adapter->sge.qs[0];
1975
1976 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1977 return -EINVAL;
1978
1979 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1980 t3_update_qset_coalesce(qs, qsp);
1981 return 0;
1982}
1983
1984static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1985{
1986 struct port_info *pi = netdev_priv(dev);
1987 struct adapter *adapter = pi->adapter;
1988 struct qset_params *q = adapter->params.sge.qset;
1989
1990 c->rx_coalesce_usecs = q->coalesce_usecs;
1991 return 0;
1992}
1993
1994static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1995 u8 * data)
1996{
1997 struct port_info *pi = netdev_priv(dev);
1998 struct adapter *adapter = pi->adapter;
1999 int i, err = 0;
2000
2001 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2002 if (!buf)
2003 return -ENOMEM;
2004
2005 e->magic = EEPROM_MAGIC;
2006 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2007 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
2008
2009 if (!err)
2010 memcpy(data, buf + e->offset, e->len);
2011 kfree(buf);
2012 return err;
2013}
2014
2015static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2016 u8 * data)
2017{
2018 struct port_info *pi = netdev_priv(dev);
2019 struct adapter *adapter = pi->adapter;
2020 u32 aligned_offset, aligned_len;
2021 __le32 *p;
2022 u8 *buf;
2023 int err;
2024
2025 if (eeprom->magic != EEPROM_MAGIC)
2026 return -EINVAL;
2027
2028 aligned_offset = eeprom->offset & ~3;
2029 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2030
2031 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2032 buf = kmalloc(aligned_len, GFP_KERNEL);
2033 if (!buf)
2034 return -ENOMEM;
2035 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2036 if (!err && aligned_len > 4)
2037 err = t3_seeprom_read(adapter,
2038 aligned_offset + aligned_len - 4,
2039 (__le32 *) & buf[aligned_len - 4]);
2040 if (err)
2041 goto out;
2042 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2043 } else
2044 buf = data;
2045
2046 err = t3_seeprom_wp(adapter, 0);
2047 if (err)
2048 goto out;
2049
2050 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2051 err = t3_seeprom_write(adapter, aligned_offset, *p);
2052 aligned_offset += 4;
2053 }
2054
2055 if (!err)
2056 err = t3_seeprom_wp(adapter, 1);
2057out:
2058 if (buf != data)
2059 kfree(buf);
2060 return err;
2061}
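/*
 * Worked example of the word alignment above (illustrative values):
 * a write with eeprom->offset = 6 and eeprom->len = 5 gives
 *
 *	aligned_offset = 6 & ~3           = 4
 *	aligned_len    = (5 + 2 + 3) & ~3 = 8
 *
 * so the code reads the 32-bit words at offsets 4 and 8, patches the
 * five user bytes into that copy at buf + 2, and writes both words
 * back with the EEPROM write protect temporarily lifted.
 */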
2062
2063static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2064{
2065 wol->supported = 0;
2066 wol->wolopts = 0;
2067 memset(&wol->sopass, 0, sizeof(wol->sopass));
2068}
2069
2070static const struct ethtool_ops cxgb_ethtool_ops = {
2071 .get_settings = get_settings,
2072 .set_settings = set_settings,
2073 .get_drvinfo = get_drvinfo,
2074 .get_msglevel = get_msglevel,
2075 .set_msglevel = set_msglevel,
2076 .get_ringparam = get_sge_param,
2077 .set_ringparam = set_sge_param,
2078 .get_coalesce = get_coalesce,
2079 .set_coalesce = set_coalesce,
2080 .get_eeprom_len = get_eeprom_len,
2081 .get_eeprom = get_eeprom,
2082 .set_eeprom = set_eeprom,
2083 .get_pauseparam = get_pauseparam,
2084 .set_pauseparam = set_pauseparam,
2085 .get_rx_csum = get_rx_csum,
2086 .set_rx_csum = set_rx_csum,
2087 .set_tx_csum = ethtool_op_set_tx_csum,
2088 .set_sg = ethtool_op_set_sg,
2089 .get_link = ethtool_op_get_link,
2090 .get_strings = get_strings,
2091 .phys_id = cxgb3_phys_id,
2092 .nway_reset = restart_autoneg,
2093 .get_sset_count = get_sset_count,
2094 .get_ethtool_stats = get_stats,
2095 .get_regs_len = get_regs_len,
2096 .get_regs = get_regs,
2097 .get_wol = get_wol,
2098 .set_tso = ethtool_op_set_tso,
2099};
2100
2101static int in_range(int val, int lo, int hi)
2102{
2103 return val < 0 || (val <= hi && val >= lo);
2104}
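/*
 * Note that a negative val passes in_range() unconditionally: the ioctl
 * handlers below use negative fields as "leave this setting unchanged"
 * sentinels.  For example in_range(-1, MIN_TXQ_ENTRIES, MAX_TXQ_ENTRIES)
 * is true, while an explicit out-of-range request such as
 * in_range(0, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES) is rejected.
 */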
2105
2106static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2107{
2108 struct port_info *pi = netdev_priv(dev);
2109 struct adapter *adapter = pi->adapter;
2110 u32 cmd;
2111 int ret;
2112
2113 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2114 return -EFAULT;
2115
2116 switch (cmd) {
2117 case CHELSIO_SET_QSET_PARAMS:{
2118 int i;
2119 struct qset_params *q;
2120 struct ch_qset_params t;
2121 int q1 = pi->first_qset;
2122 int nqsets = pi->nqsets;
2123
2124 if (!capable(CAP_NET_ADMIN))
2125 return -EPERM;
2126 if (copy_from_user(&t, useraddr, sizeof(t)))
2127 return -EFAULT;
2128 if (t.qset_idx >= SGE_QSETS)
2129 return -EINVAL;
2130 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2131 !in_range(t.cong_thres, 0, 255) ||
2132 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2133 MAX_TXQ_ENTRIES) ||
2134 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2135 MAX_TXQ_ENTRIES) ||
2136 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2137 MAX_CTRL_TXQ_ENTRIES) ||
2138 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2139 MAX_RX_BUFFERS) ||
2140 !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2141 MAX_RX_JUMBO_BUFFERS) ||
2142 !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2143 MAX_RSPQ_ENTRIES))
2144 return -EINVAL;
2145
2146 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
2147 for_each_port(adapter, i) {
2148 pi = adap2pinfo(adapter, i);
2149 if (t.qset_idx >= pi->first_qset &&
2150 t.qset_idx < pi->first_qset + pi->nqsets &&
2151 !(pi->rx_offload & T3_RX_CSUM))
2152 return -EINVAL;
2153 }
2154
2155 if ((adapter->flags & FULL_INIT_DONE) &&
2156 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2157 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2158 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2159 t.polling >= 0 || t.cong_thres >= 0))
2160 return -EBUSY;
2161
2162 /* Allow setting of any available qset when offload enabled */
2163 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2164 q1 = 0;
2165 for_each_port(adapter, i) {
2166 pi = adap2pinfo(adapter, i);
2167 nqsets += pi->first_qset + pi->nqsets;
2168 }
2169 }
2170
2171 if (t.qset_idx < q1)
2172 return -EINVAL;
2173 if (t.qset_idx > q1 + nqsets - 1)
2174 return -EINVAL;
2175
2176 q = &adapter->params.sge.qset[t.qset_idx];
2177
2178 if (t.rspq_size >= 0)
2179 q->rspq_size = t.rspq_size;
2180 if (t.fl_size[0] >= 0)
2181 q->fl_size = t.fl_size[0];
2182 if (t.fl_size[1] >= 0)
2183 q->jumbo_size = t.fl_size[1];
2184 if (t.txq_size[0] >= 0)
2185 q->txq_size[0] = t.txq_size[0];
2186 if (t.txq_size[1] >= 0)
2187 q->txq_size[1] = t.txq_size[1];
2188 if (t.txq_size[2] >= 0)
2189 q->txq_size[2] = t.txq_size[2];
2190 if (t.cong_thres >= 0)
2191 q->cong_thres = t.cong_thres;
2192 if (t.intr_lat >= 0) {
2193 struct sge_qset *qs =
2194 &adapter->sge.qs[t.qset_idx];
2195
2196 q->coalesce_usecs = t.intr_lat;
2197 t3_update_qset_coalesce(qs, q);
2198 }
2199 if (t.polling >= 0) {
2200 if (adapter->flags & USING_MSIX)
2201 q->polling = t.polling;
2202 else {
2203 /* No polling with INTx for T3A */
2204 if (adapter->params.rev == 0 &&
2205 !(adapter->flags & USING_MSI))
2206 t.polling = 0;
2207
2208 for (i = 0; i < SGE_QSETS; i++) {
2209 q = &adapter->params.sge.
2210 qset[i];
2211 q->polling = t.polling;
2212 }
2213 }
2214 }
2215 if (t.lro >= 0)
2216 set_qset_lro(dev, t.qset_idx, t.lro);
2217
2218 break;
2219 }
2220 case CHELSIO_GET_QSET_PARAMS:{
2221 struct qset_params *q;
2222 struct ch_qset_params t;
2223 int q1 = pi->first_qset;
2224 int nqsets = pi->nqsets;
2225 int i;
2226
2227 if (copy_from_user(&t, useraddr, sizeof(t)))
2228 return -EFAULT;
2229
2230 /* Display qsets for all ports when offload enabled */
2231 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2232 q1 = 0;
2233 for_each_port(adapter, i) {
2234 pi = adap2pinfo(adapter, i);
2235 nqsets = pi->first_qset + pi->nqsets;
2236 }
2237 }
2238
2239 if (t.qset_idx >= nqsets)
2240 return -EINVAL;
2241
2242 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2243 t.rspq_size = q->rspq_size;
2244 t.txq_size[0] = q->txq_size[0];
2245 t.txq_size[1] = q->txq_size[1];
2246 t.txq_size[2] = q->txq_size[2];
2247 t.fl_size[0] = q->fl_size;
2248 t.fl_size[1] = q->jumbo_size;
2249 t.polling = q->polling;
2250 t.lro = q->lro;
2251 t.intr_lat = q->coalesce_usecs;
2252 t.cong_thres = q->cong_thres;
2253 t.qnum = q1;
2254
2255 if (adapter->flags & USING_MSIX)
2256 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2257 else
2258 t.vector = adapter->pdev->irq;
2259
2260 if (copy_to_user(useraddr, &t, sizeof(t)))
2261 return -EFAULT;
2262 break;
2263 }
2264 case CHELSIO_SET_QSET_NUM:{
2265 struct ch_reg edata;
2266 unsigned int i, first_qset = 0, other_qsets = 0;
2267
2268 if (!capable(CAP_NET_ADMIN))
2269 return -EPERM;
2270 if (adapter->flags & FULL_INIT_DONE)
2271 return -EBUSY;
2272 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2273 return -EFAULT;
2274 if (edata.val < 1 ||
2275 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2276 return -EINVAL;
2277
2278 for_each_port(adapter, i)
2279 if (adapter->port[i] && adapter->port[i] != dev)
2280 other_qsets += adap2pinfo(adapter, i)->nqsets;
2281
2282 if (edata.val + other_qsets > SGE_QSETS)
2283 return -EINVAL;
2284
2285 pi->nqsets = edata.val;
2286
2287 for_each_port(adapter, i)
2288 if (adapter->port[i]) {
2289 pi = adap2pinfo(adapter, i);
2290 pi->first_qset = first_qset;
2291 first_qset += pi->nqsets;
2292 }
2293 break;
2294 }
2295 case CHELSIO_GET_QSET_NUM:{
2296 struct ch_reg edata;
2297
2298 edata.cmd = CHELSIO_GET_QSET_NUM;
2299 edata.val = pi->nqsets;
2300 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2301 return -EFAULT;
2302 break;
2303 }
2304 case CHELSIO_LOAD_FW:{
2305 u8 *fw_data;
2306 struct ch_mem_range t;
2307
2308 if (!capable(CAP_SYS_RAWIO))
2309 return -EPERM;
2310 if (copy_from_user(&t, useraddr, sizeof(t)))
2311 return -EFAULT;
2312 /* Check t.len sanity ? */
2313 fw_data = kmalloc(t.len, GFP_KERNEL);
2314 if (!fw_data)
2315 return -ENOMEM;
2316
2317 if (copy_from_user
2318 (fw_data, useraddr + sizeof(t), t.len)) {
2319 kfree(fw_data);
2320 return -EFAULT;
2321 }
2322
2323 ret = t3_load_fw(adapter, fw_data, t.len);
2324 kfree(fw_data);
2325 if (ret)
2326 return ret;
2327 break;
2328 }
2329 case CHELSIO_SETMTUTAB:{
2330 struct ch_mtus m;
2331 int i;
2332
2333 if (!is_offload(adapter))
2334 return -EOPNOTSUPP;
2335 if (!capable(CAP_NET_ADMIN))
2336 return -EPERM;
2337 if (offload_running(adapter))
2338 return -EBUSY;
2339 if (copy_from_user(&m, useraddr, sizeof(m)))
2340 return -EFAULT;
2341 if (m.nmtus != NMTUS)
2342 return -EINVAL;
2343 if (m.mtus[0] < 81) /* accommodate SACK */
2344 return -EINVAL;
2345
2346 /* MTUs must be in ascending order */
2347 for (i = 1; i < NMTUS; ++i)
2348 if (m.mtus[i] < m.mtus[i - 1])
2349 return -EINVAL;
2350
2351 memcpy(adapter->params.mtus, m.mtus,
2352 sizeof(adapter->params.mtus));
2353 break;
2354 }
2355 case CHELSIO_GET_PM:{
2356 struct tp_params *p = &adapter->params.tp;
2357 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2358
2359 if (!is_offload(adapter))
2360 return -EOPNOTSUPP;
2361 m.tx_pg_sz = p->tx_pg_size;
2362 m.tx_num_pg = p->tx_num_pgs;
2363 m.rx_pg_sz = p->rx_pg_size;
2364 m.rx_num_pg = p->rx_num_pgs;
2365 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2366 if (copy_to_user(useraddr, &m, sizeof(m)))
2367 return -EFAULT;
2368 break;
2369 }
2370 case CHELSIO_SET_PM:{
2371 struct ch_pm m;
2372 struct tp_params *p = &adapter->params.tp;
2373
2374 if (!is_offload(adapter))
2375 return -EOPNOTSUPP;
2376 if (!capable(CAP_NET_ADMIN))
2377 return -EPERM;
2378 if (adapter->flags & FULL_INIT_DONE)
2379 return -EBUSY;
2380 if (copy_from_user(&m, useraddr, sizeof(m)))
2381 return -EFAULT;
2382 if (!is_power_of_2(m.rx_pg_sz) ||
2383 !is_power_of_2(m.tx_pg_sz))
2384 return -EINVAL; /* not power of 2 */
2385 if (!(m.rx_pg_sz & 0x14000))
2386 return -EINVAL; /* not 16KB or 64KB */
2387 if (!(m.tx_pg_sz & 0x1554000))
2388 return -EINVAL;
2389 if (m.tx_num_pg == -1)
2390 m.tx_num_pg = p->tx_num_pgs;
2391 if (m.rx_num_pg == -1)
2392 m.rx_num_pg = p->rx_num_pgs;
2393 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2394 return -EINVAL;
2395 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2396 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2397 return -EINVAL;
2398 p->rx_pg_size = m.rx_pg_sz;
2399 p->tx_pg_size = m.tx_pg_sz;
2400 p->rx_num_pgs = m.rx_num_pg;
2401 p->tx_num_pgs = m.tx_num_pg;
2402 break;
2403 }
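/*
 * The magic page-size masks above decode as follows: 0x14000 has bits
 * 14 and 16 set, so rx_pg_sz must be 16KB or 64KB; 0x1554000 has bits
 * 14, 16, 18, 20, 22 and 24 set, so tx_pg_sz must be 16KB, 64KB, 256KB,
 * 1MB, 4MB or 16MB.  Together with the is_power_of_2() check, exactly
 * one of those bits can be set in each value.
 */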
2404 case CHELSIO_GET_MEM:{
2405 struct ch_mem_range t;
2406 struct mc7 *mem;
2407 u64 buf[32];
2408
2409 if (!is_offload(adapter))
2410 return -EOPNOTSUPP;
2411 if (!(adapter->flags & FULL_INIT_DONE))
2412 return -EIO; /* need the memory controllers */
2413 if (copy_from_user(&t, useraddr, sizeof(t)))
2414 return -EFAULT;
2415 if ((t.addr & 7) || (t.len & 7))
2416 return -EINVAL;
2417 if (t.mem_id == MEM_CM)
2418 mem = &adapter->cm;
2419 else if (t.mem_id == MEM_PMRX)
2420 mem = &adapter->pmrx;
2421 else if (t.mem_id == MEM_PMTX)
2422 mem = &adapter->pmtx;
2423 else
2424 return -EINVAL;
2425
2426 /*
2427 * Version scheme:
2428 * bits 0..9: chip version
2429 * bits 10..15: chip revision
2430 */
2431 t.version = 3 | (adapter->params.rev << 10);
2432 if (copy_to_user(useraddr, &t, sizeof(t)))
2433 return -EFAULT;
2434
2435 /*
2436 * Read 256 bytes at a time as len can be large and we don't
2437 * want to use huge intermediate buffers.
2438 */
2439 useraddr += sizeof(t); /* advance to start of buffer */
2440 while (t.len) {
2441 unsigned int chunk =
2442 min_t(unsigned int, t.len, sizeof(buf));
2443
2444 ret =
2445 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2446 buf);
2447 if (ret)
2448 return ret;
2449 if (copy_to_user(useraddr, buf, chunk))
2450 return -EFAULT;
2451 useraddr += chunk;
2452 t.addr += chunk;
2453 t.len -= chunk;
2454 }
2455 break;
2456 }
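/*
 * Example of the version encoding above: a revision-2 chip reports
 * t.version = 3 | (2 << 10) = 0x803, i.e. chip version 3 in bits 0..9
 * and chip revision 2 in bits 10..15.
 */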
2457 case CHELSIO_SET_TRACE_FILTER:{
2458 struct ch_trace t;
2459 const struct trace_params *tp;
2460
2461 if (!capable(CAP_NET_ADMIN))
2462 return -EPERM;
2463 if (!offload_running(adapter))
2464 return -EAGAIN;
2465 if (copy_from_user(&t, useraddr, sizeof(t)))
2466 return -EFAULT;
2467
2468 tp = (const struct trace_params *)&t.sip;
2469 if (t.config_tx)
2470 t3_config_trace_filter(adapter, tp, 0,
2471 t.invert_match,
2472 t.trace_tx);
2473 if (t.config_rx)
2474 t3_config_trace_filter(adapter, tp, 1,
2475 t.invert_match,
2476 t.trace_rx);
2477 break;
2478 }
2479 default:
2480 return -EOPNOTSUPP;
2481 }
2482 return 0;
2483}
2484
2485static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2486{
2487 struct mii_ioctl_data *data = if_mii(req);
2488 struct port_info *pi = netdev_priv(dev);
2489 struct adapter *adapter = pi->adapter;
2490
2491 switch (cmd) {
2492 case SIOCGMIIREG:
2493 case SIOCSMIIREG:
2494 /* Convert phy_id from older PRTAD/DEVAD format */
2495 if (is_10G(adapter) &&
2496 !mdio_phy_id_is_c45(data->phy_id) &&
2497 (data->phy_id & 0x1f00) &&
2498 !(data->phy_id & 0xe0e0))
2499 data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2500 data->phy_id & 0x1f);
2501 /* FALLTHRU */
2502 case SIOCGMIIPHY:
2503 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2504 case SIOCCHIOCTL:
2505 return cxgb_extension_ioctl(dev, req->ifr_data);
2506 default:
2507 return -EOPNOTSUPP;
2508 }
2509}
2510
2511static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2512{
2513 struct port_info *pi = netdev_priv(dev);
2514 struct adapter *adapter = pi->adapter;
2515 int ret;
2516
2517 if (new_mtu < 81) /* accommodate SACK */
2518 return -EINVAL;
2519 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2520 return ret;
2521 dev->mtu = new_mtu;
2522 init_port_mtus(adapter);
2523 if (adapter->params.rev == 0 && offload_running(adapter))
2524 t3_load_mtus(adapter, adapter->params.mtus,
2525 adapter->params.a_wnd, adapter->params.b_wnd,
2526 adapter->port[0]->mtu);
2527 return 0;
2528}
2529
2530static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2531{
2532 struct port_info *pi = netdev_priv(dev);
2533 struct adapter *adapter = pi->adapter;
2534 struct sockaddr *addr = p;
2535
2536 if (!is_valid_ether_addr(addr->sa_data))
2537 return -EINVAL;
2538
2539 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2540 t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2541 if (offload_running(adapter))
2542 write_smt_entry(adapter, pi->port_id);
2543 return 0;
2544}
2545
2546/**
2547 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2548 * @adap: the adapter
2549 * @p: the port
2550 *
2551 * Ensures that current Rx processing on any of the queues associated with
2552 * the given port completes before returning. We do this by acquiring and
2553 * releasing the locks of the response queues associated with the port.
2554 */
2555static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2556{
2557 int i;
2558
2559 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2560 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2561
2562 spin_lock_irq(&q->lock);
2563 spin_unlock_irq(&q->lock);
2564 }
2565}
2566
2567static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2568{
2569 struct port_info *pi = netdev_priv(dev);
2570 struct adapter *adapter = pi->adapter;
2571
2572 pi->vlan_grp = grp;
2573 if (adapter->params.rev > 0)
2574 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2575 else {
2576 /* single control for all ports */
2577 unsigned int i, have_vlans = 0;
2578 for_each_port(adapter, i)
2579 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2580
2581 t3_set_vlan_accel(adapter, 1, have_vlans);
2582 }
2583 t3_synchronize_rx(adapter, pi);
2584}
2585
2586#ifdef CONFIG_NET_POLL_CONTROLLER
2587static void cxgb_netpoll(struct net_device *dev)
2588{
2589 struct port_info *pi = netdev_priv(dev);
2590 struct adapter *adapter = pi->adapter;
2591 int qidx;
2592
2593 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2594 struct sge_qset *qs = &adapter->sge.qs[qidx];
2595 void *source;
2596
2597 if (adapter->flags & USING_MSIX)
2598 source = qs;
2599 else
2600 source = adapter;
2601
2602 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2603 }
2604}
2605#endif
2606
2607/*
2608 * Periodic accumulation of MAC statistics.
2609 */
2610static void mac_stats_update(struct adapter *adapter)
2611{
2612 int i;
2613
2614 for_each_port(adapter, i) {
2615 struct net_device *dev = adapter->port[i];
2616 struct port_info *p = netdev_priv(dev);
2617
2618 if (netif_running(dev)) {
2619 spin_lock(&adapter->stats_lock);
2620 t3_mac_update_stats(&p->mac);
2621 spin_unlock(&adapter->stats_lock);
2622 }
2623 }
2624}
2625
2626static void check_link_status(struct adapter *adapter)
2627{
2628 int i;
2629
2630 for_each_port(adapter, i) {
2631 struct net_device *dev = adapter->port[i];
2632 struct port_info *p = netdev_priv(dev);
2633 int link_fault;
2634
2635 spin_lock_irq(&adapter->work_lock);
2636 link_fault = p->link_fault;
2637 spin_unlock_irq(&adapter->work_lock);
2638
2639 if (link_fault) {
2640 t3_link_fault(adapter, i);
2641 continue;
2642 }
2643
2644 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2645 t3_xgm_intr_disable(adapter, i);
2646 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2647
2648 t3_link_changed(adapter, i);
2649 t3_xgm_intr_enable(adapter, i);
2650 }
2651 }
2652}
2653
2654static void check_t3b2_mac(struct adapter *adapter)
2655{
2656 int i;
2657
2658 if (!rtnl_trylock()) /* synchronize with ifdown */
2659 return;
2660
2661 for_each_port(adapter, i) {
2662 struct net_device *dev = adapter->port[i];
2663 struct port_info *p = netdev_priv(dev);
2664 int status;
2665
2666 if (!netif_running(dev))
2667 continue;
2668
2669 status = 0;
2670 if (netif_running(dev) && netif_carrier_ok(dev))
2671 status = t3b2_mac_watchdog_task(&p->mac);
2672 if (status == 1)
2673 p->mac.stats.num_toggled++;
2674 else if (status == 2) {
2675 struct cmac *mac = &p->mac;
2676
2677 t3_mac_set_mtu(mac, dev->mtu);
2678 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2679 cxgb_set_rxmode(dev);
2680 t3_link_start(&p->phy, mac, &p->link_config);
2681 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2682 t3_port_intr_enable(adapter, p->port_id);
2683 p->mac.stats.num_resets++;
2684 }
2685 }
2686 rtnl_unlock();
2687}
2688
2689
2690static void t3_adap_check_task(struct work_struct *work)
2691{
2692 struct adapter *adapter = container_of(work, struct adapter,
2693 adap_check_task.work);
2694 const struct adapter_params *p = &adapter->params;
2695 int port;
2696 unsigned int v, status, reset;
2697
2698 adapter->check_task_cnt++;
2699
2700 check_link_status(adapter);
2701
2702 /* Accumulate MAC stats if needed */
2703 if (!p->linkpoll_period ||
2704 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2705 p->stats_update_period) {
2706 mac_stats_update(adapter);
2707 adapter->check_task_cnt = 0;
2708 }
2709
2710 if (p->rev == T3_REV_B2)
2711 check_t3b2_mac(adapter);
2712
2713 /*
2714 * Scan the XGMAC's to check for various conditions which we want to
2715 * monitor in a periodic polling manner rather than via an interrupt
2716 * condition. This is used for conditions which would otherwise flood
2717 * the system with interrupts and we only really need to know that the
2718 * conditions are "happening" ... For each condition we count the
2719 * detection of the condition and reset it for the next polling loop.
2720 */
2721 for_each_port(adapter, port) {
2722 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2723 u32 cause;
2724
2725 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2726 reset = 0;
2727 if (cause & F_RXFIFO_OVERFLOW) {
2728 mac->stats.rx_fifo_ovfl++;
2729 reset |= F_RXFIFO_OVERFLOW;
2730 }
2731
2732 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2733 }
2734
2735 /*
2736 * We do the same as above for FL_EMPTY interrupts.
2737 */
2738 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2739 reset = 0;
2740
2741 if (status & F_FLEMPTY) {
2742 struct sge_qset *qs = &adapter->sge.qs[0];
2743 int i = 0;
2744
2745 reset |= F_FLEMPTY;
2746
2747 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2748 0xffff;
2749
2750 while (v) {
2751 qs->fl[i].empty += (v & 1);
2752 if (i)
2753 qs++;
2754 i ^= 1;
2755 v >>= 1;
2756 }
2757 }
2758
2759 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2760
2761 /* Schedule the next check update if any port is active. */
2762 spin_lock_irq(&adapter->work_lock);
2763 if (adapter->open_device_map & PORT_MASK)
2764 schedule_chk_task(adapter);
2765 spin_unlock_irq(&adapter->work_lock);
2766}
2767
2768static void db_full_task(struct work_struct *work)
2769{
2770 struct adapter *adapter = container_of(work, struct adapter,
2771 db_full_task);
2772
2773 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2774}
2775
2776static void db_empty_task(struct work_struct *work)
2777{
2778 struct adapter *adapter = container_of(work, struct adapter,
2779 db_empty_task);
2780
2781 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2782}
2783
2784static void db_drop_task(struct work_struct *work)
2785{
2786 struct adapter *adapter = container_of(work, struct adapter,
2787 db_drop_task);
2788 unsigned long delay = 1000;
2789 unsigned short r;
2790
2791 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2792
2793 /*
2794 * Sleep a while before ringing the driver qset dbs.
2795 * The delay is between 1000-2023 usecs.
2796 */
2797 get_random_bytes(&r, 2);
2798 delay += r & 1023;
2799 set_current_state(TASK_UNINTERRUPTIBLE);
2800 schedule_timeout(usecs_to_jiffies(delay));
2801 ring_dbs(adapter);
2802}
2803
2804/*
2805 * Processes external (PHY) interrupts in process context.
2806 */
2807static void ext_intr_task(struct work_struct *work)
2808{
2809 struct adapter *adapter = container_of(work, struct adapter,
2810 ext_intr_handler_task);
2811 int i;
2812
2813 /* Disable link fault interrupts */
2814 for_each_port(adapter, i) {
2815 struct net_device *dev = adapter->port[i];
2816 struct port_info *p = netdev_priv(dev);
2817
2818 t3_xgm_intr_disable(adapter, i);
2819 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2820 }
2821
2822 /* Re-enable link fault interrupts */
2823 t3_phy_intr_handler(adapter);
2824
2825 for_each_port(adapter, i)
2826 t3_xgm_intr_enable(adapter, i);
2827
2828 /* Now reenable external interrupts */
2829 spin_lock_irq(&adapter->work_lock);
2830 if (adapter->slow_intr_mask) {
2831 adapter->slow_intr_mask |= F_T3DBG;
2832 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2833 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2834 adapter->slow_intr_mask);
2835 }
2836 spin_unlock_irq(&adapter->work_lock);
2837}
2838
2839/*
2840 * Interrupt-context handler for external (PHY) interrupts.
2841 */
2842void t3_os_ext_intr_handler(struct adapter *adapter)
2843{
2844 /*
2845 * Schedule a task to handle external interrupts as they may be slow
2846 * and we use a mutex to protect MDIO registers. We disable PHY
2847 * interrupts in the meantime and let the task reenable them when
2848 * it's done.
2849 */
2850 spin_lock(&adapter->work_lock);
2851 if (adapter->slow_intr_mask) {
2852 adapter->slow_intr_mask &= ~F_T3DBG;
2853 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2854 adapter->slow_intr_mask);
2855 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2856 }
2857 spin_unlock(&adapter->work_lock);
2858}
2859
2860void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2861{
2862 struct net_device *netdev = adapter->port[port_id];
2863 struct port_info *pi = netdev_priv(netdev);
2864
2865 spin_lock(&adapter->work_lock);
2866 pi->link_fault = 1;
2867 spin_unlock(&adapter->work_lock);
2868}
2869
2870static int t3_adapter_error(struct adapter *adapter, int reset)
2871{
2872 int i, ret = 0;
2873
2874 if (is_offload(adapter) &&
2875 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2876 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2877 offload_close(&adapter->tdev);
2878 }
2879
2880 /* Stop all ports */
2881 for_each_port(adapter, i) {
2882 struct net_device *netdev = adapter->port[i];
2883
2884 if (netif_running(netdev))
2885 cxgb_close(netdev);
2886 }
2887
2888 /* Stop SGE timers */
2889 t3_stop_sge_timers(adapter);
2890
2891 adapter->flags &= ~FULL_INIT_DONE;
2892
2893 if (reset)
2894 ret = t3_reset_adapter(adapter);
2895
2896 pci_disable_device(adapter->pdev);
2897
2898 return ret;
2899}
2900
2901static int t3_reenable_adapter(struct adapter *adapter)
2902{
2903 if (pci_enable_device(adapter->pdev)) {
2904 dev_err(&adapter->pdev->dev,
2905 "Cannot re-enable PCI device after reset.\n");
2906 goto err;
2907 }
2908 pci_set_master(adapter->pdev);
2909 pci_restore_state(adapter->pdev);
2910 pci_save_state(adapter->pdev);
2911
2912 /* Free sge resources */
2913 t3_free_sge_resources(adapter);
2914
2915 if (t3_replay_prep_adapter(adapter))
2916 goto err;
2917
2918 return 0;
2919err:
2920 return -1;
2921}
2922
2923static void t3_resume_ports(struct adapter *adapter)
2924{
2925 int i;
2926
2927 /* Restart the ports */
2928 for_each_port(adapter, i) {
2929 struct net_device *netdev = adapter->port[i];
2930
2931 if (netif_running(netdev)) {
2932 if (cxgb_open(netdev)) {
2933 dev_err(&adapter->pdev->dev,
2934 "can't bring device back up"
2935 " after reset\n");
2936 continue;
2937 }
2938 }
2939 }
2940
2941 if (is_offload(adapter) && !ofld_disable)
2942 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2943}
2944
2945/*
2946 * processes a fatal error.
2947 * Bring the ports down, reset the chip, bring the ports back up.
2948 */
2949static void fatal_error_task(struct work_struct *work)
2950{
2951 struct adapter *adapter = container_of(work, struct adapter,
2952 fatal_error_handler_task);
2953 int err = 0;
2954
2955 rtnl_lock();
2956 err = t3_adapter_error(adapter, 1);
2957 if (!err)
2958 err = t3_reenable_adapter(adapter);
2959 if (!err)
2960 t3_resume_ports(adapter);
2961
2962 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2963 rtnl_unlock();
2964}
2965
2966void t3_fatal_err(struct adapter *adapter)
2967{
2968 unsigned int fw_status[4];
2969
2970 if (adapter->flags & FULL_INIT_DONE) {
2971 t3_sge_stop(adapter);
2972 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2973 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2974 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2975 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2976
2977 spin_lock(&adapter->work_lock);
2978 t3_intr_disable(adapter);
2979 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2980 spin_unlock(&adapter->work_lock);
2981 }
2982 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2983 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2984 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2985 fw_status[0], fw_status[1],
2986 fw_status[2], fw_status[3]);
2987}
2988
2989/**
2990 * t3_io_error_detected - called when PCI error is detected
2991 * @pdev: Pointer to PCI device
2992 * @state: The current pci connection state
2993 *
2994 * This function is called after a PCI bus error affecting
2995 * this device has been detected.
2996 */
2997static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2998 pci_channel_state_t state)
2999{
3000 struct adapter *adapter = pci_get_drvdata(pdev);
3001 int ret;
3002
3003 if (state == pci_channel_io_perm_failure)
3004 return PCI_ERS_RESULT_DISCONNECT;
3005
3006 ret = t3_adapter_error(adapter, 0);
3007
3008 /* Request a slot reset. */
3009 return PCI_ERS_RESULT_NEED_RESET;
3010}
3011
3012/**
3013 * t3_io_slot_reset - called after the pci bus has been reset.
3014 * @pdev: Pointer to PCI device
3015 *
3016 * Restart the card from scratch, as if from a cold-boot.
3017 */
3018static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3019{
3020 struct adapter *adapter = pci_get_drvdata(pdev);
3021
3022 if (!t3_reenable_adapter(adapter))
3023 return PCI_ERS_RESULT_RECOVERED;
3024
3025 return PCI_ERS_RESULT_DISCONNECT;
3026}
3027
3028/**
3029 * t3_io_resume - called when traffic can start flowing again.
3030 * @pdev: Pointer to PCI device
3031 *
3032 * This callback is called when the error recovery driver tells us that
3033 * its OK to resume normal operation.
3034 */
3035static void t3_io_resume(struct pci_dev *pdev)
3036{
3037 struct adapter *adapter = pci_get_drvdata(pdev);
3038
3039 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3040 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3041
3042 t3_resume_ports(adapter);
3043}
3044
3045static struct pci_error_handlers t3_err_handler = {
3046 .error_detected = t3_io_error_detected,
3047 .slot_reset = t3_io_slot_reset,
3048 .resume = t3_io_resume,
3049};
3050
3051/*
3052 * Set the number of qsets based on the number of CPUs and the number of ports,
3053 * not to exceed the number of available qsets, assuming there are enough qsets
3054 * per port in HW.
3055 */
3056static void set_nqsets(struct adapter *adap)
3057{
3058 int i, j = 0;
3059 int num_cpus = num_online_cpus();
3060 int hwports = adap->params.nports;
3061 int nqsets = adap->msix_nvectors - 1;
3062
3063 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3064 if (hwports == 2 &&
3065 (hwports * nqsets > SGE_QSETS ||
3066 num_cpus >= nqsets / hwports))
3067 nqsets /= hwports;
3068 if (nqsets > num_cpus)
3069 nqsets = num_cpus;
3070 if (nqsets < 1 || hwports == 4)
3071 nqsets = 1;
3072 } else
3073 nqsets = 1;
3074
3075 for_each_port(adap, i) {
3076 struct port_info *pi = adap2pinfo(adap, i);
3077
3078 pi->first_qset = j;
3079 pi->nqsets = nqsets;
3080 j = pi->first_qset + nqsets;
3081
3082 dev_info(&adap->pdev->dev,
3083 "Port %d using %d queue sets.\n", i, nqsets);
3084 }
3085}
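/*
 * Worked example (illustrative numbers, assuming SGE_QSETS is 8): a
 * two-port rev > 0 adapter with 9 MSI-X vectors starts at nqsets = 8;
 * hwports * nqsets = 16 exceeds SGE_QSETS, so nqsets /= 2 leaves 4,
 * then the value is clamped to num_online_cpus().  With four or more
 * CPUs each port gets 4 queue sets: port 0 takes qsets 0-3 and port 1
 * takes qsets 4-7.
 */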
3086
3087static int __devinit cxgb_enable_msix(struct adapter *adap)
3088{
3089 struct msix_entry entries[SGE_QSETS + 1];
3090 int vectors;
3091 int i, err;
3092
3093 vectors = ARRAY_SIZE(entries);
3094 for (i = 0; i < vectors; ++i)
3095 entries[i].entry = i;
3096
3097 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
3098 vectors = err;
3099
3100 if (err < 0)
3101 pci_disable_msix(adap->pdev);
3102
3103 if (!err && vectors < (adap->params.nports + 1)) {
3104 pci_disable_msix(adap->pdev);
3105 err = -1;
3106 }
3107
3108 if (!err) {
3109 for (i = 0; i < vectors; ++i)
3110 adap->msix_info[i].vec = entries[i].vector;
3111 adap->msix_nvectors = vectors;
3112 }
3113
3114 return err;
3115}
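/*
 * The retry loop above leans on the old pci_enable_msix() contract
 * (assumed here): a positive return is the number of vectors the
 * platform could actually provide, so the request is repeated with
 * that smaller count until it succeeds (0) or fails outright (< 0).
 * The driver then insists on at least nports + 1 vectors: one per
 * port's first qset plus one for slow-path/error interrupts.
 */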
3116
3117static void __devinit print_port_info(struct adapter *adap,
3118 const struct adapter_info *ai)
3119{
3120 static const char *pci_variant[] = {
3121 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3122 };
3123
3124 int i;
3125 char buf[80];
3126
3127 if (is_pcie(adap))
3128 snprintf(buf, sizeof(buf), "%s x%d",
3129 pci_variant[adap->params.pci.variant],
3130 adap->params.pci.width);
3131 else
3132 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3133 pci_variant[adap->params.pci.variant],
3134 adap->params.pci.speed, adap->params.pci.width);
3135
3136 for_each_port(adap, i) {
3137 struct net_device *dev = adap->port[i];
3138 const struct port_info *pi = netdev_priv(dev);
3139
3140 if (!test_bit(i, &adap->registered_device_map))
3141 continue;
3142 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
3143 dev->name, ai->desc, pi->phy.desc,
3144 is_offload(adap) ? "R" : "", adap->params.rev, buf,
3145 (adap->flags & USING_MSIX) ? " MSI-X" :
3146 (adap->flags & USING_MSI) ? " MSI" : "");
3147 if (adap->name == dev->name && adap->params.vpd.mclk)
3148 printk(KERN_INFO
3149 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3150 adap->name, t3_mc7_size(&adap->cm) >> 20,
3151 t3_mc7_size(&adap->pmtx) >> 20,
3152 t3_mc7_size(&adap->pmrx) >> 20,
3153 adap->params.vpd.sn);
3154 }
3155}
3156
3157static const struct net_device_ops cxgb_netdev_ops = {
3158 .ndo_open = cxgb_open,
3159 .ndo_stop = cxgb_close,
3160 .ndo_start_xmit = t3_eth_xmit,
3161 .ndo_get_stats = cxgb_get_stats,
3162 .ndo_validate_addr = eth_validate_addr,
3163 .ndo_set_multicast_list = cxgb_set_rxmode,
3164 .ndo_do_ioctl = cxgb_ioctl,
3165 .ndo_change_mtu = cxgb_change_mtu,
3166 .ndo_set_mac_address = cxgb_set_mac_addr,
3167 .ndo_vlan_rx_register = vlan_rx_register,
3168#ifdef CONFIG_NET_POLL_CONTROLLER
3169 .ndo_poll_controller = cxgb_netpoll,
3170#endif
3171};
3172
3173static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3174{
3175 struct port_info *pi = netdev_priv(dev);
3176
3177 memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3178 pi->iscsic.mac_addr[3] |= 0x80;
3179}
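/*
 * Example of the derivation above: if the port's LAN MAC were
 * 00:07:43:12:34:56 (address made up; 00:07:43 is Chelsio's OUI), the
 * iSCSI MAC becomes 00:07:43:92:34:56 -- byte 3 gains bit 0x80, keeping
 * the two addresses distinct per port.
 */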
3180
3181static int __devinit init_one(struct pci_dev *pdev,
3182 const struct pci_device_id *ent)
3183{
3184 static int version_printed;
3185
3186 int i, err, pci_using_dac = 0;
3187 resource_size_t mmio_start, mmio_len;
3188 const struct adapter_info *ai;
3189 struct adapter *adapter = NULL;
3190 struct port_info *pi;
3191
3192 if (!version_printed) {
3193 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3194 ++version_printed;
3195 }
3196
3197 if (!cxgb3_wq) {
3198 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3199 if (!cxgb3_wq) {
3200 printk(KERN_ERR DRV_NAME
3201 ": cannot initialize work queue\n");
3202 return -ENOMEM;
3203 }
3204 }
3205
3206 err = pci_request_regions(pdev, DRV_NAME);
3207 if (err) {
3208 /* Just info, some other driver may have claimed the device. */
3209 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3210 return err;
3211 }
3212
3213 err = pci_enable_device(pdev);
3214 if (err) {
3215 dev_err(&pdev->dev, "cannot enable PCI device\n");
3216 goto out_release_regions;
3217 }
3218
3219 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3220 pci_using_dac = 1;
3221 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3222 if (err) {
3223 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3224 "coherent allocations\n");
3225 goto out_disable_device;
3226 }
3227 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3228 dev_err(&pdev->dev, "no usable DMA configuration\n");
3229 goto out_disable_device;
3230 }
3231
3232 pci_set_master(pdev);
3233 pci_save_state(pdev);
3234
3235 mmio_start = pci_resource_start(pdev, 0);
3236 mmio_len = pci_resource_len(pdev, 0);
3237 ai = t3_get_adapter_info(ent->driver_data);
3238
3239 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3240 if (!adapter) {
3241 err = -ENOMEM;
3242 goto out_disable_device;
3243 }
3244
3245 adapter->nofail_skb =
3246 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3247 if (!adapter->nofail_skb) {
3248 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3249 err = -ENOMEM;
3250 goto out_free_adapter;
3251 }
3252
3253 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3254 if (!adapter->regs) {
3255 dev_err(&pdev->dev, "cannot map device registers\n");
3256 err = -ENOMEM;
3257 goto out_free_adapter;
3258 }
3259
3260 adapter->pdev = pdev;
3261 adapter->name = pci_name(pdev);
3262 adapter->msg_enable = dflt_msg_enable;
3263 adapter->mmio_len = mmio_len;
3264
3265 mutex_init(&adapter->mdio_lock);
3266 spin_lock_init(&adapter->work_lock);
3267 spin_lock_init(&adapter->stats_lock);
3268
3269 INIT_LIST_HEAD(&adapter->adapter_list);
3270 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3271 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3272
3273 INIT_WORK(&adapter->db_full_task, db_full_task);
3274 INIT_WORK(&adapter->db_empty_task, db_empty_task);
3275 INIT_WORK(&adapter->db_drop_task, db_drop_task);
3276
4d22de3e
DLR
3277 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3278
3279 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3280 struct net_device *netdev;
3281
3282 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3283 if (!netdev) {
3284 err = -ENOMEM;
3285 goto out_free_dev;
3286 }
3287
3288 SET_NETDEV_DEV(netdev, &pdev->dev);
3289
3290 adapter->port[i] = netdev;
3291 pi = netdev_priv(netdev);
3292 pi->adapter = adapter;
3293 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3294 pi->port_id = i;
3295 netif_carrier_off(netdev);
3296 netif_tx_stop_all_queues(netdev);
3297 netdev->irq = pdev->irq;
3298 netdev->mem_start = mmio_start;
3299 netdev->mem_end = mmio_start + mmio_len - 1;
3300 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3301 netdev->features |= NETIF_F_GRO;
3302 if (pci_using_dac)
3303 netdev->features |= NETIF_F_HIGHDMA;
3304
3305 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3306 netdev->netdev_ops = &cxgb_netdev_ops;
3307 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3308 }
3309
3310 pci_set_drvdata(pdev, adapter);
3311 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3312 err = -ENODEV;
3313 goto out_free_dev;
3314 }
3315
3316 /*
3317 * The card is now ready to go. If any errors occur during device
3318 * registration we do not fail the whole card but rather proceed only
3319 * with the ports we manage to register successfully. However we must
3320 * register at least one net device.
3321 */
3322 for_each_port(adapter, i) {
3323 err = register_netdev(adapter->port[i]);
3324 if (err)
3325 dev_warn(&pdev->dev,
3326 "cannot register net device %s, skipping\n",
3327 adapter->port[i]->name);
3328 else {
3329 /*
3330 * Change the name we use for messages to the name of
3331 * the first successfully registered interface.
3332 */
3333 if (!adapter->registered_device_map)
3334 adapter->name = adapter->port[i]->name;
3335
3336 __set_bit(i, &adapter->registered_device_map);
3337 }
3338 }
3339 if (!adapter->registered_device_map) {
3340 dev_err(&pdev->dev, "could not register any net devices\n");
3341 goto out_free_dev;
3342 }
3343
3344 for_each_port(adapter, i)
3345 cxgb3_init_iscsi_mac(adapter->port[i]);
3346
3347 /* Driver's ready. Reflect it on LEDs */
3348 t3_led_ready(adapter);
3349
3350 if (is_offload(adapter)) {
3351 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3352 cxgb3_adapter_ofld(adapter);
3353 }
3354
3355 /* See what interrupts we'll be using */
3356 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3357 adapter->flags |= USING_MSIX;
3358 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3359 adapter->flags |= USING_MSI;
3360
3361 set_nqsets(adapter);
3362
3363 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3364 &cxgb3_attr_group);
3365
3366 print_port_info(adapter, ai);
3367 return 0;
3368
3369out_free_dev:
3370 iounmap(adapter->regs);
3371 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3372 if (adapter->port[i])
3373 free_netdev(adapter->port[i]);
3374
3375out_free_adapter:
3376 kfree(adapter);
3377
3378out_disable_device:
3379 pci_disable_device(pdev);
3380out_release_regions:
3381 pci_release_regions(pdev);
3382 pci_set_drvdata(pdev, NULL);
3383 return err;
3384}
3385
3386static void __devexit remove_one(struct pci_dev *pdev)
3387{
3388 struct adapter *adapter = pci_get_drvdata(pdev);
3389
3390 if (adapter) {
3391 int i;
3392
3393 t3_sge_stop(adapter);
3394 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3395 &cxgb3_attr_group);
3396
3397 if (is_offload(adapter)) {
3398 cxgb3_adapter_unofld(adapter);
3399 if (test_bit(OFFLOAD_DEVMAP_BIT,
3400 &adapter->open_device_map))
3401 offload_close(&adapter->tdev);
3402 }
3403
3404 for_each_port(adapter, i)
3405 if (test_bit(i, &adapter->registered_device_map))
3406 unregister_netdev(adapter->port[i]);
3407
3408 t3_stop_sge_timers(adapter);
3409 t3_free_sge_resources(adapter);
3410 cxgb_disable_msi(adapter);
3411
4d22de3e
DLR
3412 for_each_port(adapter, i)
3413 if (adapter->port[i])
3414 free_netdev(adapter->port[i]);
3415
3416 iounmap(adapter->regs);
3417 if (adapter->nofail_skb)
3418 kfree_skb(adapter->nofail_skb);
3419 kfree(adapter);
3420 pci_release_regions(pdev);
3421 pci_disable_device(pdev);
3422 pci_set_drvdata(pdev, NULL);
3423 }
3424}
3425
3426static struct pci_driver driver = {
3427 .name = DRV_NAME,
3428 .id_table = cxgb3_pci_tbl,
3429 .probe = init_one,
3430 .remove = __devexit_p(remove_one),
3431 .err_handler = &t3_err_handler,
3432};
3433
3434static int __init cxgb3_init_module(void)
3435{
3436 int ret;
3437
3438 cxgb3_offload_init();
3439
3440 ret = pci_register_driver(&driver);
3441 return ret;
3442}
3443
3444static void __exit cxgb3_cleanup_module(void)
3445{
3446 pci_unregister_driver(&driver);
3447 if (cxgb3_wq)
3448 destroy_workqueue(cxgb3_wq);
3449}
3450
3451module_init(cxgb3_init_module);
3452module_exit(cxgb3_cleanup_module);