RDMA/cxgb3: Set the appropriate IO channel in rdma_init work requests
drivers/net/cxgb3/cxgb3_main.c (linux-2.6-block.git)
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose port's link is to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

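/*
 * t3_os_link_fault - handle a link fault state change
 *
 * Called by the common code when the XGMAC reports a link fault.  When the
 * fault clears (state != 0) the XGM interrupts are rearmed and the MAC
 * transmit path is re-enabled; otherwise the carrier is simply turned off.
 */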
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS + pi->mac.offset);
		t3_write_reg(adap, A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap, A_XGM_INT_ENABLE + pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else
		netif_carrier_off(dev);

	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the PHY whose module changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

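/*
 * Request the per-queue-set data interrupts.  MSI-X vector 0 is reserved
 * for the asynchronous (slow-path) handler above, so the data IRQs start
 * at index 1.  On failure, any vectors already requested are released.
 */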
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

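/*
 * Wait for the response queue of qset 0 to accumulate n offload replies
 * beyond init_cnt, polling every 10ms for at most 5 attempts (~50ms).
 */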
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

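/*
 * Initialize TP memory parity (used on T3C-revision parts, see cxgb_up())
 * by writing every SMT, L2T, and routing-table entry and waiting for the
 * firmware replies.  The preallocated nofail_skb is used as a fallback
 * when allocations fail and is replenished after each use.
 */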
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
				irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

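/*
 * Show the rate of one of the eight TX traffic schedulers in Kbps.  Two
 * schedulers share each TM PIO register, 16 bits apiece, encoded as a
 * (bytes, core-clock ticks) pair; a zero tick count means the scheduler
 * is disabled.
 */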
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

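/*
 * Program the source MAC table entry for a port by sending a
 * CPL_SMT_WRITE_REQ carrying the port's current MAC address.
 */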
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

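/*
 * Bind each queue set of each port to that port's TX packet scheduler.
 * All queue sets are attempted even if one binding fails; the last error
 * encountered is returned.
 */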
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"

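/*
 * Fetch the firmware image matching the driver's compiled-in firmware
 * version via request_firmware() and load it onto the adapter with
 * t3_load_fw().
 */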
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

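/*
 * Load the protocol-engine (TP) SRAM image matching the chip revision and
 * the driver's compiled-in TP version, validate it with t3_check_tpsram(),
 * and write it to the adapter.  Revisions without a known suffix are
 * silently skipped.
 */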
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}

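/*
 * Schedule the periodic adapter check task on the driver's private
 * workqueue.  The interval comes from the link polling period when link
 * polling is in use, otherwise from the statistics update period.
 */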
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_scheduled_work();

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

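/*
 * Sum one per-queue SGE statistic (selected by idx) across all the queue
 * sets owned by a port.
 */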
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}

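/*
 * Copy the contents of a contiguous register range into the register
 * snapshot buffer at the matching byte offsets.
 */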
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

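/*
 * Map an ethtool speed/duplex pair to the corresponding SUPPORTED_*
 * capability bit, or 0 if the combination is not representable.
 */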
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		int i;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

1817static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1818{
5fbf816f
DLR
1819 struct port_info *pi = netdev_priv(dev);
1820 struct adapter *adapter = pi->adapter;
05b97b30 1821 struct qset_params *q;
5fbf816f 1822 int i;
4d22de3e
DLR
1823
1824 if (e->rx_pending > MAX_RX_BUFFERS ||
1825 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1826 e->tx_pending > MAX_TXQ_ENTRIES ||
1827 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1828 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1829 e->rx_pending < MIN_FL_ENTRIES ||
1830 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1831 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1832 return -EINVAL;
1833
1834 if (adapter->flags & FULL_INIT_DONE)
1835 return -EBUSY;
1836
1837 q = &adapter->params.sge.qset[pi->first_qset];
1838 for (i = 0; i < pi->nqsets; ++i, ++q) {
1839 q->rspq_size = e->rx_mini_pending;
1840 q->fl_size = e->rx_pending;
1841 q->jumbo_size = e->rx_jumbo_pending;
1842 q->txq_size[0] = e->tx_pending;
1843 q->txq_size[1] = e->tx_pending;
1844 q->txq_size[2] = e->tx_pending;
1845 }
1846 return 0;
1847}
1848
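/*
 * Only queue set 0's interrupt holdoff timer is exposed through
 * ethtool here.  The microsecond value is scaled by 10 before being
 * checked against M_NEWTIMER, which suggests the SGE timer ticks in
 * 100ns units; values that would overflow the field are rejected.
 */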
1849static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1850{
1851 struct port_info *pi = netdev_priv(dev);
1852 struct adapter *adapter = pi->adapter;
1853 struct qset_params *qsp = &adapter->params.sge.qset[0];
1854 struct sge_qset *qs = &adapter->sge.qs[0];
1855
1856 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1857 return -EINVAL;
1858
1859 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1860 t3_update_qset_coalesce(qs, qsp);
1861 return 0;
1862}
1863
1864static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1865{
1866 struct port_info *pi = netdev_priv(dev);
1867 struct adapter *adapter = pi->adapter;
1868 struct qset_params *q = adapter->params.sge.qset;
1869
1870 c->rx_coalesce_usecs = q->coalesce_usecs;
1871 return 0;
1872}
1873
1874static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1875 u8 * data)
1876{
1877 struct port_info *pi = netdev_priv(dev);
1878 struct adapter *adapter = pi->adapter;
1879 int i, err = 0;
1880
1881 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1882 if (!buf)
1883 return -ENOMEM;
1884
1885 e->magic = EEPROM_MAGIC;
1886 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1887 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1888
1889 if (!err)
1890 memcpy(data, buf + e->offset, e->len);
1891 kfree(buf);
1892 return err;
1893}
1894
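/*
 * EEPROM writes happen in aligned 32-bit words.  Requests that are
 * not 4-byte aligned are handled read-modify-write: the boundary
 * words are read into a bounce buffer, the user data is merged in,
 * and everything is written back with write-protect dropped around
 * the update.
 */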
1895static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1896 u8 * data)
1897{
1898 struct port_info *pi = netdev_priv(dev);
1899 struct adapter *adapter = pi->adapter;
1900 u32 aligned_offset, aligned_len;
1901 __le32 *p;
1902 u8 *buf;
1903 int err;
1904
1905 if (eeprom->magic != EEPROM_MAGIC)
1906 return -EINVAL;
1907
1908 aligned_offset = eeprom->offset & ~3;
1909 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1910
1911 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1912 buf = kmalloc(aligned_len, GFP_KERNEL);
1913 if (!buf)
1914 return -ENOMEM;
1915 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1916 if (!err && aligned_len > 4)
1917 err = t3_seeprom_read(adapter,
1918 aligned_offset + aligned_len - 4,
1919 (__le32 *) & buf[aligned_len - 4]);
1920 if (err)
1921 goto out;
1922 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1923 } else
1924 buf = data;
1925
1926 err = t3_seeprom_wp(adapter, 0);
1927 if (err)
1928 goto out;
1929
1930 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1931 err = t3_seeprom_write(adapter, aligned_offset, *p);
1932 aligned_offset += 4;
1933 }
1934
1935 if (!err)
1936 err = t3_seeprom_wp(adapter, 1);
1937out:
1938 if (buf != data)
1939 kfree(buf);
1940 return err;
1941}
1942
1943static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1944{
1945 wol->supported = 0;
1946 wol->wolopts = 0;
1947 memset(&wol->sopass, 0, sizeof(wol->sopass));
1948}
1949
1950static const struct ethtool_ops cxgb_ethtool_ops = {
1951 .get_settings = get_settings,
1952 .set_settings = set_settings,
1953 .get_drvinfo = get_drvinfo,
1954 .get_msglevel = get_msglevel,
1955 .set_msglevel = set_msglevel,
1956 .get_ringparam = get_sge_param,
1957 .set_ringparam = set_sge_param,
1958 .get_coalesce = get_coalesce,
1959 .set_coalesce = set_coalesce,
1960 .get_eeprom_len = get_eeprom_len,
1961 .get_eeprom = get_eeprom,
1962 .set_eeprom = set_eeprom,
1963 .get_pauseparam = get_pauseparam,
1964 .set_pauseparam = set_pauseparam,
1965 .get_rx_csum = get_rx_csum,
1966 .set_rx_csum = set_rx_csum,
1967 .set_tx_csum = ethtool_op_set_tx_csum,
1968 .set_sg = ethtool_op_set_sg,
1969 .get_link = ethtool_op_get_link,
1970 .get_strings = get_strings,
1971 .phys_id = cxgb3_phys_id,
1972 .nway_reset = restart_autoneg,
1973 .get_sset_count = get_sset_count,
1974 .get_ethtool_stats = get_stats,
1975 .get_regs_len = get_regs_len,
1976 .get_regs = get_regs,
1977 .get_wol = get_wol,
1978 .set_tso = ethtool_op_set_tso,
1979};
1980
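/*
 * Most numeric fields in the extension ioctls below use a negative
 * value to mean "leave this parameter unchanged", so in_range()
 * accepts any negative value and bounds-checks only non-negative
 * ones.
 */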
1981static int in_range(int val, int lo, int hi)
1982{
1983 return val < 0 || (val <= hi && val >= lo);
1984}
1985
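/*
 * Illustrative userspace sketch (hypothetical, not part of the
 * driver) of how these private ioctls are reached; the first u32 of
 * the payload selects the sub-command:
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)&edata;
 *	ioctl(fd, SIOCCHIOCTL, &ifr);	(edata.val now holds nqsets)
 */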
1986static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1987{
1988 struct port_info *pi = netdev_priv(dev);
1989 struct adapter *adapter = pi->adapter;
1990 u32 cmd;
1991 int ret;
1992
1993 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1994 return -EFAULT;
1995
1996 switch (cmd) {
1997 case CHELSIO_SET_QSET_PARAMS:{
1998 int i;
1999 struct qset_params *q;
2000 struct ch_qset_params t;
2001 int q1 = pi->first_qset;
2002 int nqsets = pi->nqsets;
2003
2004 if (!capable(CAP_NET_ADMIN))
2005 return -EPERM;
2006 if (copy_from_user(&t, useraddr, sizeof(t)))
2007 return -EFAULT;
2008 if (t.qset_idx >= SGE_QSETS)
2009 return -EINVAL;
2010 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2011 !in_range(t.cong_thres, 0, 255) ||
2012 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2013 MAX_TXQ_ENTRIES) ||
2014 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2015 MAX_TXQ_ENTRIES) ||
2016 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2017 MAX_CTRL_TXQ_ENTRIES) ||
2018 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2019 MAX_RX_BUFFERS)
2020 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2021 MAX_RX_JUMBO_BUFFERS)
2022 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2023 MAX_RSPQ_ENTRIES))
2024 return -EINVAL;
2025
2026 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
2027 for_each_port(adapter, i) {
2028 pi = adap2pinfo(adapter, i);
2029 if (t.qset_idx >= pi->first_qset &&
2030 t.qset_idx < pi->first_qset + pi->nqsets &&
2031 !(pi->rx_offload & T3_RX_CSUM))
2032 return -EINVAL;
2033 }
2034
2035 if ((adapter->flags & FULL_INIT_DONE) &&
2036 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2037 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2038 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2039 t.polling >= 0 || t.cong_thres >= 0))
2040 return -EBUSY;
2041
2042 /* Allow setting of any available qset when offload enabled */
2043 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2044 q1 = 0;
2045 for_each_port(adapter, i) {
2046 pi = adap2pinfo(adapter, i);
2047 nqsets += pi->first_qset + pi->nqsets;
2048 }
2049 }
2050
2051 if (t.qset_idx < q1)
2052 return -EINVAL;
2053 if (t.qset_idx > q1 + nqsets - 1)
2054 return -EINVAL;
2055
2056 q = &adapter->params.sge.qset[t.qset_idx];
2057
2058 if (t.rspq_size >= 0)
2059 q->rspq_size = t.rspq_size;
2060 if (t.fl_size[0] >= 0)
2061 q->fl_size = t.fl_size[0];
2062 if (t.fl_size[1] >= 0)
2063 q->jumbo_size = t.fl_size[1];
2064 if (t.txq_size[0] >= 0)
2065 q->txq_size[0] = t.txq_size[0];
2066 if (t.txq_size[1] >= 0)
2067 q->txq_size[1] = t.txq_size[1];
2068 if (t.txq_size[2] >= 0)
2069 q->txq_size[2] = t.txq_size[2];
2070 if (t.cong_thres >= 0)
2071 q->cong_thres = t.cong_thres;
2072 if (t.intr_lat >= 0) {
2073 struct sge_qset *qs =
2074 &adapter->sge.qs[t.qset_idx];
2075
2076 q->coalesce_usecs = t.intr_lat;
2077 t3_update_qset_coalesce(qs, q);
2078 }
2079 if (t.polling >= 0) {
2080 if (adapter->flags & USING_MSIX)
2081 q->polling = t.polling;
2082 else {
2083 /* No polling with INTx for T3A */
2084 if (adapter->params.rev == 0 &&
2085 !(adapter->flags & USING_MSI))
2086 t.polling = 0;
2087
2088 for (i = 0; i < SGE_QSETS; i++) {
2089 q = &adapter->params.sge.
2090 qset[i];
2091 q->polling = t.polling;
2092 }
2093 }
2094 }
2095 if (t.lro >= 0)
2096 set_qset_lro(dev, t.qset_idx, t.lro);
2097
2098 break;
2099 }
2100 case CHELSIO_GET_QSET_PARAMS:{
2101 struct qset_params *q;
2102 struct ch_qset_params t;
2103 int q1 = pi->first_qset;
2104 int nqsets = pi->nqsets;
2105 int i;
2106
2107 if (copy_from_user(&t, useraddr, sizeof(t)))
2108 return -EFAULT;
2109
2110 /* Display qsets for all ports when offload enabled */
2111 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2112 q1 = 0;
2113 for_each_port(adapter, i) {
2114 pi = adap2pinfo(adapter, i);
2115 nqsets = pi->first_qset + pi->nqsets;
2116 }
2117 }
2118
2119 if (t.qset_idx >= nqsets)
2120 return -EINVAL;
2121
2122 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2123 t.rspq_size = q->rspq_size;
2124 t.txq_size[0] = q->txq_size[0];
2125 t.txq_size[1] = q->txq_size[1];
2126 t.txq_size[2] = q->txq_size[2];
2127 t.fl_size[0] = q->fl_size;
2128 t.fl_size[1] = q->jumbo_size;
2129 t.polling = q->polling;
2130 t.lro = q->lro;
2131 t.intr_lat = q->coalesce_usecs;
2132 t.cong_thres = q->cong_thres;
2133 t.qnum = q1;
2134
2135 if (adapter->flags & USING_MSIX)
2136 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2137 else
2138 t.vector = adapter->pdev->irq;
2139
2140 if (copy_to_user(useraddr, &t, sizeof(t)))
2141 return -EFAULT;
2142 break;
2143 }
2144 case CHELSIO_SET_QSET_NUM:{
2145 struct ch_reg edata;
2146 unsigned int i, first_qset = 0, other_qsets = 0;
2147
2148 if (!capable(CAP_NET_ADMIN))
2149 return -EPERM;
2150 if (adapter->flags & FULL_INIT_DONE)
2151 return -EBUSY;
2152 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2153 return -EFAULT;
2154 if (edata.val < 1 ||
2155 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2156 return -EINVAL;
2157
2158 for_each_port(adapter, i)
2159 if (adapter->port[i] && adapter->port[i] != dev)
2160 other_qsets += adap2pinfo(adapter, i)->nqsets;
2161
2162 if (edata.val + other_qsets > SGE_QSETS)
2163 return -EINVAL;
2164
2165 pi->nqsets = edata.val;
2166
2167 for_each_port(adapter, i)
2168 if (adapter->port[i]) {
2169 pi = adap2pinfo(adapter, i);
2170 pi->first_qset = first_qset;
2171 first_qset += pi->nqsets;
2172 }
2173 break;
2174 }
2175 case CHELSIO_GET_QSET_NUM:{
2176 struct ch_reg edata;
2177
2178 edata.cmd = CHELSIO_GET_QSET_NUM;
2179 edata.val = pi->nqsets;
2180 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2181 return -EFAULT;
2182 break;
2183 }
2184 case CHELSIO_LOAD_FW:{
2185 u8 *fw_data;
2186 struct ch_mem_range t;
2187
2188 if (!capable(CAP_SYS_RAWIO))
2189 return -EPERM;
2190 if (copy_from_user(&t, useraddr, sizeof(t)))
2191 return -EFAULT;
2192 /* XXX: should t.len be sanity-checked before this allocation? */
2193 fw_data = kmalloc(t.len, GFP_KERNEL);
2194 if (!fw_data)
2195 return -ENOMEM;
2196
2197 if (copy_from_user
2198 (fw_data, useraddr + sizeof(t), t.len)) {
2199 kfree(fw_data);
2200 return -EFAULT;
2201 }
2202
2203 ret = t3_load_fw(adapter, fw_data, t.len);
2204 kfree(fw_data);
2205 if (ret)
2206 return ret;
2207 break;
2208 }
2209 case CHELSIO_SETMTUTAB:{
2210 struct ch_mtus m;
2211 int i;
2212
2213 if (!is_offload(adapter))
2214 return -EOPNOTSUPP;
2215 if (!capable(CAP_NET_ADMIN))
2216 return -EPERM;
2217 if (offload_running(adapter))
2218 return -EBUSY;
2219 if (copy_from_user(&m, useraddr, sizeof(m)))
2220 return -EFAULT;
2221 if (m.nmtus != NMTUS)
2222 return -EINVAL;
2223 if (m.mtus[0] < 81) /* accommodate SACK */
2224 return -EINVAL;
2225
2226 /* MTUs must be in ascending order */
2227 for (i = 1; i < NMTUS; ++i)
2228 if (m.mtus[i] < m.mtus[i - 1])
2229 return -EINVAL;
2230
2231 memcpy(adapter->params.mtus, m.mtus,
2232 sizeof(adapter->params.mtus));
2233 break;
2234 }
2235 case CHELSIO_GET_PM:{
2236 struct tp_params *p = &adapter->params.tp;
2237 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2238
2239 if (!is_offload(adapter))
2240 return -EOPNOTSUPP;
2241 m.tx_pg_sz = p->tx_pg_size;
2242 m.tx_num_pg = p->tx_num_pgs;
2243 m.rx_pg_sz = p->rx_pg_size;
2244 m.rx_num_pg = p->rx_num_pgs;
2245 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2246 if (copy_to_user(useraddr, &m, sizeof(m)))
2247 return -EFAULT;
2248 break;
2249 }
2250 case CHELSIO_SET_PM:{
2251 struct ch_pm m;
2252 struct tp_params *p = &adapter->params.tp;
2253
2254 if (!is_offload(adapter))
2255 return -EOPNOTSUPP;
2256 if (!capable(CAP_NET_ADMIN))
2257 return -EPERM;
2258 if (adapter->flags & FULL_INIT_DONE)
2259 return -EBUSY;
2260 if (copy_from_user(&m, useraddr, sizeof(m)))
2261 return -EFAULT;
2262 if (!is_power_of_2(m.rx_pg_sz) ||
2263 !is_power_of_2(m.tx_pg_sz))
2264 return -EINVAL; /* not power of 2 */
2265 if (!(m.rx_pg_sz & 0x14000))
2266 return -EINVAL; /* not 16KB or 64KB */
2267 if (!(m.tx_pg_sz & 0x1554000))
2268 return -EINVAL; /* not 16KB-16MB, power of 4 */
2269 if (m.tx_num_pg == -1)
2270 m.tx_num_pg = p->tx_num_pgs;
2271 if (m.rx_num_pg == -1)
2272 m.rx_num_pg = p->rx_num_pgs;
2273 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2274 return -EINVAL;
2275 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2276 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2277 return -EINVAL;
2278 p->rx_pg_size = m.rx_pg_sz;
2279 p->tx_pg_size = m.tx_pg_sz;
2280 p->rx_num_pgs = m.rx_num_pg;
2281 p->tx_num_pgs = m.tx_num_pg;
2282 break;
2283 }
2284 case CHELSIO_GET_MEM:{
2285 struct ch_mem_range t;
2286 struct mc7 *mem;
2287 u64 buf[32];
2288
2289 if (!is_offload(adapter))
2290 return -EOPNOTSUPP;
2291 if (!(adapter->flags & FULL_INIT_DONE))
2292 return -EIO; /* need the memory controllers */
2293 if (copy_from_user(&t, useraddr, sizeof(t)))
2294 return -EFAULT;
2295 if ((t.addr & 7) || (t.len & 7))
2296 return -EINVAL;
2297 if (t.mem_id == MEM_CM)
2298 mem = &adapter->cm;
2299 else if (t.mem_id == MEM_PMRX)
2300 mem = &adapter->pmrx;
2301 else if (t.mem_id == MEM_PMTX)
2302 mem = &adapter->pmtx;
2303 else
2304 return -EINVAL;
2305
2306 /*
2307 * Version scheme:
2308 * bits 0..9: chip version
2309 * bits 10..15: chip revision
2310 */
2311 t.version = 3 | (adapter->params.rev << 10);
2312 if (copy_to_user(useraddr, &t, sizeof(t)))
2313 return -EFAULT;
2314
2315 /*
2316 * Read 256 bytes at a time as len can be large and we don't
2317 * want to use huge intermediate buffers.
2318 */
2319 useraddr += sizeof(t); /* advance to start of buffer */
2320 while (t.len) {
2321 unsigned int chunk =
2322 min_t(unsigned int, t.len, sizeof(buf));
2323
2324 ret =
2325 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2326 buf);
2327 if (ret)
2328 return ret;
2329 if (copy_to_user(useraddr, buf, chunk))
2330 return -EFAULT;
2331 useraddr += chunk;
2332 t.addr += chunk;
2333 t.len -= chunk;
2334 }
2335 break;
2336 }
2337 case CHELSIO_SET_TRACE_FILTER:{
2338 struct ch_trace t;
2339 const struct trace_params *tp;
2340
2341 if (!capable(CAP_NET_ADMIN))
2342 return -EPERM;
2343 if (!offload_running(adapter))
2344 return -EAGAIN;
2345 if (copy_from_user(&t, useraddr, sizeof(t)))
2346 return -EFAULT;
2347
2348 tp = (const struct trace_params *)&t.sip;
2349 if (t.config_tx)
2350 t3_config_trace_filter(adapter, tp, 0,
2351 t.invert_match,
2352 t.trace_tx);
2353 if (t.config_rx)
2354 t3_config_trace_filter(adapter, tp, 1,
2355 t.invert_match,
2356 t.trace_rx);
2357 break;
2358 }
2359 default:
2360 return -EOPNOTSUPP;
2361 }
2362 return 0;
2363}
2364
2365static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2366{
2367 struct mii_ioctl_data *data = if_mii(req);
2368 struct port_info *pi = netdev_priv(dev);
2369 struct adapter *adapter = pi->adapter;
2370
2371 switch (cmd) {
2372 case SIOCGMIIREG:
2373 case SIOCSMIIREG:
2374 /* Convert phy_id from older PRTAD/DEVAD format */
2375 if (is_10G(adapter) &&
2376 !mdio_phy_id_is_c45(data->phy_id) &&
2377 (data->phy_id & 0x1f00) &&
2378 !(data->phy_id & 0xe0e0))
2379 data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2380 data->phy_id & 0x1f);
2381 /* FALLTHRU */
2382 case SIOCGMIIPHY:
2383 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2384 case SIOCCHIOCTL:
2385 return cxgb_extension_ioctl(dev, req->ifr_data);
2386 default:
2387 return -EOPNOTSUPP;
2388 }
2389}
2390
2391static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2392{
2393 struct port_info *pi = netdev_priv(dev);
2394 struct adapter *adapter = pi->adapter;
2395 int ret;
2396
2397 if (new_mtu < 81) /* accommodate SACK */
2398 return -EINVAL;
2399 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2400 return ret;
2401 dev->mtu = new_mtu;
2402 init_port_mtus(adapter);
2403 if (adapter->params.rev == 0 && offload_running(adapter))
2404 t3_load_mtus(adapter, adapter->params.mtus,
2405 adapter->params.a_wnd, adapter->params.b_wnd,
2406 adapter->port[0]->mtu);
2407 return 0;
2408}
2409
2410static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2411{
2412 struct port_info *pi = netdev_priv(dev);
2413 struct adapter *adapter = pi->adapter;
2414 struct sockaddr *addr = p;
2415
2416 if (!is_valid_ether_addr(addr->sa_data))
2417 return -EINVAL;
2418
2419 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2420 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2421 if (offload_running(adapter))
2422 write_smt_entry(adapter, pi->port_id);
2423 return 0;
2424}
2425
2426/**
2427 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2428 * @adap: the adapter
2429 * @p: the port
2430 *
2431 * Ensures that current Rx processing on any of the queues associated with
2432 * the given port completes before returning. We do this by acquiring and
2433 * releasing the locks of the response queues associated with the port.
2434 */
2435static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2436{
2437 int i;
2438
2439 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2440 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2441
2442 spin_lock_irq(&q->lock);
2443 spin_unlock_irq(&q->lock);
2444 }
2445}
2446
2447static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2448{
2449 struct port_info *pi = netdev_priv(dev);
2450 struct adapter *adapter = pi->adapter;
2451
2452 pi->vlan_grp = grp;
2453 if (adapter->params.rev > 0)
2454 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2455 else {
2456 /* single control for all ports */
2457 unsigned int i, have_vlans = 0;
2458 for_each_port(adapter, i)
2459 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2460
2461 t3_set_vlan_accel(adapter, 1, have_vlans);
2462 }
2463 t3_synchronize_rx(adapter, pi);
2464}
2465
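/*
 * The netpoll hook below invokes the same handler the interrupt path
 * would use: with MSI-X each queue set has its own vector (so the
 * cookie is the qset), otherwise one vector serves the whole adapter.
 */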
2466#ifdef CONFIG_NET_POLL_CONTROLLER
2467static void cxgb_netpoll(struct net_device *dev)
2468{
2469 struct port_info *pi = netdev_priv(dev);
2470 struct adapter *adapter = pi->adapter;
2471 int qidx;
2472
2473 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2474 struct sge_qset *qs = &adapter->sge.qs[qidx];
2475 void *source;
2476
2477 if (adapter->flags & USING_MSIX)
2478 source = qs;
2479 else
2480 source = adapter;
2481
2482 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2483 }
2484}
2485#endif
2486
2487/*
2488 * Periodic accumulation of MAC statistics.
2489 */
2490static void mac_stats_update(struct adapter *adapter)
2491{
2492 int i;
2493
2494 for_each_port(adapter, i) {
2495 struct net_device *dev = adapter->port[i];
2496 struct port_info *p = netdev_priv(dev);
2497
2498 if (netif_running(dev)) {
2499 spin_lock(&adapter->stats_lock);
2500 t3_mac_update_stats(&p->mac);
2501 spin_unlock(&adapter->stats_lock);
2502 }
2503 }
2504}
2505
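/*
 * Poll link state on behalf of PHYs that cannot raise an interrupt
 * (!SUPPORTED_IRQ) and service any link-fault condition recorded by
 * the interrupt path under work_lock.
 */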
2506static void check_link_status(struct adapter *adapter)
2507{
2508 int i;
2509
2510 for_each_port(adapter, i) {
2511 struct net_device *dev = adapter->port[i];
2512 struct port_info *p = netdev_priv(dev);
2513 int link_fault;
2514
2515 spin_lock_irq(&adapter->work_lock);
2516 link_fault = p->link_fault;
2517 spin_unlock_irq(&adapter->work_lock);
2518
2519 if (link_fault) {
2520 t3_link_fault(adapter, i);
2521 continue;
2522 }
2523
2524 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2525 t3_xgm_intr_disable(adapter, i);
2526 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2527
2528 t3_link_changed(adapter, i);
2529 t3_xgm_intr_enable(adapter, i);
2530 }
2531 }
2532}
2533
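/*
 * Software watchdog for the XGMAC on T3B2 silicon: a return of 1 from
 * t3b2_mac_watchdog_task() means the MAC was toggled, 2 means it had
 * to be reinitialized from scratch (MTU, address, rx mode, link).
 */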
2534static void check_t3b2_mac(struct adapter *adapter)
2535{
2536 int i;
2537
2538 if (!rtnl_trylock()) /* synchronize with ifdown */
2539 return;
2540
2541 for_each_port(adapter, i) {
2542 struct net_device *dev = adapter->port[i];
2543 struct port_info *p = netdev_priv(dev);
2544 int status;
2545
2546 if (!netif_running(dev))
2547 continue;
2548
2549 status = 0;
2550 if (netif_running(dev) && netif_carrier_ok(dev))
2551 status = t3b2_mac_watchdog_task(&p->mac);
2552 if (status == 1)
2553 p->mac.stats.num_toggled++;
2554 else if (status == 2) {
2555 struct cmac *mac = &p->mac;
2556
2557 t3_mac_set_mtu(mac, dev->mtu);
2558 t3_mac_set_address(mac, 0, dev->dev_addr);
2559 cxgb_set_rxmode(dev);
2560 t3_link_start(&p->phy, mac, &p->link_config);
2561 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2562 t3_port_intr_enable(adapter, p->port_id);
2563 p->mac.stats.num_resets++;
2564 }
2565 }
2566 rtnl_unlock();
2567}
2568
2569
2570static void t3_adap_check_task(struct work_struct *work)
2571{
2572 struct adapter *adapter = container_of(work, struct adapter,
2573 adap_check_task.work);
2574 const struct adapter_params *p = &adapter->params;
2575 int port;
2576 unsigned int v, status, reset;
2577
2578 adapter->check_task_cnt++;
2579
2580 check_link_status(adapter);
2581
2582 /* Accumulate MAC stats if needed */
2583 if (!p->linkpoll_period ||
2584 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2585 p->stats_update_period) {
2586 mac_stats_update(adapter);
2587 adapter->check_task_cnt = 0;
2588 }
2589
2590 if (p->rev == T3_REV_B2)
2591 check_t3b2_mac(adapter);
2592
2593 /*
2594 * Scan the XGMAC's to check for various conditions which we want to
2595 * monitor in a periodic polling manner rather than via an interrupt
2596 * condition. This is used for conditions which would otherwise flood
2597 * the system with interrupts and we only really need to know that the
2598 * conditions are "happening" ... For each condition we count the
2599 * detection of the condition and reset it for the next polling loop.
2600 */
2601 for_each_port(adapter, port) {
2602 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2603 u32 cause;
2604
2605 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2606 reset = 0;
2607 if (cause & F_RXFIFO_OVERFLOW) {
2608 mac->stats.rx_fifo_ovfl++;
2609 reset |= F_RXFIFO_OVERFLOW;
2610 }
2611
2612 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2613 }
2614
2615 /*
2616 * We do the same as above for FL_EMPTY interrupts.
2617 */
2618 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2619 reset = 0;
2620
2621 if (status & F_FLEMPTY) {
2622 struct sge_qset *qs = &adapter->sge.qs[0];
2623 int i = 0;
2624
2625 reset |= F_FLEMPTY;
2626
2627 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2628 0xffff;
2629
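/*
 * The status bits alternate FL0/FL1 for each queue set in turn,
 * hence the i ^= 1 / qs++ walk below.
 */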
2630 while (v) {
2631 qs->fl[i].empty += (v & 1);
2632 if (i)
2633 qs++;
2634 i ^= 1;
2635 v >>= 1;
2636 }
2637 }
2638
2639 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2640
2641 /* Schedule the next check update if any port is active. */
2642 spin_lock_irq(&adapter->work_lock);
2643 if (adapter->open_device_map & PORT_MASK)
2644 schedule_chk_task(adapter);
2645 spin_unlock_irq(&adapter->work_lock);
2646}
2647
2648/*
2649 * Processes external (PHY) interrupts in process context.
2650 */
2651static void ext_intr_task(struct work_struct *work)
2652{
2653 struct adapter *adapter = container_of(work, struct adapter,
2654 ext_intr_handler_task);
2655 int i;
2656
2657 /* Disable link fault interrupts */
2658 for_each_port(adapter, i) {
2659 struct net_device *dev = adapter->port[i];
2660 struct port_info *p = netdev_priv(dev);
2661
2662 t3_xgm_intr_disable(adapter, i);
2663 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2664 }
2665
2666 /* Re-enable link fault interrupts */
2667 t3_phy_intr_handler(adapter);
2668
2669 for_each_port(adapter, i)
2670 t3_xgm_intr_enable(adapter, i);
2671
2672 /* Now reenable external interrupts */
2673 spin_lock_irq(&adapter->work_lock);
2674 if (adapter->slow_intr_mask) {
2675 adapter->slow_intr_mask |= F_T3DBG;
2676 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2677 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2678 adapter->slow_intr_mask);
2679 }
2680 spin_unlock_irq(&adapter->work_lock);
2681}
2682
2683/*
2684 * Interrupt-context handler for external (PHY) interrupts.
2685 */
2686void t3_os_ext_intr_handler(struct adapter *adapter)
2687{
2688 /*
2689 * Schedule a task to handle external interrupts as they may be slow
2690 * and we use a mutex to protect MDIO registers. We disable PHY
2691 * interrupts in the meantime and let the task reenable them when
2692 * it's done.
2693 */
2694 spin_lock(&adapter->work_lock);
2695 if (adapter->slow_intr_mask) {
2696 adapter->slow_intr_mask &= ~F_T3DBG;
2697 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2698 adapter->slow_intr_mask);
2699 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2700 }
2701 spin_unlock(&adapter->work_lock);
2702}
2703
2704void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2705{
2706 struct net_device *netdev = adapter->port[port_id];
2707 struct port_info *pi = netdev_priv(netdev);
2708
2709 spin_lock(&adapter->work_lock);
2710 pi->link_fault = 1;
2711 spin_unlock(&adapter->work_lock);
2712}
2713
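/*
 * Common teardown used by both the fatal-error worker and the PCI
 * error handlers: notify and close the offload side, bring every
 * port down, stop the SGE timers and optionally reset the chip.
 * The device is left PCI-disabled; t3_reenable_adapter() undoes that.
 */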
2714static int t3_adapter_error(struct adapter *adapter, int reset)
2715{
2716 int i, ret = 0;
2717
2718 if (is_offload(adapter) &&
2719 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2720 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2721 offload_close(&adapter->tdev);
2722 }
2723
2724 /* Stop all ports */
2725 for_each_port(adapter, i) {
2726 struct net_device *netdev = adapter->port[i];
2727
2728 if (netif_running(netdev))
2729 cxgb_close(netdev);
2730 }
2731
2732 /* Stop SGE timers */
2733 t3_stop_sge_timers(adapter);
2734
2735 adapter->flags &= ~FULL_INIT_DONE;
2736
2737 if (reset)
2738 ret = t3_reset_adapter(adapter);
2739
2740 pci_disable_device(adapter->pdev);
2741
2742 return ret;
2743}
2744
2745static int t3_reenable_adapter(struct adapter *adapter)
2746{
2747 if (pci_enable_device(adapter->pdev)) {
2748 dev_err(&adapter->pdev->dev,
2749 "Cannot re-enable PCI device after reset.\n");
2750 goto err;
2751 }
2752 pci_set_master(adapter->pdev);
2753 pci_restore_state(adapter->pdev);
2754
2755 /* Free sge resources */
2756 t3_free_sge_resources(adapter);
2757
2758 if (t3_replay_prep_adapter(adapter))
2759 goto err;
2760
2761 return 0;
2762err:
2763 return -1;
2764}
2765
2766static void t3_resume_ports(struct adapter *adapter)
2767{
2768 int i;
2769
2770 /* Restart the ports */
2771 for_each_port(adapter, i) {
2772 struct net_device *netdev = adapter->port[i];
2773
2774 if (netif_running(netdev)) {
2775 if (cxgb_open(netdev)) {
2776 dev_err(&adapter->pdev->dev,
2777 "can't bring device back up"
2778 " after reset\n");
2779 continue;
2780 }
2781 }
2782 }
2783
2784 if (is_offload(adapter) && !ofld_disable)
2785 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2786}
2787
2788/*
2789 * Processes a fatal error.
2790 * Bring the ports down, reset the chip, bring the ports back up.
2791 */
2792static void fatal_error_task(struct work_struct *work)
2793{
2794 struct adapter *adapter = container_of(work, struct adapter,
2795 fatal_error_handler_task);
2796 int err = 0;
2797
2798 rtnl_lock();
2799 err = t3_adapter_error(adapter, 1);
2800 if (!err)
2801 err = t3_reenable_adapter(adapter);
2802 if (!err)
2803 t3_resume_ports(adapter);
2804
2805 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2806 rtnl_unlock();
2807}
2808
2809void t3_fatal_err(struct adapter *adapter)
2810{
2811 unsigned int fw_status[4];
2812
2813 if (adapter->flags & FULL_INIT_DONE) {
2814 t3_sge_stop(adapter);
2815 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2816 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2817 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2818 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2819
2820 spin_lock(&adapter->work_lock);
2821 t3_intr_disable(adapter);
2822 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2823 spin_unlock(&adapter->work_lock);
2824 }
2825 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2826 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2827 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2828 fw_status[0], fw_status[1],
2829 fw_status[2], fw_status[3]);
2830}
2831
2832/**
2833 * t3_io_error_detected - called when PCI error is detected
2834 * @pdev: Pointer to PCI device
2835 * @state: The current pci connection state
2836 *
2837 * This function is called after a PCI bus error affecting
2838 * this device has been detected.
2839 */
2840static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2841 pci_channel_state_t state)
2842{
2843 struct adapter *adapter = pci_get_drvdata(pdev);
2844 int ret;
2845
2846 if (state == pci_channel_io_perm_failure)
2847 return PCI_ERS_RESULT_DISCONNECT;
2848
2849 ret = t3_adapter_error(adapter, 0);
2850
2851 /* Request a slot reset. */
2852 return PCI_ERS_RESULT_NEED_RESET;
2853}
2854
2855/**
2856 * t3_io_slot_reset - called after the pci bus has been reset.
2857 * @pdev: Pointer to PCI device
2858 *
2859 * Restart the card from scratch, as if from a cold-boot.
2860 */
2861static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2862{
2863 struct adapter *adapter = pci_get_drvdata(pdev);
2864
2865 if (!t3_reenable_adapter(adapter))
2866 return PCI_ERS_RESULT_RECOVERED;
2867
2868 return PCI_ERS_RESULT_DISCONNECT;
2869}
2870
2871/**
2872 * t3_io_resume - called when traffic can start flowing again.
2873 * @pdev: Pointer to PCI device
2874 *
2875 * This callback is called when the error recovery driver tells us that
2876 * it's OK to resume normal operation.
2877 */
2878static void t3_io_resume(struct pci_dev *pdev)
2879{
2880 struct adapter *adapter = pci_get_drvdata(pdev);
2881
2882 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2883 t3_read_reg(adapter, A_PCIE_PEX_ERR));
2884
2885 t3_resume_ports(adapter);
2886}
2887
2888static struct pci_error_handlers t3_err_handler = {
2889 .error_detected = t3_io_error_detected,
2890 .slot_reset = t3_io_slot_reset,
2891 .resume = t3_io_resume,
2892};
2893
2894/*
2895 * Set the number of qsets based on the number of CPUs and the number of ports,
2896 * not to exceed the number of available qsets, assuming there are enough qsets
2897 * per port in HW.
2898 */
2899static void set_nqsets(struct adapter *adap)
2900{
2901 int i, j = 0;
2902 int num_cpus = num_online_cpus();
2903 int hwports = adap->params.nports;
2904 int nqsets = adap->msix_nvectors - 1;
2905
2906 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2907 if (hwports == 2 &&
2908 (hwports * nqsets > SGE_QSETS ||
2909 num_cpus >= nqsets / hwports))
2910 nqsets /= hwports;
2911 if (nqsets > num_cpus)
2912 nqsets = num_cpus;
2913 if (nqsets < 1 || hwports == 4)
2914 nqsets = 1;
2915 } else
2916 nqsets = 1;
2917
2918 for_each_port(adap, i) {
2919 struct port_info *pi = adap2pinfo(adap, i);
2920
2921 pi->first_qset = j;
2922 pi->nqsets = nqsets;
2923 j = pi->first_qset + nqsets;
2924
2925 dev_info(&adap->pdev->dev,
2926 "Port %d using %d queue sets.\n", i, nqsets);
2927 }
2928}
2929
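/*
 * pci_enable_msix() returns 0 on success and, when it cannot allocate
 * the requested count, a positive number of vectors that are
 * available; we retry with that smaller count until it succeeds or
 * fails outright.  At least nports + 1 vectors are required: one per
 * port plus one for the slow path.
 */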
2930static int __devinit cxgb_enable_msix(struct adapter *adap)
2931{
2932 struct msix_entry entries[SGE_QSETS + 1];
2933 int vectors;
2934 int i, err;
2935
2936 vectors = ARRAY_SIZE(entries);
2937 for (i = 0; i < vectors; ++i)
2938 entries[i].entry = i;
2939
2940 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2941 vectors = err;
2942
2943 if (err < 0)
2944 pci_disable_msix(adap->pdev);
2945
2946 if (!err && vectors < (adap->params.nports + 1)) {
2947 pci_disable_msix(adap->pdev);
2948 err = -1;
2949 }
2950
2951 if (!err) {
2952 for (i = 0; i < vectors; ++i)
2953 adap->msix_info[i].vec = entries[i].vector;
2954 adap->msix_nvectors = vectors;
2955 }
2956
2957 return err;
2958}
2959
2960static void __devinit print_port_info(struct adapter *adap,
2961 const struct adapter_info *ai)
2962{
2963 static const char *pci_variant[] = {
2964 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2965 };
2966
2967 int i;
2968 char buf[80];
2969
2970 if (is_pcie(adap))
2971 snprintf(buf, sizeof(buf), "%s x%d",
2972 pci_variant[adap->params.pci.variant],
2973 adap->params.pci.width);
2974 else
2975 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2976 pci_variant[adap->params.pci.variant],
2977 adap->params.pci.speed, adap->params.pci.width);
2978
2979 for_each_port(adap, i) {
2980 struct net_device *dev = adap->port[i];
2981 const struct port_info *pi = netdev_priv(dev);
2982
2983 if (!test_bit(i, &adap->registered_device_map))
2984 continue;
2985 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2986 dev->name, ai->desc, pi->phy.desc,
2987 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2988 (adap->flags & USING_MSIX) ? " MSI-X" :
2989 (adap->flags & USING_MSI) ? " MSI" : "");
2990 if (adap->name == dev->name && adap->params.vpd.mclk)
2991 printk(KERN_INFO
2992 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2993 adap->name, t3_mc7_size(&adap->cm) >> 20,
2994 t3_mc7_size(&adap->pmtx) >> 20,
2995 t3_mc7_size(&adap->pmrx) >> 20,
2996 adap->params.vpd.sn);
2997 }
2998}
2999
3000static const struct net_device_ops cxgb_netdev_ops = {
3001 .ndo_open = cxgb_open,
3002 .ndo_stop = cxgb_close,
3003 .ndo_start_xmit = t3_eth_xmit,
3004 .ndo_get_stats = cxgb_get_stats,
3005 .ndo_validate_addr = eth_validate_addr,
3006 .ndo_set_multicast_list = cxgb_set_rxmode,
3007 .ndo_do_ioctl = cxgb_ioctl,
3008 .ndo_change_mtu = cxgb_change_mtu,
3009 .ndo_set_mac_address = cxgb_set_mac_addr,
3010 .ndo_vlan_rx_register = vlan_rx_register,
3011#ifdef CONFIG_NET_POLL_CONTROLLER
3012 .ndo_poll_controller = cxgb_netpoll,
3013#endif
3014};
3015
3016static int __devinit init_one(struct pci_dev *pdev,
3017 const struct pci_device_id *ent)
3018{
3019 static int version_printed;
3020
3021 int i, err, pci_using_dac = 0;
3022 resource_size_t mmio_start, mmio_len;
3023 const struct adapter_info *ai;
3024 struct adapter *adapter = NULL;
3025 struct port_info *pi;
3026
3027 if (!version_printed) {
3028 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3029 ++version_printed;
3030 }
3031
3032 if (!cxgb3_wq) {
3033 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3034 if (!cxgb3_wq) {
3035 printk(KERN_ERR DRV_NAME
3036 ": cannot initialize work queue\n");
3037 return -ENOMEM;
3038 }
3039 }
3040
3041 err = pci_request_regions(pdev, DRV_NAME);
3042 if (err) {
3043 /* Just info, some other driver may have claimed the device. */
3044 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3045 return err;
3046 }
3047
3048 err = pci_enable_device(pdev);
3049 if (err) {
3050 dev_err(&pdev->dev, "cannot enable PCI device\n");
3051 goto out_release_regions;
3052 }
3053
3054 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3055 pci_using_dac = 1;
3056 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3057 if (err) {
3058 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3059 "coherent allocations\n");
3060 goto out_disable_device;
3061 }
3062 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3063 dev_err(&pdev->dev, "no usable DMA configuration\n");
3064 goto out_disable_device;
3065 }
3066
3067 pci_set_master(pdev);
3068 pci_save_state(pdev);
3069
3070 mmio_start = pci_resource_start(pdev, 0);
3071 mmio_len = pci_resource_len(pdev, 0);
3072 ai = t3_get_adapter_info(ent->driver_data);
3073
3074 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3075 if (!adapter) {
3076 err = -ENOMEM;
3077 goto out_disable_device;
3078 }
3079
3080 adapter->nofail_skb =
3081 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3082 if (!adapter->nofail_skb) {
3083 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3084 err = -ENOMEM;
3085 goto out_free_adapter;
3086 }
3087
3088 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3089 if (!adapter->regs) {
3090 dev_err(&pdev->dev, "cannot map device registers\n");
3091 err = -ENOMEM;
3092 goto out_free_adapter;
3093 }
3094
3095 adapter->pdev = pdev;
3096 adapter->name = pci_name(pdev);
3097 adapter->msg_enable = dflt_msg_enable;
3098 adapter->mmio_len = mmio_len;
3099
3100 mutex_init(&adapter->mdio_lock);
3101 spin_lock_init(&adapter->work_lock);
3102 spin_lock_init(&adapter->stats_lock);
3103
3104 INIT_LIST_HEAD(&adapter->adapter_list);
3105 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3106 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3107 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3108
3109 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3110 struct net_device *netdev;
3111
3112 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3113 if (!netdev) {
3114 err = -ENOMEM;
3115 goto out_free_dev;
3116 }
3117
3118 SET_NETDEV_DEV(netdev, &pdev->dev);
3119
3120 adapter->port[i] = netdev;
3121 pi = netdev_priv(netdev);
3122 pi->adapter = adapter;
3123 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3124 pi->port_id = i;
3125 netif_carrier_off(netdev);
3126 netif_tx_stop_all_queues(netdev);
3127 netdev->irq = pdev->irq;
3128 netdev->mem_start = mmio_start;
3129 netdev->mem_end = mmio_start + mmio_len - 1;
3130 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3131 netdev->features |= NETIF_F_GRO;
3132 if (pci_using_dac)
3133 netdev->features |= NETIF_F_HIGHDMA;
3134
3135 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3136 netdev->netdev_ops = &cxgb_netdev_ops;
3137 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3138 }
3139
3140 pci_set_drvdata(pdev, adapter);
3141 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3142 err = -ENODEV;
3143 goto out_free_dev;
3144 }
3145
3146 /*
3147 * The card is now ready to go. If any errors occur during device
3148 * registration we do not fail the whole card but rather proceed only
3149 * with the ports we manage to register successfully. However we must
3150 * register at least one net device.
3151 */
3152 for_each_port(adapter, i) {
3153 err = register_netdev(adapter->port[i]);
3154 if (err)
3155 dev_warn(&pdev->dev,
3156 "cannot register net device %s, skipping\n",
3157 adapter->port[i]->name);
3158 else {
3159 /*
3160 * Change the name we use for messages to the name of
3161 * the first successfully registered interface.
3162 */
3163 if (!adapter->registered_device_map)
3164 adapter->name = adapter->port[i]->name;
3165
3166 __set_bit(i, &adapter->registered_device_map);
3167 }
3168 }
3169 if (!adapter->registered_device_map) {
3170 dev_err(&pdev->dev, "could not register any net devices\n");
3171 goto out_free_dev;
3172 }
3173
3174 /* Driver's ready. Reflect it on LEDs */
3175 t3_led_ready(adapter);
3176
3177 if (is_offload(adapter)) {
3178 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3179 cxgb3_adapter_ofld(adapter);
3180 }
3181
3182 /* See what interrupts we'll be using */
3183 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3184 adapter->flags |= USING_MSIX;
3185 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3186 adapter->flags |= USING_MSI;
3187
3188 set_nqsets(adapter);
3189
3190 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3191 &cxgb3_attr_group);
3192
3193 print_port_info(adapter, ai);
3194 return 0;
3195
3196out_free_dev:
3197 iounmap(adapter->regs);
3198 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3199 if (adapter->port[i])
3200 free_netdev(adapter->port[i]);
3201
3202out_free_adapter:
3203 kfree(adapter);
3204
3205out_disable_device:
3206 pci_disable_device(pdev);
3207out_release_regions:
3208 pci_release_regions(pdev);
3209 pci_set_drvdata(pdev, NULL);
3210 return err;
3211}
3212
3213static void __devexit remove_one(struct pci_dev *pdev)
3214{
3215 struct adapter *adapter = pci_get_drvdata(pdev);
3216
3217 if (adapter) {
3218 int i;
3219
3220 t3_sge_stop(adapter);
3221 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3222 &cxgb3_attr_group);
3223
3224 if (is_offload(adapter)) {
3225 cxgb3_adapter_unofld(adapter);
3226 if (test_bit(OFFLOAD_DEVMAP_BIT,
3227 &adapter->open_device_map))
3228 offload_close(&adapter->tdev);
3229 }
3230
3231 for_each_port(adapter, i)
3232 if (test_bit(i, &adapter->registered_device_map))
3233 unregister_netdev(adapter->port[i]);
3234
3235 t3_stop_sge_timers(adapter);
3236 t3_free_sge_resources(adapter);
3237 cxgb_disable_msi(adapter);
3238
3239 for_each_port(adapter, i)
3240 if (adapter->port[i])
3241 free_netdev(adapter->port[i]);
3242
3243 iounmap(adapter->regs);
3244 if (adapter->nofail_skb)
3245 kfree_skb(adapter->nofail_skb);
3246 kfree(adapter);
3247 pci_release_regions(pdev);
3248 pci_disable_device(pdev);
3249 pci_set_drvdata(pdev, NULL);
3250 }
3251}
3252
3253static struct pci_driver driver = {
3254 .name = DRV_NAME,
3255 .id_table = cxgb3_pci_tbl,
3256 .probe = init_one,
3257 .remove = __devexit_p(remove_one),
3258 .err_handler = &t3_err_handler,
3259};
3260
3261static int __init cxgb3_init_module(void)
3262{
3263 int ret;
3264
3265 cxgb3_offload_init();
3266
3267 ret = pci_register_driver(&driver);
3268 return ret;
3269}
3270
3271static void __exit cxgb3_cleanup_module(void)
3272{
3273 pci_unregister_driver(&driver);
3274 if (cxgb3_wq)
3275 destroy_workqueue(cxgb3_wq);
3276}
3277
3278module_init(cxgb3_init_module);
3279module_exit(cxgb3_cleanup_module);