cxgb3: commonize LASI phy code
[linux-2.6-block.git] / drivers / net / cxgb3 / cxgb3_main.c
/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose link is to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

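/*
 * Apply the net device's promiscuity, all-multicast, and multicast list
 * settings to the port's MAC.
 */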
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

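/*
 * Release any MSI-X or MSI resources held by the adapter, reverting it to
 * legacy pin interrupts.
 */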
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

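/*
 * Request the data-path MSI-X interrupts, one per SGE queue set.  Vector 0
 * is reserved for asynchronous events, so queue set i uses vector i + 1.
 * On failure any vectors already requested are freed again.
 */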
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

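/*
 * Release the adapter's IRQs: the asynchronous-event vector plus one vector
 * per queue set under MSI-X, or the single PCI IRQ otherwise.
 */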
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

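/*
 * Poll (up to 5 times, 10 ms apart) until the default response queue has
 * seen n more offload packets beyond init_cnt, i.e., until the replies to
 * previously sent management requests have all arrived.
 */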
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

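/*
 * Scrub the TP's parity-protected memories (SMT, L2T, routing table, and a
 * TCB field) by writing benign entries through CPL management messages,
 * then wait for all 16 + 2048 + 2048 + 1 replies so later reads don't trip
 * spurious parity errors.
 */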
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return i;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
		     ++j, ++qset_idx) {
			if (!pi->rx_csum_offload)
				adap->params.sge.qset[qset_idx].lro = 0;
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_stop_sge_timers(adap);
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

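/*
 * Generic show/store helpers for the per-device sysfs attributes defined
 * below.  Both take the RTNL lock around the callback to synchronize with
 * ioctls that may shut the device down; attr_store additionally requires
 * CAP_NET_ADMIN and validates the value against [min_val, max_val].
 */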
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

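/*
 * The sched0..sched7 attributes below expose the Tx packet schedulers.
 * A scheduler's rate follows from the bytes-per-tick (bpt) and
 * clocks-per-tick (cpt) fields read back through the TM PIO registers,
 * two schedulers per 32-bit register:
 *
 *	ticks/sec  = cclk * 1000 / cpt	(cclk * 1000 is the core clock in Hz)
 *	rate(Kbps) = ticks/sec * bpt / 125	(1 Kbps = 125 bytes/sec)
 *
 * cpt == 0 means the scheduler is disabled.
 */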
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

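/*
 * Program source MAC table entry idx with the MAC address of the
 * corresponding port via a CPL_SMT_WRITE_REQ on the offload queue.
 */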
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

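/*
 * Send a FW_WROPCODE_MNGT work request configuring one of the Tx packet
 * schedulers: scheduler index, min/max parameters, and port binding.
 */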
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);

	return ret;
}

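/*
 * Bind each port's queue sets to the port for Tx packet scheduling.
 * Returns the last error encountered, if any.
 */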
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

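/*
 * Map the chip revision to the character used in protocol SRAM file names
 * ('b' for T3B parts, 'c' for T3C); 0 means the revision needs no protocol
 * SRAM image.
 */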
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;
	int must_load;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

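/* Sum one SGE statistic over all the queue sets owned by a port. */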
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}

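/*
 * Read the contiguous register block [start, end] into buf at offset
 * start, so the dump preserves register addresses as buffer offsets.
 */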
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

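/*
 * Translate an ethtool speed/duplex pair into the corresponding
 * SUPPORTED_* capability bit.  Unsupported combinations, such as 10Gbps
 * half-duplex, yield 0.
 */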
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	if (!data) {
		struct adapter *adap = p->adapter;
		int i;

		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
			adap->params.sge.qset[i].lro = 0;
			adap->sge.qs[i].lro_enabled = 0;
		}
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};

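/*
 * Range check for ioctl parameters.  Negative values mean "leave this
 * setting unchanged" and are therefore always accepted.
 */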
1802static int in_range(int val, int lo, int hi)
1803{
1804 return val < 0 || (val <= hi && val >= lo);
1805}

static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !pi->rx_csum_offload)
					return -EINVAL;
			}

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

			q->lro = t.lro;
			qs->lro_enabled = t.lro;
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;	/* not a power of 4 from 16KB to 16MB */
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;	/* page counts must be multiples of 24 */
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
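		/* e.g. a chip with params.rev == 2 reports 3 | (2 << 10) = 0x803 */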
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match, t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match, t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret, mmd;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;	/* MMD in bits 8+ of phy_id */
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return -EINVAL;

			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     mmd, data->reg_num, &val);
		} else
			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     0, data->reg_num & 0x1f, &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return -EINVAL;

			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      mmd, data->reg_num,
					      data->val_in);
		} else
			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      0, data->reg_num & 0x1f,
					      data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	ret = t3_mac_set_mtu(&pi->mac, new_mtu);
	if (ret)
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}

static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling)(0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}

static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}

static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		offload_close(&adapter->tdev);

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}
}

/*
 * Processes a fatal error: bring the ports down, reset the chip, and
 * bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = SGE_QSETS;

	if (adap->params.rev > 0) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_csum_offload = 1;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready.  Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	cxgb3_offload_init();

	return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);