/* drivers/net/cxgb3/cxgb3_main.c */
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
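/*
 * Illustrative usage (hypothetical command line): "modprobe cxgb3 msi=1"
 * restricts the driver to MSI or legacy pin interrupts, and "msi=0"
 * forces legacy pin interrupts.
 */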

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down. Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch. If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete. Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 * link_report - show link status and link speed/duplex
 * @dev: the net device whose link settings are to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes. The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

/**
 * t3_os_phymod_changed - handle PHY module changes
 * @adap: the adapter whose PHY reported the module change
 * @port_id: the port index of the PHY reporting the module change
 *
 * This is the OS-dependent handler for PHY module changes. It is
 * invoked when a PHY module is removed or inserted for any OS-specific
 * processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
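/*
 * With the format above, vector 0 carries the bare adapter name for the
 * slow path, and each data vector is named "<ifname>-<qset index>",
 * e.g. "eth0-0", "eth0-1" for an interface with two queue sets.
 */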

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return i;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
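/*
 * Illustration of the mapping built above: with nq0 = 2 and nq1 = 2 the
 * first half of rspq_map alternates 0, 1, 0, 1, ... (port 0's queue sets)
 * and the second half alternates 2, 3, 2, 3, ... (port 1's queue sets,
 * offset by nq0).
 */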

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list. Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled. This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 * set_qset_lro - Turn a queue set's LRO capability on and off
 * @dev: the device the qset is attached to
 * @qset_idx: the queue set index
 * @val: the LRO switch
 *
 * Sets LRO on or off for a particular queue set.
 * The device's features flag is updated to reflect the LRO
 * capability when all queues belonging to the device are
 * in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, lro_on = 1;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;

	/* let ethtool report LRO on only if all queues are LRO enabled */
	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
		lro_on &= adapter->params.sge.qset[i].lro;

	if (lro_on)
		dev->features |= NETIF_F_LRO;
	else
		dev->features &= ~NETIF_F_LRO;
}

/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
		     ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_stop_sge_timers(adap);
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
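/*
 * Worked derivation of the rate printed above, assuming vpd.cclk is the
 * core clock in kHz: cpt is clocks per token, so (cclk * 1000) / cpt is
 * tokens per second; multiplying by bpt (bytes per token) gives bytes per
 * second, and dividing by 125 converts bytes/sec to Kbits/sec (x8 / 1000).
 */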

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

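/*
 * Write one Source MAC Table entry for port @idx: src_mac0 is loaded with
 * the port's current unicast address and src_mac1 is left cleared.
 */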
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);

	return ret;
}

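/*
 * Bind each of a port's queue sets to that port on scheduler 1.  The lo/hi
 * arguments are passed as -1 here, which (judging from the command layout
 * above) appears to request the firmware's default min/max rate bounds.
 */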
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
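/*
 * Note the units implied by the scaling above: linkpoll_period is in
 * tenths of a second, while stats_update_period is in seconds.
 */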

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames ",
	"TxUnderrun ",
	"TxExtUnderrun ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames ",
	"RxFCSErrors ",
	"RxSymbolErrors ",
	"RxShortErrors ",
	"RxJabberErrors ",
	"RxLengthErrors ",
	"RxFIFOoverflow ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"PhyFIFOErrors ",
	"TSO ",
	"VLANextractions ",
	"VLANinsertions ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"LroAggregated ",
	"LroFlushed ",
	"LroNoDesc ",
	"RxDrops ",

	"CheckTXEnToggled ",
	"CheckResets ",

};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

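/*
 * Sum one per-queue SGE statistic, selected by @idx, across all the queue
 * sets owned by port @p.
 */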
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
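/* Note: the order of the writes above must match stats_strings[]. */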

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation. Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex. See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		int i;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}
	return 0;
}
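/*
 * Turning Rx checksum offload off also forces LRO off on every queue set,
 * mirroring cxgb3_set_flags() below, which refuses to enable LRO while Rx
 * checksumming is disabled.
 */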

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
1821
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

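/*
 * ethtool set_flags handler, used here to toggle LRO.  LRO depends on Rx
 * checksum offload, so enabling it while Rx checksumming is off is rejected;
 * the setting is applied to all queue sets owned by the port.
 */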
static int cxgb3_set_flags(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	int i;

	if (data & ETH_FLAG_LRO) {
		if (!(pi->rx_offload & T3_RX_CSUM))
			return -EINVAL;

		pi->rx_offload |= T3_LRO;
		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
			set_qset_lro(dev, i, 1);
	} else {
		pi->rx_offload &= ~T3_LRO;
		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}

	return 0;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
	.get_flags = ethtool_op_get_flags,
	.set_flags = cxgb3_set_flags,
};

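/*
 * Helper for validating the extension ioctl parameters below: negative
 * values mean "leave unchanged" and therefore always pass the range check.
 */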
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

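/*
 * Dispatcher for the Chelsio-private SIOCCHIOCTL commands used by the
 * userspace configuration tools.
 */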
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS: {
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))
					return -EINVAL;
			}

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0)
			set_qset_lro(dev, t.qset_idx, t.lro);

		break;
	}
	case CHELSIO_GET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM: {
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW: {
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB: {
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM: {
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM: {
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM: {
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match, t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match, t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

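/*
 * Standard ioctl entry point: MII register access (using MMD addressing on
 * 10G PHYs) plus the private extension ioctl above.
 */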
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret, mmd;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG: {
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return -EINVAL;

			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     mmd, data->reg_num, &val);
		} else
			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     0, data->reg_num & 0x1f, &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return -EINVAL;

			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      mmd, data->reg_num,
					      data->val_in);
		} else
			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      0, data->reg_num & 0x1f,
					      data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

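/*
 * Change a port's MTU.  MTUs below 81 are rejected (to accommodate SACK);
 * rev 0 adapters with offload running also need the MTU table reloaded.
 */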
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

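/*
 * Set a port's MAC address, keeping the offload SMT entry in sync when the
 * offload stack is running.
 */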
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}

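/*
 * Install a port's VLAN group.  Rev 0 hardware has a single VLAN
 * acceleration control shared by all ports, so it stays enabled as long as
 * any port has a group registered.
 */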
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

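/*
 * Netpoll entry point: invoke the interrupt handler for each of the port's
 * queue sets, passing the qset (MSI-X) or the adapter (MSI/INTx) as the
 * handler's cookie.
 */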
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

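/*
 * Poll link state on behalf of PHYs that cannot report changes by interrupt.
 */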
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}

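/*
 * Watchdog for T3B2 MACs: a status of 1 from t3b2_mac_watchdog_task counts a
 * recovery toggle, while 2 requests the full MAC reinitialization done here.
 */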
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}

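/*
 * Periodic maintenance work: link polling, MAC statistics accumulation, and
 * the T3B2 MAC watchdog.  Reschedules itself while any port is up.
 */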
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

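/*
 * Quiesce the adapter after an error: close all ports, shut down offload,
 * stop the SGE timers, and optionally reset the chip before disabling the
 * PCI device.
 */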
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		offload_close(&adapter->tdev);

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}
}

/*
 * Processes a fatal error: bring the ports down, reset the chip, and bring
 * the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

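/*
 * Entry point for fatal hardware errors: freeze the data path, disable
 * interrupts, and defer recovery to fatal_error_task.
 */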
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = SGE_QSETS;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

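/*
 * Request one MSI-X vector per queue set plus one for the slow path.  There
 * is no partial fallback here; if fewer vectors are available the caller
 * falls back to MSI or INTx instead.
 */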
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

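/*
 * Print a one-line summary for each registered port, plus adapter-wide
 * memory sizes and the serial number for the primary interface.
 */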
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};

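/*
 * PCI probe: map the device, allocate the adapter and one net_device per
 * port, prepare the hardware, and register the ports.  Registration
 * failures are tolerated as long as at least one port comes up.
 */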
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		netdev->features |= NETIF_F_LRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

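/*
 * PCI remove: tear everything down in roughly the reverse order of
 * init_one.
 */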
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);