/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1, 1),	/* T302E */
	CH_DEVICE(0x22, 1, 2),	/* T310E */
	CH_DEVICE(0x23, 1, 3),	/* T320X */
	CH_DEVICE(0x24, 1, 1),	/* T302X */
	CH_DEVICE(0x25, 1, 3),	/* T320E */
	CH_DEVICE(0x26, 1, 2),	/* T310X */
	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down. Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch. If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete. Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 * link_report - show link status and link speed/duplex
 * @dev: the net device of the port whose settings are to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes. The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_set_reg_field(adapter,
					 A_XGM_TXFIFO_CFG + mac->offset,
					 F_ENDROPPKT, 0);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			t3_set_reg_field(adapter,
					 A_XGM_TXFIFO_CFG + mac->offset,
					 F_ENDROPPKT, F_ENDROPPKT);
		}

		link_report(dev);
	}
}

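/*
 * Propagate a net device's current Rx mode (promiscuity, multicast list)
 * to its port's MAC.
 */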
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

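/*
 * Request one MSI-X data interrupt per SGE queue set, releasing any vectors
 * already acquired if a request fails.
 */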
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices. We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues. Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
	int i, j, dummy_idx = 0;
	struct net_device *nd;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets - 1; j++) {
			if (!adap->dummy_netdev[dummy_idx]) {
				nd = alloc_netdev(0, "", ether_setup);
				if (!nd)
					goto free_all;

				nd->priv = adap;
				nd->weight = 64;
				set_bit(__LINK_STATE_START, &nd->state);
				adap->dummy_netdev[dummy_idx] = nd;
			}
			strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
			dummy_idx++;
		}
	}
	return 0;

free_all:
	while (--dummy_idx >= 0) {
		free_netdev(adap->dummy_netdev[dummy_idx]);
		adap->dummy_netdev[dummy_idx] = NULL;
	}
	return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled. This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;
	struct net_device *dev;

	for_each_port(adap, i) {
		dev = adap->port[i];
		while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
			msleep(1);
	}

	for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
		dev = adap->dummy_netdev[i];
		if (dev)
			while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
				msleep(1);
	}
}

/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq,
				j == 0 ? dev :
					 adap->dummy_netdev[dummy_dev_idx++]);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

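/*
 * Helpers for the adapter sysfs attributes below.  Reads and writes take the
 * RTNL lock to synchronize with ioctls that may shut down the device.
 */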
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
			 char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct adapter *adap = dev->priv; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
			    char *buf, int sched)
{
	ssize_t len;
	unsigned int v, addr, bpt, cpt;
	struct adapter *adap = to_net_dev(d)->priv;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len, int sched)
{
	char *endp;
	ssize_t ret;
	unsigned int val;
	struct adapter *adap = to_net_dev(d)->priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	return ret;
}

/**
 * cxgb_up - enable the adapter
 * @adapter: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL)
			err = upgrade_fw(adap);
		if (err)
			goto out;

		err = init_dummy_netdevs(adap);
		if (err)
			goto out;

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		if (request_msix_data_irqs(adap)) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

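/*
 * (Re)arm the periodic adapter check task, using the link polling period if
 * one is configured and the statistics update period otherwise.
 */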
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

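/*
 * Bring up the adapter's offload capabilities: bring the adapter itself up
 * if no port is open yet, activate the offload module, program the MTU
 * table and SMT, and notify all registered offload clients.
 */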
static int offload_open(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct t3cdev *tdev = T3CDEV(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

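/*
 * net_device open handler: bring the adapter up on the first open, enable
 * offload if available, and start the port's MAC/PHY and Tx queue.
 */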
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	int other_ports = adapter->open_device_map & PORT_MASK;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);

	t3_port_intr_disable(adapter, p->port_id);
	netif_stop_queue(dev);
	p->phy.ops->power_down(&p->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(p->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

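/*
 * Return netdev statistics for a port, derived from the accumulated MAC
 * statistics.
 */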
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);
	struct net_device_stats *ns = &p->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&p->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->priv;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

};

static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	u32 fw_vers = 0;
	struct adapter *adapter = dev->priv;

	t3_get_fw_version(adapter, &fw_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	int i;
	struct adapter *adapter = dev->priv;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

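/*
 * Translate a speed/duplex pair into the matching ethtool SUPPORTED_*
 * capability bit.
 */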
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	struct qset_params *q;
	struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	int i, err = 0;
	struct adapter *adapter = dev->priv;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = dev->priv;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_stats_count = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_perm_addr = ethtool_op_get_perm_addr
};

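/*
 * A negative value in the extension ioctls means "leave this parameter
 * unchanged", so values below zero are treated as in range here.
 */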
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

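/*
 * Handler for the Chelsio-private SIOCCHIOCTL extension ioctls.
 */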
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	int ret;
	u32 cmd;
	struct adapter *adapter = dev->priv;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS: {
		int i;
		struct qset_params *q;
		struct ch_qset_params t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;
		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM: {
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW: {
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB: {
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM: {
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM: {
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
		    !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM: {
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match, t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match, t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

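/*
 * Standard ioctl entry point: MII register access via SIOCGMIIREG/SIOCSMIIREG
 * plus the Chelsio extension ioctl handled above.
 */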
1924static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1925{
1926 int ret, mmd;
1927 struct adapter *adapter = dev->priv;
1928 struct port_info *pi = netdev_priv(dev);
1929 struct mii_ioctl_data *data = if_mii(req);
1930
1931 switch (cmd) {
1932 case SIOCGMIIPHY:
1933 data->phy_id = pi->phy.addr;
1934 /* FALLTHRU */
1935 case SIOCGMIIREG:{
1936 u32 val;
1937 struct cphy *phy = &pi->phy;
1938
1939 if (!phy->mdio_read)
1940 return -EOPNOTSUPP;
1941 if (is_10G(adapter)) {
1942 mmd = data->phy_id >> 8;
1943 if (!mmd)
1944 mmd = MDIO_DEV_PCS;
1945 else if (mmd > MDIO_DEV_XGXS)
1946 return -EINVAL;
1947
1948 ret =
1949 phy->mdio_read(adapter, data->phy_id & 0x1f,
1950 mmd, data->reg_num, &val);
1951 } else
1952 ret =
1953 phy->mdio_read(adapter, data->phy_id & 0x1f,
1954 0, data->reg_num & 0x1f,
1955 &val);
1956 if (!ret)
1957 data->val_out = val;
1958 break;
1959 }
1960 case SIOCSMIIREG:{
1961 struct cphy *phy = &pi->phy;
1962
1963 if (!capable(CAP_NET_ADMIN))
1964 return -EPERM;
1965 if (!phy->mdio_write)
1966 return -EOPNOTSUPP;
1967 if (is_10G(adapter)) {
1968 mmd = data->phy_id >> 8;
1969 if (!mmd)
1970 mmd = MDIO_DEV_PCS;
1971 else if (mmd > MDIO_DEV_XGXS)
1972 return -EINVAL;
1973
1974 ret =
1975 phy->mdio_write(adapter,
1976 data->phy_id & 0x1f, mmd,
1977 data->reg_num,
1978 data->val_in);
1979 } else
1980 ret =
1981 phy->mdio_write(adapter,
1982 data->phy_id & 0x1f, 0,
1983 data->reg_num & 0x1f,
1984 data->val_in);
1985 break;
1986 }
1987 case SIOCCHIOCTL:
1988 return cxgb_extension_ioctl(dev, req->ifr_data);
1989 default:
1990 return -EOPNOTSUPP;
1991 }
1992 return ret;
1993}
1994
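/*
 * MTU change handler: rejects MTUs too small to carry TCP options such
 * as SACK, programs the MAC with the new value, and on rev 0 parts
 * reloads the hardware MTU table used by the offload engine.
 */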
1995static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1996{
1997 int ret;
1998 struct adapter *adapter = dev->priv;
1999 struct port_info *pi = netdev_priv(dev);
2000
2001 if (new_mtu < 81) /* accommodate SACK */
2002 return -EINVAL;
2003 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2004 return ret;
2005 dev->mtu = new_mtu;
2006 init_port_mtus(adapter);
2007 if (adapter->params.rev == 0 && offload_running(adapter))
2008 t3_load_mtus(adapter, adapter->params.mtus,
2009 adapter->params.a_wnd, adapter->params.b_wnd,
2010 adapter->port[0]->mtu);
2011 return 0;
2012}
2013
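/*
 * MAC address change handler: validates the new address, writes it to
 * the MAC, and refreshes the port's SMT entry if offload is running.
 */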
2014static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2015{
2016 struct adapter *adapter = dev->priv;
2017 struct port_info *pi = netdev_priv(dev);
2018 struct sockaddr *addr = p;
2019
2020 if (!is_valid_ether_addr(addr->sa_data))
2021 return -EINVAL;
2022
2023 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2024 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2025 if (offload_running(adapter))
2026 write_smt_entry(adapter, pi->port_id);
2027 return 0;
2028}
2029
2030/**
2031 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2032 * @adap: the adapter
2033 * @p: the port
2034 *
2035 * Ensures that current Rx processing on any of the queues associated with
2036 * the given port completes before returning. We do this by acquiring and
2037 * releasing the locks of the response queues associated with the port.
2038 */
2039static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2040{
2041 int i;
2042
2043 for (i = 0; i < p->nqsets; i++) {
2044 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2045
2046 spin_lock_irq(&q->lock);
2047 spin_unlock_irq(&q->lock);
2048 }
2049}
2050
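/*
 * Enable/disable HW VLAN extraction for a port.  Rev > 0 chips have
 * per-port control; older chips have a single control covering all
 * ports, so it stays on while any port has a VLAN group registered.
 */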
2051static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2052{
2053 struct adapter *adapter = dev->priv;
2054 struct port_info *pi = netdev_priv(dev);
2055
2056 pi->vlan_grp = grp;
2057 if (adapter->params.rev > 0)
2058 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2059 else {
2060 /* single control for all ports */
2061 unsigned int i, have_vlans = 0;
2062 for_each_port(adapter, i)
2063 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2064
2065 t3_set_vlan_accel(adapter, 1, have_vlans);
2066 }
2067 t3_synchronize_rx(adapter, pi);
2068}
2069
2070static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2071{
2072 /* nothing */
2073}
2074
2075#ifdef CONFIG_NET_POLL_CONTROLLER
2076static void cxgb_netpoll(struct net_device *dev)
2077{
2078 struct adapter *adapter = dev->priv;
2079 struct sge_qset *qs = dev2qset(dev);
2080
2081 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2082 adapter);
2083}
2084#endif
2085
2086/*
2087 * Periodic accumulation of MAC statistics.
2088 */
2089static void mac_stats_update(struct adapter *adapter)
2090{
2091 int i;
2092
2093 for_each_port(adapter, i) {
2094 struct net_device *dev = adapter->port[i];
2095 struct port_info *p = netdev_priv(dev);
2096
2097 if (netif_running(dev)) {
2098 spin_lock(&adapter->stats_lock);
2099 t3_mac_update_stats(&p->mac);
2100 spin_unlock(&adapter->stats_lock);
2101 }
2102 }
2103}
2104
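/* Poll link state on running ports whose PHYs do not generate interrupts. */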
2105static void check_link_status(struct adapter *adapter)
2106{
2107 int i;
2108
2109 for_each_port(adapter, i) {
2110 struct net_device *dev = adapter->port[i];
2111 struct port_info *p = netdev_priv(dev);
2112
2113 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2114 t3_link_changed(adapter, i);
2115 }
2116}
2117
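/*
 * T3B2 MAC watchdog: run the per-port MAC check under rtnl_lock; a
 * return of 1 means the MAC was toggled, 2 means it needs a full
 * reinit (MTU, address, rx mode, link restart, interrupt re-enable).
 */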
2118static void check_t3b2_mac(struct adapter *adapter)
2119{
2120 int i;
2121
2122 rtnl_lock(); /* synchronize with ifdown */
2123 for_each_port(adapter, i) {
2124 struct net_device *dev = adapter->port[i];
2125 struct port_info *p = netdev_priv(dev);
2126 int status;
2127
2128 if (!netif_running(dev))
2129 continue;
2130
2131 status = 0;
2132 		if (netif_running(dev) && netif_carrier_ok(dev))
2133 status = t3b2_mac_watchdog_task(&p->mac);
2134 if (status == 1)
2135 p->mac.stats.num_toggled++;
2136 else if (status == 2) {
2137 struct cmac *mac = &p->mac;
2138
2139 t3_mac_set_mtu(mac, dev->mtu);
2140 t3_mac_set_address(mac, 0, dev->dev_addr);
2141 cxgb_set_rxmode(dev);
2142 t3_link_start(&p->phy, mac, &p->link_config);
2143 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2144 t3_port_intr_enable(adapter, p->port_id);
2145 p->mac.stats.num_resets++;
2146 }
2147 }
2148 rtnl_unlock();
2149}
2150
2151
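/*
 * Periodic adapter maintenance: polls link on interrupt-less PHYs,
 * accumulates MAC statistics, runs the T3B2 MAC watchdog, and
 * reschedules itself while any port is up.
 */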
2152static void t3_adap_check_task(struct work_struct *work)
2153{
2154 struct adapter *adapter = container_of(work, struct adapter,
2155 adap_check_task.work);
2156 const struct adapter_params *p = &adapter->params;
2157
2158 adapter->check_task_cnt++;
2159
2160 /* Check link status for PHYs without interrupts */
2161 if (p->linkpoll_period)
2162 check_link_status(adapter);
2163
2164 /* Accumulate MAC stats if needed */
2165 if (!p->linkpoll_period ||
2166 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2167 p->stats_update_period) {
2168 mac_stats_update(adapter);
2169 adapter->check_task_cnt = 0;
2170 }
2171
2172 if (p->rev == T3_REV_B2)
2173 check_t3b2_mac(adapter);
2174
2175 /* Schedule the next check update if any port is active. */
2176 spin_lock(&adapter->work_lock);
2177 if (adapter->open_device_map & PORT_MASK)
2178 schedule_chk_task(adapter);
2179 spin_unlock(&adapter->work_lock);
2180}
2181
2182/*
2183 * Processes external (PHY) interrupts in process context.
2184 */
2185static void ext_intr_task(struct work_struct *work)
2186{
2187 struct adapter *adapter = container_of(work, struct adapter,
2188 ext_intr_handler_task);
2189
2190 t3_phy_intr_handler(adapter);
2191
2192 /* Now reenable external interrupts */
2193 spin_lock_irq(&adapter->work_lock);
2194 if (adapter->slow_intr_mask) {
2195 adapter->slow_intr_mask |= F_T3DBG;
2196 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2197 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2198 adapter->slow_intr_mask);
2199 }
2200 spin_unlock_irq(&adapter->work_lock);
2201}
2202
2203/*
2204 * Interrupt-context handler for external (PHY) interrupts.
2205 */
2206void t3_os_ext_intr_handler(struct adapter *adapter)
2207{
2208 /*
2209 * Schedule a task to handle external interrupts as they may be slow
2210 * and we use a mutex to protect MDIO registers. We disable PHY
2211 * interrupts in the meantime and let the task reenable them when
2212 * it's done.
2213 */
2214 spin_lock(&adapter->work_lock);
2215 if (adapter->slow_intr_mask) {
2216 adapter->slow_intr_mask &= ~F_T3DBG;
2217 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2218 adapter->slow_intr_mask);
2219 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2220 }
2221 spin_unlock(&adapter->work_lock);
2222}
2223
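/*
 * Handle a fatal hardware error: quiesce the SGE and disable interrupts
 * if the adapter was fully initialized, then log the firmware status
 * registers read from the CIM control block.
 */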
2224void t3_fatal_err(struct adapter *adapter)
2225{
2226 unsigned int fw_status[4];
2227
2228 if (adapter->flags & FULL_INIT_DONE) {
2229 t3_sge_stop(adapter);
2230 t3_intr_disable(adapter);
2231 }
2232 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2233 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2234 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2235 fw_status[0], fw_status[1],
2236 fw_status[2], fw_status[3]);
2237
2238}
2239
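/*
 * Request one MSI-X vector per queue set plus one for the slow path;
 * if the full set is unavailable the caller falls back to MSI or
 * legacy interrupts.
 */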
2240static int __devinit cxgb_enable_msix(struct adapter *adap)
2241{
2242 struct msix_entry entries[SGE_QSETS + 1];
2243 int i, err;
2244
2245 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2246 entries[i].entry = i;
2247
2248 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2249 if (!err) {
2250 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2251 adap->msix_info[i].vec = entries[i].vector;
2252 } else if (err > 0)
2253 dev_info(&adap->pdev->dev,
2254 "only %d MSI-X vectors left, not using MSI-X\n", err);
2255 return err;
2256}
2257
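/*
 * Log a one-line summary for each registered port (adapter type, PHY,
 * bus variant, interrupt mode) plus the CM/PMTX/PMRX memory sizes once
 * per adapter.
 */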
2258static void __devinit print_port_info(struct adapter *adap,
2259 const struct adapter_info *ai)
2260{
2261 static const char *pci_variant[] = {
2262 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2263 };
2264
2265 int i;
2266 char buf[80];
2267
2268 if (is_pcie(adap))
2269 snprintf(buf, sizeof(buf), "%s x%d",
2270 pci_variant[adap->params.pci.variant],
2271 adap->params.pci.width);
2272 else
2273 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2274 pci_variant[adap->params.pci.variant],
2275 adap->params.pci.speed, adap->params.pci.width);
2276
2277 for_each_port(adap, i) {
2278 struct net_device *dev = adap->port[i];
2279 const struct port_info *pi = netdev_priv(dev);
2280
2281 if (!test_bit(i, &adap->registered_device_map))
2282 continue;
2283 		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2284 		       dev->name, ai->desc, pi->port_type->desc,
2285 		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
2286 (adap->flags & USING_MSIX) ? " MSI-X" :
2287 (adap->flags & USING_MSI) ? " MSI" : "");
2288 if (adap->name == dev->name && adap->params.vpd.mclk)
2289 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2290 adap->name, t3_mc7_size(&adap->cm) >> 20,
2291 t3_mc7_size(&adap->pmtx) >> 20,
2292 t3_mc7_size(&adap->pmrx) >> 20);
2293 }
2294}
2295
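/*
 * PCI probe: map the device, allocate the adapter and per-port net
 * devices, prepare the hardware, register the ports, and pick an
 * interrupt mode (MSI-X, MSI, or legacy).
 */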
2296static int __devinit init_one(struct pci_dev *pdev,
2297 const struct pci_device_id *ent)
2298{
2299 static int version_printed;
2300
2301 int i, err, pci_using_dac = 0;
2302 unsigned long mmio_start, mmio_len;
2303 const struct adapter_info *ai;
2304 struct adapter *adapter = NULL;
2305 struct port_info *pi;
2306
2307 if (!version_printed) {
2308 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2309 ++version_printed;
2310 }
2311
2312 if (!cxgb3_wq) {
2313 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2314 if (!cxgb3_wq) {
2315 printk(KERN_ERR DRV_NAME
2316 ": cannot initialize work queue\n");
2317 return -ENOMEM;
2318 }
2319 }
2320
2321 err = pci_request_regions(pdev, DRV_NAME);
2322 if (err) {
2323 /* Just info, some other driver may have claimed the device. */
2324 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2325 return err;
2326 }
2327
2328 err = pci_enable_device(pdev);
2329 if (err) {
2330 dev_err(&pdev->dev, "cannot enable PCI device\n");
2331 goto out_release_regions;
2332 }
2333
2334 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2335 pci_using_dac = 1;
2336 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2337 if (err) {
2338 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2339 "coherent allocations\n");
2340 goto out_disable_device;
2341 }
2342 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2343 dev_err(&pdev->dev, "no usable DMA configuration\n");
2344 goto out_disable_device;
2345 }
2346
2347 pci_set_master(pdev);
2348
2349 mmio_start = pci_resource_start(pdev, 0);
2350 mmio_len = pci_resource_len(pdev, 0);
2351 ai = t3_get_adapter_info(ent->driver_data);
2352
2353 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2354 if (!adapter) {
2355 err = -ENOMEM;
2356 goto out_disable_device;
2357 }
2358
2359 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2360 if (!adapter->regs) {
2361 dev_err(&pdev->dev, "cannot map device registers\n");
2362 err = -ENOMEM;
2363 goto out_free_adapter;
2364 }
2365
2366 adapter->pdev = pdev;
2367 adapter->name = pci_name(pdev);
2368 adapter->msg_enable = dflt_msg_enable;
2369 adapter->mmio_len = mmio_len;
2370
2371 mutex_init(&adapter->mdio_lock);
2372 spin_lock_init(&adapter->work_lock);
2373 spin_lock_init(&adapter->stats_lock);
2374
2375 INIT_LIST_HEAD(&adapter->adapter_list);
2376 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2377 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2378
2379 for (i = 0; i < ai->nports; ++i) {
2380 struct net_device *netdev;
2381
2382 netdev = alloc_etherdev(sizeof(struct port_info));
2383 if (!netdev) {
2384 err = -ENOMEM;
2385 goto out_free_dev;
2386 }
2387
2388 SET_MODULE_OWNER(netdev);
2389 SET_NETDEV_DEV(netdev, &pdev->dev);
2390
2391 adapter->port[i] = netdev;
2392 pi = netdev_priv(netdev);
2393 pi->rx_csum_offload = 1;
2394 pi->nqsets = 1;
2395 pi->first_qset = i;
2396 pi->activity = 0;
2397 pi->port_id = i;
2398 netif_carrier_off(netdev);
2399 netdev->irq = pdev->irq;
2400 netdev->mem_start = mmio_start;
2401 netdev->mem_end = mmio_start + mmio_len - 1;
2402 netdev->priv = adapter;
2403 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2404 netdev->features |= NETIF_F_LLTX;
2405 if (pci_using_dac)
2406 netdev->features |= NETIF_F_HIGHDMA;
2407
2408 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2409 netdev->vlan_rx_register = vlan_rx_register;
2410 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2411
2412 netdev->open = cxgb_open;
2413 netdev->stop = cxgb_close;
2414 netdev->hard_start_xmit = t3_eth_xmit;
2415 netdev->get_stats = cxgb_get_stats;
2416 netdev->set_multicast_list = cxgb_set_rxmode;
2417 netdev->do_ioctl = cxgb_ioctl;
2418 netdev->change_mtu = cxgb_change_mtu;
2419 netdev->set_mac_address = cxgb_set_mac_addr;
2420#ifdef CONFIG_NET_POLL_CONTROLLER
2421 netdev->poll_controller = cxgb_netpoll;
2422#endif
2423 netdev->weight = 64;
2424
2425 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2426 }
2427
2428 pci_set_drvdata(pdev, adapter->port[0]);
2429 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2430 err = -ENODEV;
2431 goto out_free_dev;
2432 }
2433
2434 /*
2435 * The card is now ready to go. If any errors occur during device
2436 * registration we do not fail the whole card but rather proceed only
2437 * with the ports we manage to register successfully. However we must
2438 * register at least one net device.
2439 */
2440 for_each_port(adapter, i) {
2441 err = register_netdev(adapter->port[i]);
2442 if (err)
2443 dev_warn(&pdev->dev,
2444 "cannot register net device %s, skipping\n",
2445 adapter->port[i]->name);
2446 else {
2447 /*
2448 * Change the name we use for messages to the name of
2449 * the first successfully registered interface.
2450 */
2451 if (!adapter->registered_device_map)
2452 adapter->name = adapter->port[i]->name;
2453
2454 __set_bit(i, &adapter->registered_device_map);
2455 }
2456 }
2457 if (!adapter->registered_device_map) {
2458 dev_err(&pdev->dev, "could not register any net devices\n");
2459 goto out_free_dev;
2460 }
2461
2462 /* Driver's ready. Reflect it on LEDs */
2463 t3_led_ready(adapter);
2464
2465 if (is_offload(adapter)) {
2466 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2467 cxgb3_adapter_ofld(adapter);
2468 }
2469
2470 /* See what interrupts we'll be using */
2471 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2472 adapter->flags |= USING_MSIX;
2473 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2474 adapter->flags |= USING_MSI;
2475
2476 	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2477 &cxgb3_attr_group);
2478
2479 print_port_info(adapter, ai);
2480 return 0;
2481
2482out_free_dev:
2483 iounmap(adapter->regs);
2484 for (i = ai->nports - 1; i >= 0; --i)
2485 if (adapter->port[i])
2486 free_netdev(adapter->port[i]);
2487
2488out_free_adapter:
2489 kfree(adapter);
2490
2491out_disable_device:
2492 pci_disable_device(pdev);
2493out_release_regions:
2494 pci_release_regions(pdev);
2495 pci_set_drvdata(pdev, NULL);
2496 return err;
2497}
2498
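/*
 * PCI remove: unregister the ports, tear down offload state, free SGE
 * resources and dummy netdevs, and release the MMIO mapping and PCI
 * resources.
 */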
2499static void __devexit remove_one(struct pci_dev *pdev)
2500{
2501 struct net_device *dev = pci_get_drvdata(pdev);
2502
2503 if (dev) {
2504 int i;
2505 struct adapter *adapter = dev->priv;
2506
2507 t3_sge_stop(adapter);
2508 		sysfs_remove_group(&adapter->port[0]->dev.kobj,
2509 &cxgb3_attr_group);
2510
2511 for_each_port(adapter, i)
2512 if (test_bit(i, &adapter->registered_device_map))
2513 unregister_netdev(adapter->port[i]);
2514
2515 if (is_offload(adapter)) {
2516 cxgb3_adapter_unofld(adapter);
2517 if (test_bit(OFFLOAD_DEVMAP_BIT,
2518 &adapter->open_device_map))
2519 offload_close(&adapter->tdev);
2520 }
2521
2522 t3_free_sge_resources(adapter);
2523 cxgb_disable_msi(adapter);
2524
2525 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2526 if (adapter->dummy_netdev[i]) {
2527 free_netdev(adapter->dummy_netdev[i]);
2528 adapter->dummy_netdev[i] = NULL;
2529 }
2530
2531 for_each_port(adapter, i)
2532 if (adapter->port[i])
2533 free_netdev(adapter->port[i]);
2534
2535 iounmap(adapter->regs);
2536 kfree(adapter);
2537 pci_release_regions(pdev);
2538 pci_disable_device(pdev);
2539 pci_set_drvdata(pdev, NULL);
2540 }
2541}
2542
2543static struct pci_driver driver = {
2544 .name = DRV_NAME,
2545 .id_table = cxgb3_pci_tbl,
2546 .probe = init_one,
2547 .remove = __devexit_p(remove_one),
2548};
2549
2550static int __init cxgb3_init_module(void)
2551{
2552 int ret;
2553
2554 cxgb3_offload_init();
2555
2556 ret = pci_register_driver(&driver);
2557 return ret;
2558}
2559
2560static void __exit cxgb3_cleanup_module(void)
2561{
2562 pci_unregister_driver(&driver);
2563 if (cxgb3_wq)
2564 destroy_workqueue(cxgb3_wq);
2565}
2566
2567module_init(cxgb3_init_module);
2568module_exit(cxgb3_cleanup_module);