4d22de3e 1/*
1d68e93d 2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
2e283962 45#include <linux/firmware.h>
46#include <asm/uaccess.h>
47
48#include "common.h"
49#include "cxgb3_ioctl.h"
50#include "regs.h"
51#include "cxgb3_offload.h"
52#include "version.h"
53
54#include "cxgb3_ctl_defs.h"
55#include "t3_cpl.h"
56#include "firmware_exports.h"
57
58enum {
59 MAX_TXQ_ENTRIES = 16384,
60 MAX_CTRL_TXQ_ENTRIES = 1024,
61 MAX_RSPQ_ENTRIES = 16384,
62 MAX_RX_BUFFERS = 16384,
63 MAX_RX_JUMBO_BUFFERS = 16384,
64 MIN_TXQ_ENTRIES = 4,
65 MIN_CTRL_TXQ_ENTRIES = 4,
66 MIN_RSPQ_ENTRIES = 32,
67 MIN_FL_ENTRIES = 32
68};
69
70#define PORT_MASK ((1 << MAX_NPORTS) - 1)
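/*
 * open_device_map keeps one bit per network port (the bits covered by
 * PORT_MASK) plus OFFLOAD_DEVMAP_BIT for the offload pseudo-device; see
 * cxgb_open(), cxgb_close() and offload_open() below for how the bits are
 * set and tested.
 */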
71
72#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
73 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
74 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
75
76#define EEPROM_MAGIC 0x38E2F10C
77
78#define CH_DEVICE(devid, ssid, idx) \
79 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
80
81static const struct pci_device_id cxgb3_pci_tbl[] = {
82 CH_DEVICE(0x20, 1, 0), /* PE9000 */
83 CH_DEVICE(0x21, 1, 1), /* T302E */
84 CH_DEVICE(0x22, 1, 2), /* T310E */
85 CH_DEVICE(0x23, 1, 3), /* T320X */
86 CH_DEVICE(0x24, 1, 1), /* T302X */
87 CH_DEVICE(0x25, 1, 3), /* T320E */
88 CH_DEVICE(0x26, 1, 2), /* T310X */
89 CH_DEVICE(0x30, 1, 2), /* T3B10 */
90 CH_DEVICE(0x31, 1, 3), /* T3B20 */
91 CH_DEVICE(0x32, 1, 1), /* T3B02 */
92 {0,}
93};
94
95MODULE_DESCRIPTION(DRV_DESC);
96MODULE_AUTHOR("Chelsio Communications");
1d68e93d 97MODULE_LICENSE("Dual BSD/GPL");
98MODULE_VERSION(DRV_VERSION);
99MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
100
101static int dflt_msg_enable = DFLT_MSG_ENABLE;
102
103module_param(dflt_msg_enable, int, 0644);
104MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
105
106/*
107 * The driver uses the best interrupt scheme available on a platform in the
108 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
109 * of these schemes the driver may consider as follows:
110 *
111 * msi = 2: choose from among all three options
112 * msi = 1: only consider MSI and pin interrupts
113 * msi = 0: force pin interrupts
114 */
115static int msi = 2;
116
117module_param(msi, int, 0644);
118MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
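/*
 * Illustrative usage (not part of the driver itself): the interrupt policy
 * can be chosen at module load time, e.g.
 *   modprobe cxgb3 msi=0    (force legacy pin interrupts)
 *   modprobe cxgb3 msi=1    (allow MSI but not MSI-X)
 *   modprobe cxgb3 msi=2    (default: prefer MSI-X, then MSI, then INTx)
 */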
119
120/*
 121 * The driver enables offload by default.
122 * To disable it, use ofld_disable = 1.
123 */
124
125static int ofld_disable = 0;
126
127module_param(ofld_disable, int, 0644);
128MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
129
130/*
131 * We have work elements that we need to cancel when an interface is taken
132 * down. Normally the work elements would be executed by keventd but that
133 * can deadlock because of linkwatch. If our close method takes the rtnl
134 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
135 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
136 * for our work to complete. Get our own work queue to solve this.
137 */
138static struct workqueue_struct *cxgb3_wq;
139
140/**
141 * link_report - show link status and link speed/duplex
 142 * @dev: the net device whose link settings are to be reported
143 *
144 * Shows the link status, speed, and duplex of a port.
145 */
146static void link_report(struct net_device *dev)
147{
148 if (!netif_carrier_ok(dev))
149 printk(KERN_INFO "%s: link down\n", dev->name);
150 else {
151 const char *s = "10Mbps";
152 const struct port_info *p = netdev_priv(dev);
153
154 switch (p->link_config.speed) {
155 case SPEED_10000:
156 s = "10Gbps";
157 break;
158 case SPEED_1000:
159 s = "1000Mbps";
160 break;
161 case SPEED_100:
162 s = "100Mbps";
163 break;
164 }
165
166 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
167 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
168 }
169}
170
171/**
172 * t3_os_link_changed - handle link status changes
173 * @adapter: the adapter associated with the link change
 174 * @port_id: the port index whose link status has changed
175 * @link_stat: the new status of the link
176 * @speed: the new speed setting
177 * @duplex: the new duplex setting
178 * @pause: the new flow-control setting
179 *
180 * This is the OS-dependent handler for link status changes. The OS
181 * neutral handler takes care of most of the processing for these events,
182 * then calls this handler for any OS-specific processing.
183 */
184void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
185 int speed, int duplex, int pause)
186{
187 struct net_device *dev = adapter->port[port_id];
188
189 /* Skip changes from disabled ports. */
190 if (!netif_running(dev))
191 return;
192
193 if (link_stat != netif_carrier_ok(dev)) {
194 if (link_stat)
195 netif_carrier_on(dev);
196 else
197 netif_carrier_off(dev);
198 link_report(dev);
199 }
200}
201
202static void cxgb_set_rxmode(struct net_device *dev)
203{
204 struct t3_rx_mode rm;
205 struct port_info *pi = netdev_priv(dev);
206
207 init_rx_mode(&rm, dev, dev->mc_list);
208 t3_mac_set_rx_mode(&pi->mac, &rm);
209}
210
211/**
212 * link_start - enable a port
213 * @dev: the device to enable
214 *
215 * Performs the MAC and PHY actions needed to enable a port.
216 */
217static void link_start(struct net_device *dev)
218{
219 struct t3_rx_mode rm;
220 struct port_info *pi = netdev_priv(dev);
221 struct cmac *mac = &pi->mac;
222
223 init_rx_mode(&rm, dev, dev->mc_list);
224 t3_mac_reset(mac);
225 t3_mac_set_mtu(mac, dev->mtu);
226 t3_mac_set_address(mac, 0, dev->dev_addr);
227 t3_mac_set_rx_mode(mac, &rm);
228 t3_link_start(&pi->phy, mac, &pi->link_config);
229 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
230}
231
232static inline void cxgb_disable_msi(struct adapter *adapter)
233{
234 if (adapter->flags & USING_MSIX) {
235 pci_disable_msix(adapter->pdev);
236 adapter->flags &= ~USING_MSIX;
237 } else if (adapter->flags & USING_MSI) {
238 pci_disable_msi(adapter->pdev);
239 adapter->flags &= ~USING_MSI;
240 }
241}
242
243/*
244 * Interrupt handler for asynchronous events used with MSI-X.
245 */
246static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
247{
248 t3_slow_intr_handler(cookie);
249 return IRQ_HANDLED;
250}
251
252/*
253 * Name the MSI-X interrupts.
254 */
255static void name_msix_vecs(struct adapter *adap)
256{
257 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
258
259 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
260 adap->msix_info[0].desc[n] = 0;
261
262 for_each_port(adap, j) {
263 struct net_device *d = adap->port[j];
264 const struct port_info *pi = netdev_priv(d);
265
266 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
267 snprintf(adap->msix_info[msi_idx].desc, n,
268 "%s (queue %d)", d->name, i);
269 adap->msix_info[msi_idx].desc[n] = 0;
270 }
271 }
272}
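/*
 * With the naming above, the slow-path vector is labelled from adap->name
 * and each queue-set vector appears in /proc/interrupts as, for example,
 * "eth0 (queue 0)", "eth0 (queue 1)" (interface names illustrative).
 */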
273
274static int request_msix_data_irqs(struct adapter *adap)
275{
276 int i, j, err, qidx = 0;
277
278 for_each_port(adap, i) {
279 int nqsets = adap2pinfo(adap, i)->nqsets;
280
281 for (j = 0; j < nqsets; ++j) {
282 err = request_irq(adap->msix_info[qidx + 1].vec,
283 t3_intr_handler(adap,
284 adap->sge.qs[qidx].
285 rspq.polling), 0,
286 adap->msix_info[qidx + 1].desc,
287 &adap->sge.qs[qidx]);
288 if (err) {
289 while (--qidx >= 0)
290 free_irq(adap->msix_info[qidx + 1].vec,
291 &adap->sge.qs[qidx]);
292 return err;
293 }
294 qidx++;
295 }
296 }
297 return 0;
298}
299
300/**
301 * setup_rss - configure RSS
302 * @adap: the adapter
303 *
304 * Sets up RSS to distribute packets to multiple receive queues. We
305 * configure the RSS CPU lookup table to distribute to the number of HW
306 * receive queues, and the response queue lookup table to narrow that
307 * down to the response queues actually configured for each port.
308 * We always configure the RSS mapping for two ports since the mapping
309 * table has plenty of entries.
310 */
311static void setup_rss(struct adapter *adap)
312{
313 int i;
314 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
315 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
316 u8 cpus[SGE_QSETS + 1];
317 u16 rspq_map[RSS_TABLE_SIZE];
318
319 for (i = 0; i < SGE_QSETS; ++i)
320 cpus[i] = i;
321 cpus[SGE_QSETS] = 0xff; /* terminator */
322
323 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
324 rspq_map[i] = i % nq0;
325 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
326 }
327
328 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
329 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
330 V_RRCPLCPUSIZE(6), cpus, rspq_map);
331}
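/*
 * Worked example with hypothetical sizes: if port 0 has nq0 = 4 queue sets
 * and port 1 has nq1 = 2, the first half of rspq_map cycles 0,1,2,3,0,1,...
 * (i % nq0) while the second half cycles 4,5,4,5,... ((i % nq1) + nq0), so
 * RSS hashes landing in the upper half are steered to port 1's queue sets.
 */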
332
333/*
334 * If we have multiple receive queues per port serviced by NAPI we need one
335 * netdevice per queue as NAPI operates on netdevices. We already have one
336 * netdevice, namely the one associated with the interface, so we use dummy
337 * ones for any additional queues. Note that these netdevices exist purely
338 * so that NAPI has something to work with, they do not represent network
339 * ports and are not registered.
340 */
341static int init_dummy_netdevs(struct adapter *adap)
342{
343 int i, j, dummy_idx = 0;
344 struct net_device *nd;
345
346 for_each_port(adap, i) {
347 struct net_device *dev = adap->port[i];
348 const struct port_info *pi = netdev_priv(dev);
349
350 for (j = 0; j < pi->nqsets - 1; j++) {
351 if (!adap->dummy_netdev[dummy_idx]) {
352 nd = alloc_netdev(0, "", ether_setup);
353 if (!nd)
354 goto free_all;
355
356 nd->priv = adap;
357 nd->weight = 64;
358 set_bit(__LINK_STATE_START, &nd->state);
359 adap->dummy_netdev[dummy_idx] = nd;
360 }
361 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
362 dummy_idx++;
363 }
364 }
365 return 0;
366
367free_all:
368 while (--dummy_idx >= 0) {
369 free_netdev(adap->dummy_netdev[dummy_idx]);
370 adap->dummy_netdev[dummy_idx] = NULL;
371 }
372 return -ENOMEM;
373}
374
375/*
376 * Wait until all NAPI handlers are descheduled. This includes the handlers of
377 * both netdevices representing interfaces and the dummy ones for the extra
378 * queues.
379 */
380static void quiesce_rx(struct adapter *adap)
381{
382 int i;
383 struct net_device *dev;
384
385 for_each_port(adap, i) {
386 dev = adap->port[i];
387 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
388 msleep(1);
389 }
390
391 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
392 dev = adap->dummy_netdev[i];
393 if (dev)
394 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
395 msleep(1);
396 }
397}
398
399/**
400 * setup_sge_qsets - configure SGE Tx/Rx/response queues
401 * @adap: the adapter
402 *
403 * Determines how many sets of SGE queues to use and initializes them.
404 * We support multiple queue sets per port if we have MSI-X, otherwise
405 * just one queue set per port.
406 */
407static int setup_sge_qsets(struct adapter *adap)
408{
409 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
410 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
411
412 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
413 irq_idx = -1;
414
415 for_each_port(adap, i) {
416 struct net_device *dev = adap->port[i];
417 const struct port_info *pi = netdev_priv(dev);
418
419 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
420 err = t3_sge_alloc_qset(adap, qset_idx, 1,
421 (adap->flags & USING_MSIX) ? qset_idx + 1 :
422 irq_idx,
423 &adap->params.sge.qset[qset_idx], ntxq,
424 j == 0 ? dev :
425 adap-> dummy_netdev[dummy_dev_idx++]);
426 if (err) {
427 t3_free_sge_resources(adap);
428 return err;
429 }
430 }
431 }
432
433 return 0;
434}
435
436static ssize_t attr_show(struct device *d, struct device_attribute *attr,
437 char *buf,
896392ef 438 ssize_t(*format) (struct net_device *, char *))
439{
440 ssize_t len;
441
442 /* Synchronize with ioctls that may shut down the device */
443 rtnl_lock();
896392ef 444 len = (*format) (to_net_dev(d), buf);
445 rtnl_unlock();
446 return len;
447}
448
449static ssize_t attr_store(struct device *d, struct device_attribute *attr,
450 const char *buf, size_t len,
896392ef 451 ssize_t(*set) (struct net_device *, unsigned int),
452 unsigned int min_val, unsigned int max_val)
453{
454 char *endp;
455 ssize_t ret;
456 unsigned int val;
457
458 if (!capable(CAP_NET_ADMIN))
459 return -EPERM;
460
461 val = simple_strtoul(buf, &endp, 0);
462 if (endp == buf || val < min_val || val > max_val)
463 return -EINVAL;
464
465 rtnl_lock();
896392ef 466 ret = (*set) (to_net_dev(d), val);
467 if (!ret)
468 ret = len;
469 rtnl_unlock();
470 return ret;
471}
472
473#define CXGB3_SHOW(name, val_expr) \
896392ef 474static ssize_t format_##name(struct net_device *dev, char *buf) \
4d22de3e 475{ \
896392ef 476 struct adapter *adap = dev->priv; \
477 return sprintf(buf, "%u\n", val_expr); \
478} \
479static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
480 char *buf) \
4d22de3e 481{ \
0ee8d33c 482 return attr_show(d, attr, buf, format_##name); \
483}
484
896392ef 485static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
4d22de3e 486{
487 struct adapter *adap = dev->priv;
488
489 if (adap->flags & FULL_INIT_DONE)
490 return -EBUSY;
491 if (val && adap->params.rev == 0)
492 return -EINVAL;
493 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
494 return -EINVAL;
495 adap->params.mc5.nfilters = val;
496 return 0;
497}
498
499static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
500 const char *buf, size_t len)
4d22de3e 501{
0ee8d33c 502 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
503}
504
896392ef 505static ssize_t set_nservers(struct net_device *dev, unsigned int val)
4d22de3e 506{
507 struct adapter *adap = dev->priv;
508
509 if (adap->flags & FULL_INIT_DONE)
510 return -EBUSY;
511 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
512 return -EINVAL;
513 adap->params.mc5.nservers = val;
514 return 0;
515}
516
517static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
518 const char *buf, size_t len)
4d22de3e 519{
0ee8d33c 520 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
521}
522
523#define CXGB3_ATTR_R(name, val_expr) \
524CXGB3_SHOW(name, val_expr) \
0ee8d33c 525static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
526
527#define CXGB3_ATTR_RW(name, val_expr, store_method) \
528CXGB3_SHOW(name, val_expr) \
0ee8d33c 529static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
530
531CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
532CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
533CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
534
535static struct attribute *cxgb3_attrs[] = {
536 &dev_attr_cam_size.attr,
537 &dev_attr_nfilters.attr,
538 &dev_attr_nservers.attr,
539 NULL
540};
541
542static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
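/*
 * For reference, CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters,
 * store_nfilters) above expands to format_nfilters()/show_nfilters() plus a
 * struct device_attribute named dev_attr_nfilters, which is what the
 * cxgb3_attrs[] table collects into this sysfs attribute group.
 */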
543
544static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
545 char *buf, int sched)
546{
547 ssize_t len;
548 unsigned int v, addr, bpt, cpt;
0ee8d33c 549 struct adapter *adap = to_net_dev(d)->priv;
550
551 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
552 rtnl_lock();
553 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
554 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
555 if (sched & 1)
556 v >>= 16;
557 bpt = (v >> 8) & 0xff;
558 cpt = v & 0xff;
559 if (!cpt)
560 len = sprintf(buf, "disabled\n");
561 else {
562 v = (adap->params.vpd.cclk * 1000) / cpt;
563 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
564 }
565 rtnl_unlock();
566 return len;
567}
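/*
 * Rough worked example (hypothetical values, assuming vpd.cclk is in kHz):
 * with cclk = 200000, cpt = 100 clocks per token and bpt = 64 bytes per
 * token, v = 200000 * 1000 / 100 = 2,000,000 tokens/s and the reported
 * rate is 2,000,000 * 64 / 125 = 1,024,000 Kbps, i.e. roughly 1 Gbps.
 */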
568
569static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
570 const char *buf, size_t len, int sched)
571{
572 char *endp;
573 ssize_t ret;
574 unsigned int val;
0ee8d33c 575 struct adapter *adap = to_net_dev(d)->priv;
576
577 if (!capable(CAP_NET_ADMIN))
578 return -EPERM;
579
580 val = simple_strtoul(buf, &endp, 0);
581 if (endp == buf || val > 10000000)
582 return -EINVAL;
583
584 rtnl_lock();
585 ret = t3_config_sched(adap, val, sched);
586 if (!ret)
587 ret = len;
588 rtnl_unlock();
589 return ret;
590}
591
592#define TM_ATTR(name, sched) \
593static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
594 char *buf) \
4d22de3e 595{ \
0ee8d33c 596 return tm_attr_show(d, attr, buf, sched); \
4d22de3e 597} \
598static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
599 const char *buf, size_t len) \
4d22de3e 600{ \
0ee8d33c 601 return tm_attr_store(d, attr, buf, len, sched); \
4d22de3e 602} \
0ee8d33c 603static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
604
605TM_ATTR(sched0, 0);
606TM_ATTR(sched1, 1);
607TM_ATTR(sched2, 2);
608TM_ATTR(sched3, 3);
609TM_ATTR(sched4, 4);
610TM_ATTR(sched5, 5);
611TM_ATTR(sched6, 6);
612TM_ATTR(sched7, 7);
613
614static struct attribute *offload_attrs[] = {
615 &dev_attr_sched0.attr,
616 &dev_attr_sched1.attr,
617 &dev_attr_sched2.attr,
618 &dev_attr_sched3.attr,
619 &dev_attr_sched4.attr,
620 &dev_attr_sched5.attr,
621 &dev_attr_sched6.attr,
622 &dev_attr_sched7.attr,
623 NULL
624};
625
626static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
627
628/*
629 * Sends an sk_buff to an offload queue driver
630 * after dealing with any active network taps.
631 */
632static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
633{
634 int ret;
635
636 local_bh_disable();
637 ret = t3_offload_tx(tdev, skb);
638 local_bh_enable();
639 return ret;
640}
641
642static int write_smt_entry(struct adapter *adapter, int idx)
643{
644 struct cpl_smt_write_req *req;
645 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
646
647 if (!skb)
648 return -ENOMEM;
649
650 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
651 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
652 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
653 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
654 req->iff = idx;
655 memset(req->src_mac1, 0, sizeof(req->src_mac1));
656 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
657 skb->priority = 1;
658 offload_tx(&adapter->tdev, skb);
659 return 0;
660}
661
662static int init_smt(struct adapter *adapter)
663{
664 int i;
665
666 for_each_port(adapter, i)
667 write_smt_entry(adapter, i);
668 return 0;
669}
670
671static void init_port_mtus(struct adapter *adapter)
672{
673 unsigned int mtus = adapter->port[0]->mtu;
674
675 if (adapter->port[1])
676 mtus |= adapter->port[1]->mtu << 16;
677 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
678}
679
680static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
681 int hi, int port)
682{
683 struct sk_buff *skb;
684 struct mngt_pktsched_wr *req;
685
686 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
687 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
688 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
689 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
690 req->sched = sched;
691 req->idx = qidx;
692 req->min = lo;
693 req->max = hi;
694 req->binding = port;
695 t3_mgmt_tx(adap, skb);
696}
697
698static void bind_qsets(struct adapter *adap)
699{
700 int i, j;
701
702 for_each_port(adap, i) {
703 const struct port_info *pi = adap2pinfo(adap, i);
704
705 for (j = 0; j < pi->nqsets; ++j)
706 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
707 -1, i);
708 }
709}
710
711#define FW_FNAME "t3fw-%d.%d.bin"
712
713static int upgrade_fw(struct adapter *adap)
714{
715 int ret;
716 char buf[64];
717 const struct firmware *fw;
718 struct device *dev = &adap->pdev->dev;
719
720 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
721 FW_VERSION_MINOR);
722 ret = request_firmware(&fw, buf, dev);
723 if (ret < 0) {
724 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
725 buf);
726 return ret;
727 }
728 ret = t3_load_fw(adap, fw->data, fw->size);
729 release_firmware(fw);
730 return ret;
731}
732
733/**
734 * cxgb_up - enable the adapter
 735 * @adap: the adapter being enabled
736 *
737 * Called when the first port is enabled, this function performs the
738 * actions necessary to make an adapter operational, such as completing
739 * the initialization of HW modules, and enabling interrupts.
740 *
741 * Must be called with the rtnl lock held.
742 */
743static int cxgb_up(struct adapter *adap)
744{
745 int err = 0;
746
747 if (!(adap->flags & FULL_INIT_DONE)) {
748 err = t3_check_fw_version(adap);
749 if (err == -EINVAL)
750 err = upgrade_fw(adap);
4aac3899 751 if (err)
4d22de3e 752 goto out;
753
754 err = init_dummy_netdevs(adap);
755 if (err)
756 goto out;
757
758 err = t3_init_hw(adap, 0);
759 if (err)
760 goto out;
761
762 err = setup_sge_qsets(adap);
763 if (err)
764 goto out;
765
766 setup_rss(adap);
767 adap->flags |= FULL_INIT_DONE;
768 }
769
770 t3_intr_clear(adap);
771
772 if (adap->flags & USING_MSIX) {
773 name_msix_vecs(adap);
774 err = request_irq(adap->msix_info[0].vec,
775 t3_async_intr_handler, 0,
776 adap->msix_info[0].desc, adap);
777 if (err)
778 goto irq_err;
779
780 if (request_msix_data_irqs(adap)) {
781 free_irq(adap->msix_info[0].vec, adap);
782 goto irq_err;
783 }
784 } else if ((err = request_irq(adap->pdev->irq,
785 t3_intr_handler(adap,
786 adap->sge.qs[0].rspq.
787 polling),
788 (adap->flags & USING_MSI) ?
789 0 : IRQF_SHARED,
790 adap->name, adap)))
791 goto irq_err;
792
793 t3_sge_start(adap);
794 t3_intr_enable(adap);
795
796 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
797 bind_qsets(adap);
798 adap->flags |= QUEUES_BOUND;
799
800out:
801 return err;
802irq_err:
803 CH_ERR(adap, "request_irq failed, err %d\n", err);
804 goto out;
805}
806
807/*
808 * Release resources when all the ports and offloading have been stopped.
809 */
810static void cxgb_down(struct adapter *adapter)
811{
812 t3_sge_stop(adapter);
813 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
814 t3_intr_disable(adapter);
815 spin_unlock_irq(&adapter->work_lock);
816
817 if (adapter->flags & USING_MSIX) {
818 int i, n = 0;
819
820 free_irq(adapter->msix_info[0].vec, adapter);
821 for_each_port(adapter, i)
822 n += adap2pinfo(adapter, i)->nqsets;
823
824 for (i = 0; i < n; ++i)
825 free_irq(adapter->msix_info[i + 1].vec,
826 &adapter->sge.qs[i]);
827 } else
828 free_irq(adapter->pdev->irq, adapter);
829
830 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
831 quiesce_rx(adapter);
832}
833
834static void schedule_chk_task(struct adapter *adap)
835{
836 unsigned int timeo;
837
838 timeo = adap->params.linkpoll_period ?
839 (HZ * adap->params.linkpoll_period) / 10 :
840 adap->params.stats_update_period * HZ;
841 if (timeo)
842 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
843}
844
845static int offload_open(struct net_device *dev)
846{
847 struct adapter *adapter = dev->priv;
848 struct t3cdev *tdev = T3CDEV(dev);
849 int adap_up = adapter->open_device_map & PORT_MASK;
850 int err = 0;
851
852 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
853 return 0;
854
855 if (!adap_up && (err = cxgb_up(adapter)) < 0)
856 return err;
857
858 t3_tp_set_offload_mode(adapter, 1);
859 tdev->lldev = adapter->port[0];
860 err = cxgb3_offload_activate(adapter);
861 if (err)
862 goto out;
863
864 init_port_mtus(adapter);
865 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
866 adapter->params.b_wnd,
867 adapter->params.rev == 0 ?
868 adapter->port[0]->mtu : 0xffff);
869 init_smt(adapter);
870
871 /* Never mind if the next step fails */
0ee8d33c 872 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
873
874 /* Call back all registered clients */
875 cxgb3_add_clients(tdev);
876
877out:
878 /* restore them in case the offload module has changed them */
879 if (err) {
880 t3_tp_set_offload_mode(adapter, 0);
881 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
882 cxgb3_set_dummy_ops(tdev);
883 }
884 return err;
885}
886
887static int offload_close(struct t3cdev *tdev)
888{
889 struct adapter *adapter = tdev2adap(tdev);
890
891 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
892 return 0;
893
894 /* Call back all registered clients */
895 cxgb3_remove_clients(tdev);
896
0ee8d33c 897 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
898
899 tdev->lldev = NULL;
900 cxgb3_set_dummy_ops(tdev);
901 t3_tp_set_offload_mode(adapter, 0);
902 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
903
904 if (!adapter->open_device_map)
905 cxgb_down(adapter);
906
907 cxgb3_offload_deactivate(adapter);
908 return 0;
909}
910
911static int cxgb_open(struct net_device *dev)
912{
913 int err;
914 struct adapter *adapter = dev->priv;
915 struct port_info *pi = netdev_priv(dev);
916 int other_ports = adapter->open_device_map & PORT_MASK;
917
918 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
919 return err;
920
921 set_bit(pi->port_id, &adapter->open_device_map);
922 if (!ofld_disable) {
923 err = offload_open(dev);
924 if (err)
925 printk(KERN_WARNING
926 "Could not initialize offload capabilities\n");
927 }
928
929 link_start(dev);
930 t3_port_intr_enable(adapter, pi->port_id);
931 netif_start_queue(dev);
932 if (!other_ports)
933 schedule_chk_task(adapter);
934
935 return 0;
936}
937
938static int cxgb_close(struct net_device *dev)
939{
940 struct adapter *adapter = dev->priv;
941 struct port_info *p = netdev_priv(dev);
942
943 t3_port_intr_disable(adapter, p->port_id);
944 netif_stop_queue(dev);
945 p->phy.ops->power_down(&p->phy, 1);
946 netif_carrier_off(dev);
947 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
948
949 spin_lock(&adapter->work_lock); /* sync with update task */
950 clear_bit(p->port_id, &adapter->open_device_map);
951 spin_unlock(&adapter->work_lock);
952
953 if (!(adapter->open_device_map & PORT_MASK))
954 cancel_rearming_delayed_workqueue(cxgb3_wq,
955 &adapter->adap_check_task);
956
957 if (!adapter->open_device_map)
958 cxgb_down(adapter);
959
960 return 0;
961}
962
963static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
964{
965 struct adapter *adapter = dev->priv;
966 struct port_info *p = netdev_priv(dev);
967 struct net_device_stats *ns = &p->netstats;
968 const struct mac_stats *pstats;
969
970 spin_lock(&adapter->stats_lock);
971 pstats = t3_mac_update_stats(&p->mac);
972 spin_unlock(&adapter->stats_lock);
973
974 ns->tx_bytes = pstats->tx_octets;
975 ns->tx_packets = pstats->tx_frames;
976 ns->rx_bytes = pstats->rx_octets;
977 ns->rx_packets = pstats->rx_frames;
978 ns->multicast = pstats->rx_mcast_frames;
979
980 ns->tx_errors = pstats->tx_underrun;
981 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
982 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
983 pstats->rx_fifo_ovfl;
984
985 /* detailed rx_errors */
986 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
987 ns->rx_over_errors = 0;
988 ns->rx_crc_errors = pstats->rx_fcs_errs;
989 ns->rx_frame_errors = pstats->rx_symbol_errs;
990 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
991 ns->rx_missed_errors = pstats->rx_cong_drops;
992
993 /* detailed tx_errors */
994 ns->tx_aborted_errors = 0;
995 ns->tx_carrier_errors = 0;
996 ns->tx_fifo_errors = pstats->tx_underrun;
997 ns->tx_heartbeat_errors = 0;
998 ns->tx_window_errors = 0;
999 return ns;
1000}
1001
1002static u32 get_msglevel(struct net_device *dev)
1003{
1004 struct adapter *adapter = dev->priv;
1005
1006 return adapter->msg_enable;
1007}
1008
1009static void set_msglevel(struct net_device *dev, u32 val)
1010{
1011 struct adapter *adapter = dev->priv;
1012
1013 adapter->msg_enable = val;
1014}
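/*
 * These two hooks back ethtool's message-level interface, so the bitmap
 * seeded by the dflt_msg_enable module parameter can later be changed per
 * adapter, e.g. with "ethtool -s <iface> msglvl <bitmap>" (illustrative
 * invocation).
 */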
1015
1016static char stats_strings[][ETH_GSTRING_LEN] = {
1017 "TxOctetsOK ",
1018 "TxFramesOK ",
1019 "TxMulticastFramesOK",
1020 "TxBroadcastFramesOK",
1021 "TxPauseFrames ",
1022 "TxUnderrun ",
1023 "TxExtUnderrun ",
1024
1025 "TxFrames64 ",
1026 "TxFrames65To127 ",
1027 "TxFrames128To255 ",
1028 "TxFrames256To511 ",
1029 "TxFrames512To1023 ",
1030 "TxFrames1024To1518 ",
1031 "TxFrames1519ToMax ",
1032
1033 "RxOctetsOK ",
1034 "RxFramesOK ",
1035 "RxMulticastFramesOK",
1036 "RxBroadcastFramesOK",
1037 "RxPauseFrames ",
1038 "RxFCSErrors ",
1039 "RxSymbolErrors ",
1040 "RxShortErrors ",
1041 "RxJabberErrors ",
1042 "RxLengthErrors ",
1043 "RxFIFOoverflow ",
1044
1045 "RxFrames64 ",
1046 "RxFrames65To127 ",
1047 "RxFrames128To255 ",
1048 "RxFrames256To511 ",
1049 "RxFrames512To1023 ",
1050 "RxFrames1024To1518 ",
1051 "RxFrames1519ToMax ",
1052
1053 "PhyFIFOErrors ",
1054 "TSO ",
1055 "VLANextractions ",
1056 "VLANinsertions ",
1057 "TxCsumOffload ",
1058 "RxCsumGood ",
1059 "RxDrops ",
1060
1061 "CheckTXEnToggled ",
1062 "CheckResets ",
1063
1064};
1065
1066static int get_stats_count(struct net_device *dev)
1067{
1068 return ARRAY_SIZE(stats_strings);
1069}
1070
1071#define T3_REGMAP_SIZE (3 * 1024)
1072
1073static int get_regs_len(struct net_device *dev)
1074{
1075 return T3_REGMAP_SIZE;
1076}
1077
1078static int get_eeprom_len(struct net_device *dev)
1079{
1080 return EEPROMSIZE;
1081}
1082
1083static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1084{
1085 u32 fw_vers = 0;
1086 struct adapter *adapter = dev->priv;
1087
1088 t3_get_fw_version(adapter, &fw_vers);
1089
1090 strcpy(info->driver, DRV_NAME);
1091 strcpy(info->version, DRV_VERSION);
1092 strcpy(info->bus_info, pci_name(adapter->pdev));
1093 if (!fw_vers)
1094 strcpy(info->fw_version, "N/A");
4aac3899 1095 else {
4d22de3e 1096 snprintf(info->fw_version, sizeof(info->fw_version),
1097 "%s %u.%u.%u",
1098 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1099 G_FW_VERSION_MAJOR(fw_vers),
1100 G_FW_VERSION_MINOR(fw_vers),
1101 G_FW_VERSION_MICRO(fw_vers));
1102 }
1103}
1104
1105static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1106{
1107 if (stringset == ETH_SS_STATS)
1108 memcpy(data, stats_strings, sizeof(stats_strings));
1109}
1110
1111static unsigned long collect_sge_port_stats(struct adapter *adapter,
1112 struct port_info *p, int idx)
1113{
1114 int i;
1115 unsigned long tot = 0;
1116
1117 for (i = 0; i < p->nqsets; ++i)
1118 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1119 return tot;
1120}
1121
1122static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1123 u64 *data)
1124{
1125 struct adapter *adapter = dev->priv;
1126 struct port_info *pi = netdev_priv(dev);
1127 const struct mac_stats *s;
1128
1129 spin_lock(&adapter->stats_lock);
1130 s = t3_mac_update_stats(&pi->mac);
1131 spin_unlock(&adapter->stats_lock);
1132
1133 *data++ = s->tx_octets;
1134 *data++ = s->tx_frames;
1135 *data++ = s->tx_mcast_frames;
1136 *data++ = s->tx_bcast_frames;
1137 *data++ = s->tx_pause;
1138 *data++ = s->tx_underrun;
1139 *data++ = s->tx_fifo_urun;
1140
1141 *data++ = s->tx_frames_64;
1142 *data++ = s->tx_frames_65_127;
1143 *data++ = s->tx_frames_128_255;
1144 *data++ = s->tx_frames_256_511;
1145 *data++ = s->tx_frames_512_1023;
1146 *data++ = s->tx_frames_1024_1518;
1147 *data++ = s->tx_frames_1519_max;
1148
1149 *data++ = s->rx_octets;
1150 *data++ = s->rx_frames;
1151 *data++ = s->rx_mcast_frames;
1152 *data++ = s->rx_bcast_frames;
1153 *data++ = s->rx_pause;
1154 *data++ = s->rx_fcs_errs;
1155 *data++ = s->rx_symbol_errs;
1156 *data++ = s->rx_short;
1157 *data++ = s->rx_jabber;
1158 *data++ = s->rx_too_long;
1159 *data++ = s->rx_fifo_ovfl;
1160
1161 *data++ = s->rx_frames_64;
1162 *data++ = s->rx_frames_65_127;
1163 *data++ = s->rx_frames_128_255;
1164 *data++ = s->rx_frames_256_511;
1165 *data++ = s->rx_frames_512_1023;
1166 *data++ = s->rx_frames_1024_1518;
1167 *data++ = s->rx_frames_1519_max;
1168
1169 *data++ = pi->phy.fifo_errors;
1170
1171 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1172 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1173 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1174 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1175 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1176 *data++ = s->rx_cong_drops;
1177
1178 *data++ = s->num_toggled;
1179 *data++ = s->num_resets;
1180}
1181
1182static inline void reg_block_dump(struct adapter *ap, void *buf,
1183 unsigned int start, unsigned int end)
1184{
1185 u32 *p = buf + start;
1186
1187 for (; start <= end; start += sizeof(u32))
1188 *p++ = t3_read_reg(ap, start);
1189}
1190
1191static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1192 void *buf)
1193{
1194 struct adapter *ap = dev->priv;
1195
1196 /*
1197 * Version scheme:
1198 * bits 0..9: chip version
1199 * bits 10..15: chip revision
1200 * bit 31: set for PCIe cards
1201 */
1202 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
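 /*
  * Example (hypothetical part): a rev-2 T3 chip on a PCIe bus would
  * report 3 | (2 << 10) | (1 << 31) = 0x80000803 here.
  */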
1203
1204 /*
1205 * We skip the MAC statistics registers because they are clear-on-read.
1206 * Also reading multi-register stats would need to synchronize with the
1207 * periodic mac stats accumulation. Hard to justify the complexity.
1208 */
1209 memset(buf, 0, T3_REGMAP_SIZE);
1210 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1211 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1212 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1213 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1214 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1215 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1216 XGM_REG(A_XGM_SERDES_STAT3, 1));
1217 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1218 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1219}
1220
1221static int restart_autoneg(struct net_device *dev)
1222{
1223 struct port_info *p = netdev_priv(dev);
1224
1225 if (!netif_running(dev))
1226 return -EAGAIN;
1227 if (p->link_config.autoneg != AUTONEG_ENABLE)
1228 return -EINVAL;
1229 p->phy.ops->autoneg_restart(&p->phy);
1230 return 0;
1231}
1232
1233static int cxgb3_phys_id(struct net_device *dev, u32 data)
1234{
1235 int i;
1236 struct adapter *adapter = dev->priv;
1237
1238 if (data == 0)
1239 data = 2;
1240
1241 for (i = 0; i < data * 2; i++) {
1242 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1243 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1244 if (msleep_interruptible(500))
1245 break;
1246 }
1247 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1248 F_GPIO0_OUT_VAL);
1249 return 0;
1250}
1251
1252static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1253{
1254 struct port_info *p = netdev_priv(dev);
1255
1256 cmd->supported = p->link_config.supported;
1257 cmd->advertising = p->link_config.advertising;
1258
1259 if (netif_carrier_ok(dev)) {
1260 cmd->speed = p->link_config.speed;
1261 cmd->duplex = p->link_config.duplex;
1262 } else {
1263 cmd->speed = -1;
1264 cmd->duplex = -1;
1265 }
1266
1267 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1268 cmd->phy_address = p->phy.addr;
1269 cmd->transceiver = XCVR_EXTERNAL;
1270 cmd->autoneg = p->link_config.autoneg;
1271 cmd->maxtxpkt = 0;
1272 cmd->maxrxpkt = 0;
1273 return 0;
1274}
1275
1276static int speed_duplex_to_caps(int speed, int duplex)
1277{
1278 int cap = 0;
1279
1280 switch (speed) {
1281 case SPEED_10:
1282 if (duplex == DUPLEX_FULL)
1283 cap = SUPPORTED_10baseT_Full;
1284 else
1285 cap = SUPPORTED_10baseT_Half;
1286 break;
1287 case SPEED_100:
1288 if (duplex == DUPLEX_FULL)
1289 cap = SUPPORTED_100baseT_Full;
1290 else
1291 cap = SUPPORTED_100baseT_Half;
1292 break;
1293 case SPEED_1000:
1294 if (duplex == DUPLEX_FULL)
1295 cap = SUPPORTED_1000baseT_Full;
1296 else
1297 cap = SUPPORTED_1000baseT_Half;
1298 break;
1299 case SPEED_10000:
1300 if (duplex == DUPLEX_FULL)
1301 cap = SUPPORTED_10000baseT_Full;
1302 }
1303 return cap;
1304}
1305
1306#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1307 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1308 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1309 ADVERTISED_10000baseT_Full)
1310
1311static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1312{
1313 struct port_info *p = netdev_priv(dev);
1314 struct link_config *lc = &p->link_config;
1315
1316 if (!(lc->supported & SUPPORTED_Autoneg))
1317 return -EOPNOTSUPP; /* can't change speed/duplex */
1318
1319 if (cmd->autoneg == AUTONEG_DISABLE) {
1320 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1321
1322 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1323 return -EINVAL;
1324 lc->requested_speed = cmd->speed;
1325 lc->requested_duplex = cmd->duplex;
1326 lc->advertising = 0;
1327 } else {
1328 cmd->advertising &= ADVERTISED_MASK;
1329 cmd->advertising &= lc->supported;
1330 if (!cmd->advertising)
1331 return -EINVAL;
1332 lc->requested_speed = SPEED_INVALID;
1333 lc->requested_duplex = DUPLEX_INVALID;
1334 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1335 }
1336 lc->autoneg = cmd->autoneg;
1337 if (netif_running(dev))
1338 t3_link_start(&p->phy, &p->mac, lc);
1339 return 0;
1340}
1341
1342static void get_pauseparam(struct net_device *dev,
1343 struct ethtool_pauseparam *epause)
1344{
1345 struct port_info *p = netdev_priv(dev);
1346
1347 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1348 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1349 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1350}
1351
1352static int set_pauseparam(struct net_device *dev,
1353 struct ethtool_pauseparam *epause)
1354{
1355 struct port_info *p = netdev_priv(dev);
1356 struct link_config *lc = &p->link_config;
1357
1358 if (epause->autoneg == AUTONEG_DISABLE)
1359 lc->requested_fc = 0;
1360 else if (lc->supported & SUPPORTED_Autoneg)
1361 lc->requested_fc = PAUSE_AUTONEG;
1362 else
1363 return -EINVAL;
1364
1365 if (epause->rx_pause)
1366 lc->requested_fc |= PAUSE_RX;
1367 if (epause->tx_pause)
1368 lc->requested_fc |= PAUSE_TX;
1369 if (lc->autoneg == AUTONEG_ENABLE) {
1370 if (netif_running(dev))
1371 t3_link_start(&p->phy, &p->mac, lc);
1372 } else {
1373 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1374 if (netif_running(dev))
1375 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1376 }
1377 return 0;
1378}
1379
1380static u32 get_rx_csum(struct net_device *dev)
1381{
1382 struct port_info *p = netdev_priv(dev);
1383
1384 return p->rx_csum_offload;
1385}
1386
1387static int set_rx_csum(struct net_device *dev, u32 data)
1388{
1389 struct port_info *p = netdev_priv(dev);
1390
1391 p->rx_csum_offload = data;
1392 return 0;
1393}
1394
1395static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1396{
1397 const struct adapter *adapter = dev->priv;
1398 const struct port_info *pi = netdev_priv(dev);
1399 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1400
1401 e->rx_max_pending = MAX_RX_BUFFERS;
1402 e->rx_mini_max_pending = 0;
1403 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1404 e->tx_max_pending = MAX_TXQ_ENTRIES;
1405
1406 e->rx_pending = q->fl_size;
1407 e->rx_mini_pending = q->rspq_size;
1408 e->rx_jumbo_pending = q->jumbo_size;
1409 e->tx_pending = q->txq_size[0];
1410}
1411
1412static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1413{
1414 int i;
05b97b30 1415 struct qset_params *q;
4d22de3e 1416 struct adapter *adapter = dev->priv;
05b97b30 1417 const struct port_info *pi = netdev_priv(dev);
1418
1419 if (e->rx_pending > MAX_RX_BUFFERS ||
1420 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1421 e->tx_pending > MAX_TXQ_ENTRIES ||
1422 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1423 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1424 e->rx_pending < MIN_FL_ENTRIES ||
1425 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1426 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1427 return -EINVAL;
1428
1429 if (adapter->flags & FULL_INIT_DONE)
1430 return -EBUSY;
1431
1432 q = &adapter->params.sge.qset[pi->first_qset];
1433 for (i = 0; i < pi->nqsets; ++i, ++q) {
1434 q->rspq_size = e->rx_mini_pending;
1435 q->fl_size = e->rx_pending;
1436 q->jumbo_size = e->rx_jumbo_pending;
1437 q->txq_size[0] = e->tx_pending;
1438 q->txq_size[1] = e->tx_pending;
1439 q->txq_size[2] = e->tx_pending;
1440 }
1441 return 0;
1442}
1443
1444static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1445{
1446 struct adapter *adapter = dev->priv;
1447 struct qset_params *qsp = &adapter->params.sge.qset[0];
1448 struct sge_qset *qs = &adapter->sge.qs[0];
1449
1450 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1451 return -EINVAL;
1452
1453 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1454 t3_update_qset_coalesce(qs, qsp);
1455 return 0;
1456}
1457
1458static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1459{
1460 struct adapter *adapter = dev->priv;
1461 struct qset_params *q = adapter->params.sge.qset;
1462
1463 c->rx_coalesce_usecs = q->coalesce_usecs;
1464 return 0;
1465}
1466
1467static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1468 u8 * data)
1469{
1470 int i, err = 0;
1471 struct adapter *adapter = dev->priv;
1472
1473 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1474 if (!buf)
1475 return -ENOMEM;
1476
1477 e->magic = EEPROM_MAGIC;
1478 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1479 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1480
1481 if (!err)
1482 memcpy(data, buf + e->offset, e->len);
1483 kfree(buf);
1484 return err;
1485}
1486
1487static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1488 u8 * data)
1489{
1490 u8 *buf;
1491 int err = 0;
1492 u32 aligned_offset, aligned_len, *p;
1493 struct adapter *adapter = dev->priv;
1494
1495 if (eeprom->magic != EEPROM_MAGIC)
1496 return -EINVAL;
1497
1498 aligned_offset = eeprom->offset & ~3;
1499 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1500
1501 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1502 buf = kmalloc(aligned_len, GFP_KERNEL);
1503 if (!buf)
1504 return -ENOMEM;
1505 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1506 if (!err && aligned_len > 4)
1507 err = t3_seeprom_read(adapter,
1508 aligned_offset + aligned_len - 4,
1509 (u32 *) & buf[aligned_len - 4]);
1510 if (err)
1511 goto out;
1512 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1513 } else
1514 buf = data;
1515
1516 err = t3_seeprom_wp(adapter, 0);
1517 if (err)
1518 goto out;
1519
1520 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1521 err = t3_seeprom_write(adapter, aligned_offset, *p);
1522 aligned_offset += 4;
1523 }
1524
1525 if (!err)
1526 err = t3_seeprom_wp(adapter, 1);
1527out:
1528 if (buf != data)
1529 kfree(buf);
1530 return err;
1531}
1532
1533static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1534{
1535 wol->supported = 0;
1536 wol->wolopts = 0;
1537 memset(&wol->sopass, 0, sizeof(wol->sopass));
1538}
1539
1540static const struct ethtool_ops cxgb_ethtool_ops = {
1541 .get_settings = get_settings,
1542 .set_settings = set_settings,
1543 .get_drvinfo = get_drvinfo,
1544 .get_msglevel = get_msglevel,
1545 .set_msglevel = set_msglevel,
1546 .get_ringparam = get_sge_param,
1547 .set_ringparam = set_sge_param,
1548 .get_coalesce = get_coalesce,
1549 .set_coalesce = set_coalesce,
1550 .get_eeprom_len = get_eeprom_len,
1551 .get_eeprom = get_eeprom,
1552 .set_eeprom = set_eeprom,
1553 .get_pauseparam = get_pauseparam,
1554 .set_pauseparam = set_pauseparam,
1555 .get_rx_csum = get_rx_csum,
1556 .set_rx_csum = set_rx_csum,
1557 .get_tx_csum = ethtool_op_get_tx_csum,
1558 .set_tx_csum = ethtool_op_set_tx_csum,
1559 .get_sg = ethtool_op_get_sg,
1560 .set_sg = ethtool_op_set_sg,
1561 .get_link = ethtool_op_get_link,
1562 .get_strings = get_strings,
1563 .phys_id = cxgb3_phys_id,
1564 .nway_reset = restart_autoneg,
1565 .get_stats_count = get_stats_count,
1566 .get_ethtool_stats = get_stats,
1567 .get_regs_len = get_regs_len,
1568 .get_regs = get_regs,
1569 .get_wol = get_wol,
1570 .get_tso = ethtool_op_get_tso,
1571 .set_tso = ethtool_op_set_tso,
1572 .get_perm_addr = ethtool_op_get_perm_addr
1573};
1574
1575static int in_range(int val, int lo, int hi)
1576{
1577 return val < 0 || (val <= hi && val >= lo);
1578}
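/*
 * Note that a negative value deliberately passes the check: the ioctl
 * handlers below use -1 to mean "leave this field unchanged", so e.g.
 * in_range(-1, 32, 16384) and in_range(100, 32, 16384) are both true
 * while in_range(7, 32, 16384) is false.
 */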
1579
1580static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1581{
1582 int ret;
1583 u32 cmd;
1584 struct adapter *adapter = dev->priv;
1585
1586 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1587 return -EFAULT;
1588
1589 switch (cmd) {
1590 case CHELSIO_SET_QSET_PARAMS:{
1591 int i;
1592 struct qset_params *q;
1593 struct ch_qset_params t;
1594
1595 if (!capable(CAP_NET_ADMIN))
1596 return -EPERM;
1597 if (copy_from_user(&t, useraddr, sizeof(t)))
1598 return -EFAULT;
1599 if (t.qset_idx >= SGE_QSETS)
1600 return -EINVAL;
1601 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1602 !in_range(t.cong_thres, 0, 255) ||
1603 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1604 MAX_TXQ_ENTRIES) ||
1605 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1606 MAX_TXQ_ENTRIES) ||
1607 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1608 MAX_CTRL_TXQ_ENTRIES) ||
1609 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1610 MAX_RX_BUFFERS)
1611 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1612 MAX_RX_JUMBO_BUFFERS)
1613 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1614 MAX_RSPQ_ENTRIES))
1615 return -EINVAL;
1616 if ((adapter->flags & FULL_INIT_DONE) &&
1617 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1618 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1619 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1620 t.polling >= 0 || t.cong_thres >= 0))
1621 return -EBUSY;
1622
1623 q = &adapter->params.sge.qset[t.qset_idx];
1624
1625 if (t.rspq_size >= 0)
1626 q->rspq_size = t.rspq_size;
1627 if (t.fl_size[0] >= 0)
1628 q->fl_size = t.fl_size[0];
1629 if (t.fl_size[1] >= 0)
1630 q->jumbo_size = t.fl_size[1];
1631 if (t.txq_size[0] >= 0)
1632 q->txq_size[0] = t.txq_size[0];
1633 if (t.txq_size[1] >= 0)
1634 q->txq_size[1] = t.txq_size[1];
1635 if (t.txq_size[2] >= 0)
1636 q->txq_size[2] = t.txq_size[2];
1637 if (t.cong_thres >= 0)
1638 q->cong_thres = t.cong_thres;
1639 if (t.intr_lat >= 0) {
1640 struct sge_qset *qs =
1641 &adapter->sge.qs[t.qset_idx];
1642
1643 q->coalesce_usecs = t.intr_lat;
1644 t3_update_qset_coalesce(qs, q);
1645 }
1646 if (t.polling >= 0) {
1647 if (adapter->flags & USING_MSIX)
1648 q->polling = t.polling;
1649 else {
1650 /* No polling with INTx for T3A */
1651 if (adapter->params.rev == 0 &&
1652 !(adapter->flags & USING_MSI))
1653 t.polling = 0;
1654
1655 for (i = 0; i < SGE_QSETS; i++) {
1656 q = &adapter->params.sge.
1657 qset[i];
1658 q->polling = t.polling;
1659 }
1660 }
1661 }
1662 break;
1663 }
1664 case CHELSIO_GET_QSET_PARAMS:{
1665 struct qset_params *q;
1666 struct ch_qset_params t;
1667
1668 if (copy_from_user(&t, useraddr, sizeof(t)))
1669 return -EFAULT;
1670 if (t.qset_idx >= SGE_QSETS)
1671 return -EINVAL;
1672
1673 q = &adapter->params.sge.qset[t.qset_idx];
1674 t.rspq_size = q->rspq_size;
1675 t.txq_size[0] = q->txq_size[0];
1676 t.txq_size[1] = q->txq_size[1];
1677 t.txq_size[2] = q->txq_size[2];
1678 t.fl_size[0] = q->fl_size;
1679 t.fl_size[1] = q->jumbo_size;
1680 t.polling = q->polling;
1681 t.intr_lat = q->coalesce_usecs;
1682 t.cong_thres = q->cong_thres;
1683
1684 if (copy_to_user(useraddr, &t, sizeof(t)))
1685 return -EFAULT;
1686 break;
1687 }
1688 case CHELSIO_SET_QSET_NUM:{
1689 struct ch_reg edata;
1690 struct port_info *pi = netdev_priv(dev);
1691 unsigned int i, first_qset = 0, other_qsets = 0;
1692
1693 if (!capable(CAP_NET_ADMIN))
1694 return -EPERM;
1695 if (adapter->flags & FULL_INIT_DONE)
1696 return -EBUSY;
1697 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1698 return -EFAULT;
1699 if (edata.val < 1 ||
1700 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1701 return -EINVAL;
1702
1703 for_each_port(adapter, i)
1704 if (adapter->port[i] && adapter->port[i] != dev)
1705 other_qsets += adap2pinfo(adapter, i)->nqsets;
1706
1707 if (edata.val + other_qsets > SGE_QSETS)
1708 return -EINVAL;
1709
1710 pi->nqsets = edata.val;
1711
1712 for_each_port(adapter, i)
1713 if (adapter->port[i]) {
1714 pi = adap2pinfo(adapter, i);
1715 pi->first_qset = first_qset;
1716 first_qset += pi->nqsets;
1717 }
1718 break;
1719 }
1720 case CHELSIO_GET_QSET_NUM:{
1721 struct ch_reg edata;
1722 struct port_info *pi = netdev_priv(dev);
1723
1724 edata.cmd = CHELSIO_GET_QSET_NUM;
1725 edata.val = pi->nqsets;
1726 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1727 return -EFAULT;
1728 break;
1729 }
1730 case CHELSIO_LOAD_FW:{
1731 u8 *fw_data;
1732 struct ch_mem_range t;
1733
1734 if (!capable(CAP_NET_ADMIN))
1735 return -EPERM;
1736 if (copy_from_user(&t, useraddr, sizeof(t)))
1737 return -EFAULT;
1738
1739 fw_data = kmalloc(t.len, GFP_KERNEL);
1740 if (!fw_data)
1741 return -ENOMEM;
1742
1743 if (copy_from_user
1744 (fw_data, useraddr + sizeof(t), t.len)) {
1745 kfree(fw_data);
1746 return -EFAULT;
1747 }
1748
1749 ret = t3_load_fw(adapter, fw_data, t.len);
1750 kfree(fw_data);
1751 if (ret)
1752 return ret;
1753 break;
1754 }
1755 case CHELSIO_SETMTUTAB:{
1756 struct ch_mtus m;
1757 int i;
1758
1759 if (!is_offload(adapter))
1760 return -EOPNOTSUPP;
1761 if (!capable(CAP_NET_ADMIN))
1762 return -EPERM;
1763 if (offload_running(adapter))
1764 return -EBUSY;
1765 if (copy_from_user(&m, useraddr, sizeof(m)))
1766 return -EFAULT;
1767 if (m.nmtus != NMTUS)
1768 return -EINVAL;
1769 if (m.mtus[0] < 81) /* accommodate SACK */
1770 return -EINVAL;
1771
1772 /* MTUs must be in ascending order */
1773 for (i = 1; i < NMTUS; ++i)
1774 if (m.mtus[i] < m.mtus[i - 1])
1775 return -EINVAL;
1776
1777 memcpy(adapter->params.mtus, m.mtus,
1778 sizeof(adapter->params.mtus));
1779 break;
1780 }
1781 case CHELSIO_GET_PM:{
1782 struct tp_params *p = &adapter->params.tp;
1783 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1784
1785 if (!is_offload(adapter))
1786 return -EOPNOTSUPP;
1787 m.tx_pg_sz = p->tx_pg_size;
1788 m.tx_num_pg = p->tx_num_pgs;
1789 m.rx_pg_sz = p->rx_pg_size;
1790 m.rx_num_pg = p->rx_num_pgs;
1791 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1792 if (copy_to_user(useraddr, &m, sizeof(m)))
1793 return -EFAULT;
1794 break;
1795 }
1796 case CHELSIO_SET_PM:{
1797 struct ch_pm m;
1798 struct tp_params *p = &adapter->params.tp;
1799
1800 if (!is_offload(adapter))
1801 return -EOPNOTSUPP;
1802 if (!capable(CAP_NET_ADMIN))
1803 return -EPERM;
1804 if (adapter->flags & FULL_INIT_DONE)
1805 return -EBUSY;
1806 if (copy_from_user(&m, useraddr, sizeof(m)))
1807 return -EFAULT;
1808 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1809 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1810 return -EINVAL; /* not power of 2 */
1811 if (!(m.rx_pg_sz & 0x14000))
1812 return -EINVAL; /* not 16KB or 64KB */
1813 if (!(m.tx_pg_sz & 0x1554000))
1814 return -EINVAL;
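 /* the 0x1554000 mask checked above accepts Tx page sizes of 16KB,
  * 64KB, 256KB, 1MB, 4MB and 16MB */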
1815 if (m.tx_num_pg == -1)
1816 m.tx_num_pg = p->tx_num_pgs;
1817 if (m.rx_num_pg == -1)
1818 m.rx_num_pg = p->rx_num_pgs;
1819 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1820 return -EINVAL;
1821 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1822 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1823 return -EINVAL;
1824 p->rx_pg_size = m.rx_pg_sz;
1825 p->tx_pg_size = m.tx_pg_sz;
1826 p->rx_num_pgs = m.rx_num_pg;
1827 p->tx_num_pgs = m.tx_num_pg;
1828 break;
1829 }
1830 case CHELSIO_GET_MEM:{
1831 struct ch_mem_range t;
1832 struct mc7 *mem;
1833 u64 buf[32];
1834
1835 if (!is_offload(adapter))
1836 return -EOPNOTSUPP;
1837 if (!(adapter->flags & FULL_INIT_DONE))
1838 return -EIO; /* need the memory controllers */
1839 if (copy_from_user(&t, useraddr, sizeof(t)))
1840 return -EFAULT;
1841 if ((t.addr & 7) || (t.len & 7))
1842 return -EINVAL;
1843 if (t.mem_id == MEM_CM)
1844 mem = &adapter->cm;
1845 else if (t.mem_id == MEM_PMRX)
1846 mem = &adapter->pmrx;
1847 else if (t.mem_id == MEM_PMTX)
1848 mem = &adapter->pmtx;
1849 else
1850 return -EINVAL;
1851
1852 /*
1853 * Version scheme:
1854 * bits 0..9: chip version
1855 * bits 10..15: chip revision
1856 */
1857 t.version = 3 | (adapter->params.rev << 10);
1858 if (copy_to_user(useraddr, &t, sizeof(t)))
1859 return -EFAULT;
1860
1861 /*
1862 * Read 256 bytes at a time as len can be large and we don't
1863 * want to use huge intermediate buffers.
1864 */
1865 useraddr += sizeof(t); /* advance to start of buffer */
1866 while (t.len) {
1867 unsigned int chunk =
1868 min_t(unsigned int, t.len, sizeof(buf));
1869
1870 ret =
1871 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1872 buf);
1873 if (ret)
1874 return ret;
1875 if (copy_to_user(useraddr, buf, chunk))
1876 return -EFAULT;
1877 useraddr += chunk;
1878 t.addr += chunk;
1879 t.len -= chunk;
1880 }
1881 break;
1882 }
1883 case CHELSIO_SET_TRACE_FILTER:{
1884 struct ch_trace t;
1885 const struct trace_params *tp;
1886
1887 if (!capable(CAP_NET_ADMIN))
1888 return -EPERM;
1889 if (!offload_running(adapter))
1890 return -EAGAIN;
1891 if (copy_from_user(&t, useraddr, sizeof(t)))
1892 return -EFAULT;
1893
1894 tp = (const struct trace_params *)&t.sip;
1895 if (t.config_tx)
1896 t3_config_trace_filter(adapter, tp, 0,
1897 t.invert_match,
1898 t.trace_tx);
1899 if (t.config_rx)
1900 t3_config_trace_filter(adapter, tp, 1,
1901 t.invert_match,
1902 t.trace_rx);
1903 break;
1904 }
1905 default:
1906 return -EOPNOTSUPP;
1907 }
1908 return 0;
1909}
1910
1911static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1912{
1913 int ret, mmd;
1914 struct adapter *adapter = dev->priv;
1915 struct port_info *pi = netdev_priv(dev);
1916 struct mii_ioctl_data *data = if_mii(req);
1917
1918 switch (cmd) {
1919 case SIOCGMIIPHY:
1920 data->phy_id = pi->phy.addr;
1921 /* FALLTHRU */
1922 case SIOCGMIIREG:{
1923 u32 val;
1924 struct cphy *phy = &pi->phy;
1925
1926 if (!phy->mdio_read)
1927 return -EOPNOTSUPP;
1928 if (is_10G(adapter)) {
1929 mmd = data->phy_id >> 8;
1930 if (!mmd)
1931 mmd = MDIO_DEV_PCS;
1932 else if (mmd > MDIO_DEV_XGXS)
1933 return -EINVAL;
1934
1935 ret =
1936 phy->mdio_read(adapter, data->phy_id & 0x1f,
1937 mmd, data->reg_num, &val);
1938 } else
1939 ret =
1940 phy->mdio_read(adapter, data->phy_id & 0x1f,
1941 0, data->reg_num & 0x1f,
1942 &val);
1943 if (!ret)
1944 data->val_out = val;
1945 break;
1946 }
1947 case SIOCSMIIREG:{
1948 struct cphy *phy = &pi->phy;
1949
1950 if (!capable(CAP_NET_ADMIN))
1951 return -EPERM;
1952 if (!phy->mdio_write)
1953 return -EOPNOTSUPP;
1954 if (is_10G(adapter)) {
1955 mmd = data->phy_id >> 8;
1956 if (!mmd)
1957 mmd = MDIO_DEV_PCS;
1958 else if (mmd > MDIO_DEV_XGXS)
1959 return -EINVAL;
1960
1961 ret =
1962 phy->mdio_write(adapter,
1963 data->phy_id & 0x1f, mmd,
1964 data->reg_num,
1965 data->val_in);
1966 } else
1967 ret =
1968 phy->mdio_write(adapter,
1969 data->phy_id & 0x1f, 0,
1970 data->reg_num & 0x1f,
1971 data->val_in);
1972 break;
1973 }
1974 case SIOCCHIOCTL:
1975 return cxgb_extension_ioctl(dev, req->ifr_data);
1976 default:
1977 return -EOPNOTSUPP;
1978 }
1979 return ret;
1980}
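/*
 * Illustrative user-space sketch (not part of the driver): reading a PHY
 * register through the MII ioctls handled above, assuming an interface
 * named "eth0" and an AF_INET socket fd:
 *
 *   struct ifreq ifr;
 *   struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *   memset(&ifr, 0, sizeof(ifr));
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *   ioctl(fd, SIOCGMIIPHY, &ifr);     // fills mii->phy_id
 *   mii->reg_num = 1;                 // MII_BMSR
 *   ioctl(fd, SIOCGMIIREG, &ifr);     // status returned in mii->val_out
 *
 * On the 10G parts the MMD number is carried in bits 8 and up of
 * mii->phy_id, as the handler above shows.
 */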
1981
1982static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1983{
1984 int ret;
1985 struct adapter *adapter = dev->priv;
1986 struct port_info *pi = netdev_priv(dev);
1987
1988 if (new_mtu < 81) /* accommodate SACK */
1989 return -EINVAL;
1990 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1991 return ret;
1992 dev->mtu = new_mtu;
1993 init_port_mtus(adapter);
1994 if (adapter->params.rev == 0 && offload_running(adapter))
1995 t3_load_mtus(adapter, adapter->params.mtus,
1996 adapter->params.a_wnd, adapter->params.b_wnd,
1997 adapter->port[0]->mtu);
1998 return 0;
1999}
2000
2001static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2002{
2003 struct adapter *adapter = dev->priv;
2004 struct port_info *pi = netdev_priv(dev);
2005 struct sockaddr *addr = p;
2006
2007 if (!is_valid_ether_addr(addr->sa_data))
2008 return -EINVAL;
2009
2010 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2011 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2012 if (offload_running(adapter))
2013 write_smt_entry(adapter, pi->port_id);
2014 return 0;
2015}
2016
2017/**
2018 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2019 * @adap: the adapter
2020 * @p: the port
2021 *
2022 * Ensures that current Rx processing on any of the queues associated with
2023 * the given port completes before returning. We do this by acquiring and
2024 * releasing the locks of the response queues associated with the port.
2025 */
2026static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2027{
2028 int i;
2029
2030 for (i = 0; i < p->nqsets; i++) {
2031 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2032
2033 spin_lock_irq(&q->lock);
2034 spin_unlock_irq(&q->lock);
2035 }
2036}
2037
2038static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2039{
2040 struct adapter *adapter = dev->priv;
2041 struct port_info *pi = netdev_priv(dev);
2042
2043 pi->vlan_grp = grp;
2044 if (adapter->params.rev > 0)
2045 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2046 else {
2047 /* single control for all ports */
2048 unsigned int i, have_vlans = 0;
2049 for_each_port(adapter, i)
2050 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2051
2052 t3_set_vlan_accel(adapter, 1, have_vlans);
2053 }
2054 t3_synchronize_rx(adapter, pi);
2055}
2056
2057static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2058{
2059 /* nothing */
2060}
2061
2062#ifdef CONFIG_NET_POLL_CONTROLLER
2063static void cxgb_netpoll(struct net_device *dev)
2064{
2065 struct adapter *adapter = dev->priv;
2066 struct sge_qset *qs = dev2qset(dev);
2067
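 /* t3_intr_handler() returns the ISR appropriate for the adapter's current
 * interrupt and polling configuration; invoking it directly lets netpoll
 * drain the queues while normal interrupts are disabled.
 */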
2068 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2069 adapter);
2070}
2071#endif
2072
2073/*
2074 * Periodic accumulation of MAC statistics.
2075 */
2076static void mac_stats_update(struct adapter *adapter)
2077{
2078 int i;
2079
2080 for_each_port(adapter, i) {
2081 struct net_device *dev = adapter->port[i];
2082 struct port_info *p = netdev_priv(dev);
2083
2084 if (netif_running(dev)) {
2085 spin_lock(&adapter->stats_lock);
2086 t3_mac_update_stats(&p->mac);
2087 spin_unlock(&adapter->stats_lock);
2088 }
2089 }
2090}
2091
2092static void check_link_status(struct adapter *adapter)
2093{
2094 int i;
2095
2096 for_each_port(adapter, i) {
2097 struct net_device *dev = adapter->port[i];
2098 struct port_info *p = netdev_priv(dev);
2099
2100 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2101 t3_link_changed(adapter, i);
2102 }
2103}
2104
2105static void check_t3b2_mac(struct adapter *adapter)
2106{
2107 int i;
2108
2109 rtnl_lock(); /* synchronize with ifdown */
2110 for_each_port(adapter, i) {
2111 struct net_device *dev = adapter->port[i];
2112 struct port_info *p = netdev_priv(dev);
2113 int status;
2114
2115 if (!netif_running(dev))
2116 continue;
2117
2118 status = 0;
2119 if (netif_running(dev))
2120 status = t3b2_mac_watchdog_task(&p->mac);
2121 if (status == 1)
2122 p->mac.stats.num_toggled++;
2123 else if (status == 2) {
2124 struct cmac *mac = &p->mac;
2125
2126 t3_mac_set_mtu(mac, dev->mtu);
2127 t3_mac_set_address(mac, 0, dev->dev_addr);
2128 cxgb_set_rxmode(dev);
2129 t3_link_start(&p->phy, mac, &p->link_config);
2130 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2131 t3_port_intr_enable(adapter, p->port_id);
2132 p->mac.stats.num_resets++;
2133 }
2134 }
2135 rtnl_unlock();
2136}
2137
2138
2139static void t3_adap_check_task(struct work_struct *work)
2140{
2141 struct adapter *adapter = container_of(work, struct adapter,
2142 adap_check_task.work);
2143 const struct adapter_params *p = &adapter->params;
2144
2145 adapter->check_task_cnt++;
2146
2147 /* Check link status for PHYs without interrupts */
2148 if (p->linkpoll_period)
2149 check_link_status(adapter);
2150
2151 /* Accumulate MAC stats if needed */
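 /* linkpoll_period appears to be kept in tenths of a second, so this works
 * out to refreshing the MAC statistics roughly once every
 * stats_update_period seconds (or on every run when link polling is
 * disabled).
 */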
2152 if (!p->linkpoll_period ||
2153 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2154 p->stats_update_period) {
2155 mac_stats_update(adapter);
2156 adapter->check_task_cnt = 0;
2157 }
2158
2159 if (p->rev == T3_REV_B2)
2160 check_t3b2_mac(adapter);
2161
2162 /* Schedule the next check update if any port is active. */
2163 spin_lock(&adapter->work_lock);
2164 if (adapter->open_device_map & PORT_MASK)
2165 schedule_chk_task(adapter);
2166 spin_unlock(&adapter->work_lock);
2167}
2168
2169/*
2170 * Processes external (PHY) interrupts in process context.
2171 */
2172static void ext_intr_task(struct work_struct *work)
2173{
2174 struct adapter *adapter = container_of(work, struct adapter,
2175 ext_intr_handler_task);
2176
2177 t3_phy_intr_handler(adapter);
2178
2179 /* Now reenable external interrupts */
2180 spin_lock_irq(&adapter->work_lock);
2181 if (adapter->slow_intr_mask) {
2182 adapter->slow_intr_mask |= F_T3DBG;
2183 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2184 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2185 adapter->slow_intr_mask);
2186 }
2187 spin_unlock_irq(&adapter->work_lock);
2188}
2189
2190/*
2191 * Interrupt-context handler for external (PHY) interrupts.
2192 */
2193void t3_os_ext_intr_handler(struct adapter *adapter)
2194{
2195 /*
2196 * Handle external interrupts in a task: they may be slow, and the MDIO
2197 * registers are protected by a mutex, which cannot be taken in
2198 * interrupt context. PHY interrupts are disabled in the meantime and
2199 * the task reenables them when it's done.
2200 */
2201 spin_lock(&adapter->work_lock);
2202 if (adapter->slow_intr_mask) {
2203 adapter->slow_intr_mask &= ~F_T3DBG;
2204 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2205 adapter->slow_intr_mask);
2206 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2207 }
2208 spin_unlock(&adapter->work_lock);
2209}
2210
2211void t3_fatal_err(struct adapter *adapter)
2212{
2213 unsigned int fw_status[4];
2214
2215 if (adapter->flags & FULL_INIT_DONE) {
2216 t3_sge_stop(adapter);
2217 t3_intr_disable(adapter);
2218 }
2219 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2220 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2221 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2222 fw_status[0], fw_status[1],
2223 fw_status[2], fw_status[3]);
2224
2225}
2226
2227static int __devinit cxgb_enable_msix(struct adapter *adap)
2228{
2229 struct msix_entry entries[SGE_QSETS + 1];
2230 int i, err;
2231
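 /* Request one vector per SGE queue set plus one extra, presumably for
 * slow-path/error interrupts; if the full set cannot be granted,
 * init_one() falls back to MSI or legacy interrupts.
 */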
2232 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2233 entries[i].entry = i;
2234
2235 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2236 if (!err) {
2237 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2238 adap->msix_info[i].vec = entries[i].vector;
2239 } else if (err > 0)
2240 dev_info(&adap->pdev->dev,
2241 "only %d MSI-X vectors left, not using MSI-X\n", err);
2242 return err;
2243}
2244
2245static void __devinit print_port_info(struct adapter *adap,
2246 const struct adapter_info *ai)
2247{
2248 static const char *pci_variant[] = {
2249 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2250 };
2251
2252 int i;
2253 char buf[80];
2254
2255 if (is_pcie(adap))
2256 snprintf(buf, sizeof(buf), "%s x%d",
2257 pci_variant[adap->params.pci.variant],
2258 adap->params.pci.width);
2259 else
2260 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2261 pci_variant[adap->params.pci.variant],
2262 adap->params.pci.speed, adap->params.pci.width);
2263
2264 for_each_port(adap, i) {
2265 struct net_device *dev = adap->port[i];
2266 const struct port_info *pi = netdev_priv(dev);
2267
2268 if (!test_bit(i, &adap->registered_device_map))
2269 continue;
2270 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2271 dev->name, ai->desc, pi->port_type->desc,
2272 adap->params.rev, buf,
2273 (adap->flags & USING_MSIX) ? " MSI-X" :
2274 (adap->flags & USING_MSI) ? " MSI" : "");
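 /* adap->name aliases the name of the first successfully registered
 * net_device (set in init_one()), so the memory summary below is printed
 * only once per adapter.
 */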
2275 if (adap->name == dev->name && adap->params.vpd.mclk)
2276 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2277 adap->name, t3_mc7_size(&adap->cm) >> 20,
2278 t3_mc7_size(&adap->pmtx) >> 20,
2279 t3_mc7_size(&adap->pmrx) >> 20);
2280 }
2281}
2282
2283static int __devinit init_one(struct pci_dev *pdev,
2284 const struct pci_device_id *ent)
2285{
2286 static int version_printed;
2287
2288 int i, err, pci_using_dac = 0;
2289 unsigned long mmio_start, mmio_len;
2290 const struct adapter_info *ai;
2291 struct adapter *adapter = NULL;
2292 struct port_info *pi;
2293
2294 if (!version_printed) {
2295 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2296 ++version_printed;
2297 }
2298
2299 if (!cxgb3_wq) {
2300 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2301 if (!cxgb3_wq) {
2302 printk(KERN_ERR DRV_NAME
2303 ": cannot initialize work queue\n");
2304 return -ENOMEM;
2305 }
2306 }
2307
2308 err = pci_request_regions(pdev, DRV_NAME);
2309 if (err) {
2310 /* Just info, some other driver may have claimed the device. */
2311 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2312 return err;
2313 }
2314
2315 err = pci_enable_device(pdev);
2316 if (err) {
2317 dev_err(&pdev->dev, "cannot enable PCI device\n");
2318 goto out_release_regions;
2319 }
2320
2321 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2322 pci_using_dac = 1;
2323 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2324 if (err) {
2325 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2326 "coherent allocations\n");
2327 goto out_disable_device;
2328 }
2329 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2330 dev_err(&pdev->dev, "no usable DMA configuration\n");
2331 goto out_disable_device;
2332 }
2333
2334 pci_set_master(pdev);
2335
2336 mmio_start = pci_resource_start(pdev, 0);
2337 mmio_len = pci_resource_len(pdev, 0);
2338 ai = t3_get_adapter_info(ent->driver_data);
2339
2340 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2341 if (!adapter) {
2342 err = -ENOMEM;
2343 goto out_disable_device;
2344 }
2345
2346 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2347 if (!adapter->regs) {
2348 dev_err(&pdev->dev, "cannot map device registers\n");
2349 err = -ENOMEM;
2350 goto out_free_adapter;
2351 }
2352
2353 adapter->pdev = pdev;
2354 adapter->name = pci_name(pdev);
2355 adapter->msg_enable = dflt_msg_enable;
2356 adapter->mmio_len = mmio_len;
2357
2358 mutex_init(&adapter->mdio_lock);
2359 spin_lock_init(&adapter->work_lock);
2360 spin_lock_init(&adapter->stats_lock);
2361
2362 INIT_LIST_HEAD(&adapter->adapter_list);
2363 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2364 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2365
2366 for (i = 0; i < ai->nports; ++i) {
2367 struct net_device *netdev;
2368
2369 netdev = alloc_etherdev(sizeof(struct port_info));
2370 if (!netdev) {
2371 err = -ENOMEM;
2372 goto out_free_dev;
2373 }
2374
2375 SET_MODULE_OWNER(netdev);
2376 SET_NETDEV_DEV(netdev, &pdev->dev);
2377
2378 adapter->port[i] = netdev;
2379 pi = netdev_priv(netdev);
2380 pi->rx_csum_offload = 1;
2381 pi->nqsets = 1;
2382 pi->first_qset = i;
2383 pi->activity = 0;
2384 pi->port_id = i;
2385 netif_carrier_off(netdev);
2386 netdev->irq = pdev->irq;
2387 netdev->mem_start = mmio_start;
2388 netdev->mem_end = mmio_start + mmio_len - 1;
2389 netdev->priv = adapter;
2390 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2391 netdev->features |= NETIF_F_LLTX;
2392 if (pci_using_dac)
2393 netdev->features |= NETIF_F_HIGHDMA;
2394
2395 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2396 netdev->vlan_rx_register = vlan_rx_register;
2397 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2398
2399 netdev->open = cxgb_open;
2400 netdev->stop = cxgb_close;
2401 netdev->hard_start_xmit = t3_eth_xmit;
2402 netdev->get_stats = cxgb_get_stats;
2403 netdev->set_multicast_list = cxgb_set_rxmode;
2404 netdev->do_ioctl = cxgb_ioctl;
2405 netdev->change_mtu = cxgb_change_mtu;
2406 netdev->set_mac_address = cxgb_set_mac_addr;
2407#ifdef CONFIG_NET_POLL_CONTROLLER
2408 netdev->poll_controller = cxgb_netpoll;
2409#endif
2410 netdev->weight = 64;
2411
2412 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2413 }
2414
2415 pci_set_drvdata(pdev, adapter->port[0]);
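 /* drvdata holds the first port's net_device; remove_one() retrieves it
 * with pci_get_drvdata() and reaches the adapter through dev->priv.
 */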
2416 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2417 err = -ENODEV;
2418 goto out_free_dev;
2419 }
2420
2421 /*
2422 * The card is now ready to go. If any errors occur during device
2423 * registration we do not fail the whole card but rather proceed only
2424 * with the ports we manage to register successfully. However, we must
2425 * register at least one net device.
2426 */
2427 for_each_port(adapter, i) {
2428 err = register_netdev(adapter->port[i]);
2429 if (err)
2430 dev_warn(&pdev->dev,
2431 "cannot register net device %s, skipping\n",
2432 adapter->port[i]->name);
2433 else {
2434 /*
2435 * Change the name we use for messages to the name of
2436 * the first successfully registered interface.
2437 */
2438 if (!adapter->registered_device_map)
2439 adapter->name = adapter->port[i]->name;
2440
2441 __set_bit(i, &adapter->registered_device_map);
2442 }
2443 }
2444 if (!adapter->registered_device_map) {
2445 dev_err(&pdev->dev, "could not register any net devices\n");
2446 goto out_free_dev;
2447 }
2448
2449 /* Driver's ready. Reflect it on LEDs */
2450 t3_led_ready(adapter);
2451
2452 if (is_offload(adapter)) {
2453 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2454 cxgb3_adapter_ofld(adapter);
2455 }
2456
2457 /* See what interrupts we'll be using */
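 /* "msi" is a module parameter: a value above 1 tries MSI-X first, a value
 * above 0 permits plain MSI, and 0 forces legacy INTx.
 */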
2458 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2459 adapter->flags |= USING_MSIX;
2460 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2461 adapter->flags |= USING_MSI;
2462
0ee8d33c 2463 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2464 &cxgb3_attr_group);
2465
2466 print_port_info(adapter, ai);
2467 return 0;
2468
2469out_free_dev:
2470 iounmap(adapter->regs);
2471 for (i = ai->nports - 1; i >= 0; --i)
2472 if (adapter->port[i])
2473 free_netdev(adapter->port[i]);
2474
2475out_free_adapter:
2476 kfree(adapter);
2477
2478out_disable_device:
2479 pci_disable_device(pdev);
2480out_release_regions:
2481 pci_release_regions(pdev);
2482 pci_set_drvdata(pdev, NULL);
2483 return err;
2484}
2485
2486static void __devexit remove_one(struct pci_dev *pdev)
2487{
2488 struct net_device *dev = pci_get_drvdata(pdev);
2489
2490 if (dev) {
2491 int i;
2492 struct adapter *adapter = dev->priv;
2493
2494 t3_sge_stop(adapter);
0ee8d33c 2495 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2496 &cxgb3_attr_group);
2497
2498 for_each_port(adapter, i)
2499 if (test_bit(i, &adapter->registered_device_map))
2500 unregister_netdev(adapter->port[i]);
2501
2502 if (is_offload(adapter)) {
2503 cxgb3_adapter_unofld(adapter);
2504 if (test_bit(OFFLOAD_DEVMAP_BIT,
2505 &adapter->open_device_map))
2506 offload_close(&adapter->tdev);
2507 }
2508
2509 t3_free_sge_resources(adapter);
2510 cxgb_disable_msi(adapter);
2511
2512 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2513 if (adapter->dummy_netdev[i]) {
2514 free_netdev(adapter->dummy_netdev[i]);
2515 adapter->dummy_netdev[i] = NULL;
2516 }
2517
2518 for_each_port(adapter, i)
2519 if (adapter->port[i])
2520 free_netdev(adapter->port[i]);
2521
2522 iounmap(adapter->regs);
2523 kfree(adapter);
2524 pci_release_regions(pdev);
2525 pci_disable_device(pdev);
2526 pci_set_drvdata(pdev, NULL);
2527 }
2528}
2529
2530static struct pci_driver driver = {
2531 .name = DRV_NAME,
2532 .id_table = cxgb3_pci_tbl,
2533 .probe = init_one,
2534 .remove = __devexit_p(remove_one),
2535};
2536
2537static int __init cxgb3_init_module(void)
2538{
2539 int ret;
2540
2541 cxgb3_offload_init();
2542
2543 ret = pci_register_driver(&driver);
2544 return ret;
2545}
2546
2547static void __exit cxgb3_cleanup_module(void)
2548{
2549 pci_unregister_driver(&driver);
2550 if (cxgb3_wq)
2551 destroy_workqueue(cxgb3_wq);
2552}
2553
2554module_init(cxgb3_init_module);
2555module_exit(cxgb3_cleanup_module);