[linux-2.6-block.git] / drivers/net/cxgb3/cxgb3_main.c
4d22de3e 1/*
1d68e93d 2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
2e283962 45#include <linux/firmware.h>
d9da466a 46#include <linux/log2.h>
47#include <asm/uaccess.h>
48
49#include "common.h"
50#include "cxgb3_ioctl.h"
51#include "regs.h"
52#include "cxgb3_offload.h"
53#include "version.h"
54
55#include "cxgb3_ctl_defs.h"
56#include "t3_cpl.h"
57#include "firmware_exports.h"
58
59enum {
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
65 MIN_TXQ_ENTRIES = 4,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
68 MIN_FL_ENTRIES = 32
69};
70
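/* Bit mask covering the per-port bits of adapter->open_device_map. */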
71#define PORT_MASK ((1 << MAX_NPORTS) - 1)
72
73#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
76
77#define EEPROM_MAGIC 0x38E2F10C
78
79#define CH_DEVICE(devid, ssid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
81
82static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 1, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1, 1), /* T302E */
85 CH_DEVICE(0x22, 1, 2), /* T310E */
86 CH_DEVICE(0x23, 1, 3), /* T320X */
87 CH_DEVICE(0x24, 1, 1), /* T302X */
88 CH_DEVICE(0x25, 1, 3), /* T320E */
89 CH_DEVICE(0x26, 1, 2), /* T310X */
90 CH_DEVICE(0x30, 1, 2), /* T3B10 */
91 CH_DEVICE(0x31, 1, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1, 1), /* T3B02 */
93 {0,}
94};
95
96MODULE_DESCRIPTION(DRV_DESC);
97MODULE_AUTHOR("Chelsio Communications");
1d68e93d 98MODULE_LICENSE("Dual BSD/GPL");
99MODULE_VERSION(DRV_VERSION);
100MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
101
102static int dflt_msg_enable = DFLT_MSG_ENABLE;
103
104module_param(dflt_msg_enable, int, 0644);
105MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106
107/*
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
111 *
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
115 */
116static int msi = 2;
117
118module_param(msi, int, 0644);
119MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
120
121/*
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
124 */
125
126static int ofld_disable = 0;
127
128module_param(ofld_disable, int, 0644);
129MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
130
131/*
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
138 */
139static struct workqueue_struct *cxgb3_wq;
140
141/**
142 * link_report - show link status and link speed/duplex
 143 * @dev: the net device whose link status is to be reported
144 *
145 * Shows the link status, speed, and duplex of a port.
146 */
147static void link_report(struct net_device *dev)
148{
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
151 else {
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
154
155 switch (p->link_config.speed) {
156 case SPEED_10000:
157 s = "10Gbps";
158 break;
159 case SPEED_1000:
160 s = "1000Mbps";
161 break;
162 case SPEED_100:
163 s = "100Mbps";
164 break;
165 }
166
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
169 }
170}
171
172/**
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
 175 * @port_id: the port index whose link status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
180 *
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
184 */
185void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
187{
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
191
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
194 return;
195
196 if (link_stat != netif_carrier_ok(dev)) {
6d6dabac 197 if (link_stat) {
59cf8107 198 t3_mac_enable(mac, MAC_DIRECTION_RX);
4d22de3e 199 netif_carrier_on(dev);
6d6dabac 200 } else {
4d22de3e 201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
205 }
206
207 link_report(dev);
208 }
209}
210
211static void cxgb_set_rxmode(struct net_device *dev)
212{
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
215
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
218}
219
220/**
221 * link_start - enable a port
222 * @dev: the device to enable
223 *
224 * Performs the MAC and PHY actions needed to enable a port.
225 */
226static void link_start(struct net_device *dev)
227{
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
231
232 init_rx_mode(&rm, dev, dev->mc_list);
233 t3_mac_reset(mac);
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
239}
240
241static inline void cxgb_disable_msi(struct adapter *adapter)
242{
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
249 }
250}
251
252/*
253 * Interrupt handler for asynchronous events used with MSI-X.
254 */
255static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
256{
257 t3_slow_intr_handler(cookie);
258 return IRQ_HANDLED;
259}
260
261/*
262 * Name the MSI-X interrupts.
263 */
264static void name_msix_vecs(struct adapter *adap)
265{
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
267
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
270
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
274
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
279 }
280 }
281}
282
283static int request_msix_data_irqs(struct adapter *adap)
284{
285 int i, j, err, qidx = 0;
286
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
289
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
293 adap->sge.qs[qidx].
294 rspq.polling), 0,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
297 if (err) {
298 while (--qidx >= 0)
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
301 return err;
302 }
303 qidx++;
304 }
305 }
306 return 0;
307}
308
309/**
310 * setup_rss - configure RSS
311 * @adap: the adapter
312 *
313 * Sets up RSS to distribute packets to multiple receive queues. We
314 * configure the RSS CPU lookup table to distribute to the number of HW
315 * receive queues, and the response queue lookup table to narrow that
316 * down to the response queues actually configured for each port.
317 * We always configure the RSS mapping for two ports since the mapping
318 * table has plenty of entries.
319 */
320static void setup_rss(struct adapter *adap)
321{
322 int i;
323 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
324 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
325 u8 cpus[SGE_QSETS + 1];
326 u16 rspq_map[RSS_TABLE_SIZE];
327
328 for (i = 0; i < SGE_QSETS; ++i)
329 cpus[i] = i;
330 cpus[SGE_QSETS] = 0xff; /* terminator */
331
332 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
333 rspq_map[i] = i % nq0;
334 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
335 }
336
337 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
338 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
339 V_RRCPLCPUSIZE(6), cpus, rspq_map);
340}
341
342/*
343 * If we have multiple receive queues per port serviced by NAPI we need one
344 * netdevice per queue as NAPI operates on netdevices. We already have one
345 * netdevice, namely the one associated with the interface, so we use dummy
346 * ones for any additional queues. Note that these netdevices exist purely
347 * so that NAPI has something to work with, they do not represent network
348 * ports and are not registered.
349 */
350static int init_dummy_netdevs(struct adapter *adap)
351{
352 int i, j, dummy_idx = 0;
353 struct net_device *nd;
354
355 for_each_port(adap, i) {
356 struct net_device *dev = adap->port[i];
357 const struct port_info *pi = netdev_priv(dev);
358
359 for (j = 0; j < pi->nqsets - 1; j++) {
360 if (!adap->dummy_netdev[dummy_idx]) {
361 nd = alloc_netdev(0, "", ether_setup);
362 if (!nd)
363 goto free_all;
364
365 nd->priv = adap;
366 nd->weight = 64;
367 set_bit(__LINK_STATE_START, &nd->state);
368 adap->dummy_netdev[dummy_idx] = nd;
369 }
370 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
371 dummy_idx++;
372 }
373 }
374 return 0;
375
376free_all:
377 while (--dummy_idx >= 0) {
378 free_netdev(adap->dummy_netdev[dummy_idx]);
379 adap->dummy_netdev[dummy_idx] = NULL;
380 }
381 return -ENOMEM;
382}
383
384/*
385 * Wait until all NAPI handlers are descheduled. This includes the handlers of
386 * both netdevices representing interfaces and the dummy ones for the extra
387 * queues.
388 */
389static void quiesce_rx(struct adapter *adap)
390{
391 int i;
392 struct net_device *dev;
393
394 for_each_port(adap, i) {
395 dev = adap->port[i];
396 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
397 msleep(1);
398 }
399
400 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
401 dev = adap->dummy_netdev[i];
402 if (dev)
403 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
404 msleep(1);
405 }
406}
407
408/**
409 * setup_sge_qsets - configure SGE Tx/Rx/response queues
410 * @adap: the adapter
411 *
412 * Determines how many sets of SGE queues to use and initializes them.
413 * We support multiple queue sets per port if we have MSI-X, otherwise
414 * just one queue set per port.
415 */
416static int setup_sge_qsets(struct adapter *adap)
417{
418 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
8ac3ba68 419 unsigned int ntxq = SGE_TXQ_PER_SET;
420
421 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
422 irq_idx = -1;
423
424 for_each_port(adap, i) {
425 struct net_device *dev = adap->port[i];
426 const struct port_info *pi = netdev_priv(dev);
427
428 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
429 err = t3_sge_alloc_qset(adap, qset_idx, 1,
430 (adap->flags & USING_MSIX) ? qset_idx + 1 :
431 irq_idx,
432 &adap->params.sge.qset[qset_idx], ntxq,
433 j == 0 ? dev :
434 adap-> dummy_netdev[dummy_dev_idx++]);
435 if (err) {
436 t3_free_sge_resources(adap);
437 return err;
438 }
439 }
440 }
441
442 return 0;
443}
444
445static ssize_t attr_show(struct device *d, struct device_attribute *attr,
446 char *buf,
896392ef 447 ssize_t(*format) (struct net_device *, char *))
448{
449 ssize_t len;
450
451 /* Synchronize with ioctls that may shut down the device */
452 rtnl_lock();
896392ef 453 len = (*format) (to_net_dev(d), buf);
454 rtnl_unlock();
455 return len;
456}
457
458static ssize_t attr_store(struct device *d, struct device_attribute *attr,
459 const char *buf, size_t len,
896392ef 460 ssize_t(*set) (struct net_device *, unsigned int),
461 unsigned int min_val, unsigned int max_val)
462{
463 char *endp;
464 ssize_t ret;
465 unsigned int val;
466
467 if (!capable(CAP_NET_ADMIN))
468 return -EPERM;
469
470 val = simple_strtoul(buf, &endp, 0);
471 if (endp == buf || val < min_val || val > max_val)
472 return -EINVAL;
473
474 rtnl_lock();
896392ef 475 ret = (*set) (to_net_dev(d), val);
476 if (!ret)
477 ret = len;
478 rtnl_unlock();
479 return ret;
480}
481
482#define CXGB3_SHOW(name, val_expr) \
896392ef 483static ssize_t format_##name(struct net_device *dev, char *buf) \
4d22de3e 484{ \
896392ef 485 struct adapter *adap = dev->priv; \
486 return sprintf(buf, "%u\n", val_expr); \
487} \
488static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
489 char *buf) \
4d22de3e 490{ \
0ee8d33c 491 return attr_show(d, attr, buf, format_##name); \
492}
493
896392ef 494static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
4d22de3e 495{
896392ef 496 struct adapter *adap = dev->priv;
9f238486 497 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
896392ef 498
499 if (adap->flags & FULL_INIT_DONE)
500 return -EBUSY;
501 if (val && adap->params.rev == 0)
502 return -EINVAL;
503 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
504 min_tids)
505 return -EINVAL;
506 adap->params.mc5.nfilters = val;
507 return 0;
508}
509
510static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
511 const char *buf, size_t len)
4d22de3e 512{
0ee8d33c 513 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
514}
515
896392ef 516static ssize_t set_nservers(struct net_device *dev, unsigned int val)
4d22de3e 517{
518 struct adapter *adap = dev->priv;
519
520 if (adap->flags & FULL_INIT_DONE)
521 return -EBUSY;
522 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
523 MC5_MIN_TIDS)
524 return -EINVAL;
525 adap->params.mc5.nservers = val;
526 return 0;
527}
528
529static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
530 const char *buf, size_t len)
4d22de3e 531{
0ee8d33c 532 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
533}
534
535#define CXGB3_ATTR_R(name, val_expr) \
536CXGB3_SHOW(name, val_expr) \
0ee8d33c 537static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
538
539#define CXGB3_ATTR_RW(name, val_expr, store_method) \
540CXGB3_SHOW(name, val_expr) \
0ee8d33c 541static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
542
543CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
544CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
545CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
546
547static struct attribute *cxgb3_attrs[] = {
548 &dev_attr_cam_size.attr,
549 &dev_attr_nfilters.attr,
550 &dev_attr_nservers.attr,
551 NULL
552};
553
554static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
555
556static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
557 char *buf, int sched)
558{
559 ssize_t len;
560 unsigned int v, addr, bpt, cpt;
0ee8d33c 561 struct adapter *adap = to_net_dev(d)->priv;
562
563 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
564 rtnl_lock();
565 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
566 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
567 if (sched & 1)
568 v >>= 16;
569 bpt = (v >> 8) & 0xff;
570 cpt = v & 0xff;
571 if (!cpt)
572 len = sprintf(buf, "disabled\n");
573 else {
574 v = (adap->params.vpd.cclk * 1000) / cpt;
575 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
576 }
577 rtnl_unlock();
578 return len;
579}
580
581static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
582 const char *buf, size_t len, int sched)
583{
584 char *endp;
585 ssize_t ret;
586 unsigned int val;
0ee8d33c 587 struct adapter *adap = to_net_dev(d)->priv;
588
589 if (!capable(CAP_NET_ADMIN))
590 return -EPERM;
591
592 val = simple_strtoul(buf, &endp, 0);
593 if (endp == buf || val > 10000000)
594 return -EINVAL;
595
596 rtnl_lock();
597 ret = t3_config_sched(adap, val, sched);
598 if (!ret)
599 ret = len;
600 rtnl_unlock();
601 return ret;
602}
603
604#define TM_ATTR(name, sched) \
605static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
606 char *buf) \
4d22de3e 607{ \
0ee8d33c 608 return tm_attr_show(d, attr, buf, sched); \
4d22de3e 609} \
610static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
611 const char *buf, size_t len) \
4d22de3e 612{ \
0ee8d33c 613 return tm_attr_store(d, attr, buf, len, sched); \
4d22de3e 614} \
0ee8d33c 615static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
616
617TM_ATTR(sched0, 0);
618TM_ATTR(sched1, 1);
619TM_ATTR(sched2, 2);
620TM_ATTR(sched3, 3);
621TM_ATTR(sched4, 4);
622TM_ATTR(sched5, 5);
623TM_ATTR(sched6, 6);
624TM_ATTR(sched7, 7);
625
626static struct attribute *offload_attrs[] = {
627 &dev_attr_sched0.attr,
628 &dev_attr_sched1.attr,
629 &dev_attr_sched2.attr,
630 &dev_attr_sched3.attr,
631 &dev_attr_sched4.attr,
632 &dev_attr_sched5.attr,
633 &dev_attr_sched6.attr,
634 &dev_attr_sched7.attr,
635 NULL
636};
637
638static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
639
640/*
641 * Sends an sk_buff to an offload queue driver
642 * after dealing with any active network taps.
643 */
644static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
645{
646 int ret;
647
648 local_bh_disable();
649 ret = t3_offload_tx(tdev, skb);
650 local_bh_enable();
651 return ret;
652}
653
654static int write_smt_entry(struct adapter *adapter, int idx)
655{
656 struct cpl_smt_write_req *req;
657 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
658
659 if (!skb)
660 return -ENOMEM;
661
662 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
663 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
664 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
665 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
666 req->iff = idx;
667 memset(req->src_mac1, 0, sizeof(req->src_mac1));
668 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
669 skb->priority = 1;
670 offload_tx(&adapter->tdev, skb);
671 return 0;
672}
673
674static int init_smt(struct adapter *adapter)
675{
676 int i;
677
678 for_each_port(adapter, i)
679 write_smt_entry(adapter, i);
680 return 0;
681}
682
683static void init_port_mtus(struct adapter *adapter)
684{
685 unsigned int mtus = adapter->port[0]->mtu;
686
687 if (adapter->port[1])
688 mtus |= adapter->port[1]->mtu << 16;
689 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
690}
691
692static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
693 int hi, int port)
694{
695 struct sk_buff *skb;
696 struct mngt_pktsched_wr *req;
697
698 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
699 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
700 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
701 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
702 req->sched = sched;
703 req->idx = qidx;
704 req->min = lo;
705 req->max = hi;
706 req->binding = port;
707 t3_mgmt_tx(adap, skb);
708}
709
710static void bind_qsets(struct adapter *adap)
711{
712 int i, j;
713
714 for_each_port(adap, i) {
715 const struct port_info *pi = adap2pinfo(adap, i);
716
717 for (j = 0; j < pi->nqsets; ++j)
718 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
719 -1, i);
720 }
721}
722
7f672cf5 723#define FW_FNAME "t3fw-%d.%d.%d.bin"
724
725static int upgrade_fw(struct adapter *adap)
726{
727 int ret;
728 char buf[64];
729 const struct firmware *fw;
730 struct device *dev = &adap->pdev->dev;
731
732 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
7f672cf5 733 FW_VERSION_MINOR, FW_VERSION_MICRO);
734 ret = request_firmware(&fw, buf, dev);
735 if (ret < 0) {
736 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
737 buf);
738 return ret;
739 }
740 ret = t3_load_fw(adap, fw->data, fw->size);
741 release_firmware(fw);
742 return ret;
743}
744
745/**
746 * cxgb_up - enable the adapter
747 * @adapter: adapter being enabled
748 *
749 * Called when the first port is enabled, this function performs the
750 * actions necessary to make an adapter operational, such as completing
751 * the initialization of HW modules, and enabling interrupts.
752 *
753 * Must be called with the rtnl lock held.
754 */
755static int cxgb_up(struct adapter *adap)
756{
757 int err = 0;
758
759 if (!(adap->flags & FULL_INIT_DONE)) {
760 err = t3_check_fw_version(adap);
761 if (err == -EINVAL)
762 err = upgrade_fw(adap);
4aac3899 763 if (err)
4d22de3e 764 goto out;
765
766 err = init_dummy_netdevs(adap);
767 if (err)
768 goto out;
769
770 err = t3_init_hw(adap, 0);
771 if (err)
772 goto out;
773
774 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
775
776 err = setup_sge_qsets(adap);
777 if (err)
778 goto out;
779
780 setup_rss(adap);
781 adap->flags |= FULL_INIT_DONE;
782 }
783
784 t3_intr_clear(adap);
785
786 if (adap->flags & USING_MSIX) {
787 name_msix_vecs(adap);
788 err = request_irq(adap->msix_info[0].vec,
789 t3_async_intr_handler, 0,
790 adap->msix_info[0].desc, adap);
791 if (err)
792 goto irq_err;
793
794 if (request_msix_data_irqs(adap)) {
795 free_irq(adap->msix_info[0].vec, adap);
796 goto irq_err;
797 }
798 } else if ((err = request_irq(adap->pdev->irq,
799 t3_intr_handler(adap,
800 adap->sge.qs[0].rspq.
801 polling),
802 (adap->flags & USING_MSI) ?
803 0 : IRQF_SHARED,
804 adap->name, adap)))
805 goto irq_err;
806
807 t3_sge_start(adap);
808 t3_intr_enable(adap);
809
810 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
811 bind_qsets(adap);
812 adap->flags |= QUEUES_BOUND;
813
814out:
815 return err;
816irq_err:
817 CH_ERR(adap, "request_irq failed, err %d\n", err);
818 goto out;
819}
820
821/*
822 * Release resources when all the ports and offloading have been stopped.
823 */
824static void cxgb_down(struct adapter *adapter)
825{
826 t3_sge_stop(adapter);
827 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
828 t3_intr_disable(adapter);
829 spin_unlock_irq(&adapter->work_lock);
830
831 if (adapter->flags & USING_MSIX) {
832 int i, n = 0;
833
834 free_irq(adapter->msix_info[0].vec, adapter);
835 for_each_port(adapter, i)
836 n += adap2pinfo(adapter, i)->nqsets;
837
838 for (i = 0; i < n; ++i)
839 free_irq(adapter->msix_info[i + 1].vec,
840 &adapter->sge.qs[i]);
841 } else
842 free_irq(adapter->pdev->irq, adapter);
843
844 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
845 quiesce_rx(adapter);
846}
847
848static void schedule_chk_task(struct adapter *adap)
849{
850 unsigned int timeo;
851
852 timeo = adap->params.linkpoll_period ?
853 (HZ * adap->params.linkpoll_period) / 10 :
854 adap->params.stats_update_period * HZ;
855 if (timeo)
856 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
857}
858
859static int offload_open(struct net_device *dev)
860{
861 struct adapter *adapter = dev->priv;
862 struct t3cdev *tdev = T3CDEV(dev);
863 int adap_up = adapter->open_device_map & PORT_MASK;
864 int err = 0;
865
866 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
867 return 0;
868
869 if (!adap_up && (err = cxgb_up(adapter)) < 0)
870 return err;
871
872 t3_tp_set_offload_mode(adapter, 1);
873 tdev->lldev = adapter->port[0];
874 err = cxgb3_offload_activate(adapter);
875 if (err)
876 goto out;
877
878 init_port_mtus(adapter);
879 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
880 adapter->params.b_wnd,
881 adapter->params.rev == 0 ?
882 adapter->port[0]->mtu : 0xffff);
883 init_smt(adapter);
884
885 /* Never mind if the next step fails */
0ee8d33c 886 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
887
888 /* Call back all registered clients */
889 cxgb3_add_clients(tdev);
890
891out:
892 /* restore them in case the offload module has changed them */
893 if (err) {
894 t3_tp_set_offload_mode(adapter, 0);
895 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
896 cxgb3_set_dummy_ops(tdev);
897 }
898 return err;
899}
900
901static int offload_close(struct t3cdev *tdev)
902{
903 struct adapter *adapter = tdev2adap(tdev);
904
905 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
906 return 0;
907
908 /* Call back all registered clients */
909 cxgb3_remove_clients(tdev);
910
0ee8d33c 911 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
912
913 tdev->lldev = NULL;
914 cxgb3_set_dummy_ops(tdev);
915 t3_tp_set_offload_mode(adapter, 0);
916 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
917
918 if (!adapter->open_device_map)
919 cxgb_down(adapter);
920
921 cxgb3_offload_deactivate(adapter);
922 return 0;
923}
924
925static int cxgb_open(struct net_device *dev)
926{
927 int err;
928 struct adapter *adapter = dev->priv;
929 struct port_info *pi = netdev_priv(dev);
930 int other_ports = adapter->open_device_map & PORT_MASK;
931
932 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
933 return err;
934
935 set_bit(pi->port_id, &adapter->open_device_map);
8ac3ba68 936 if (is_offload(adapter) && !ofld_disable) {
937 err = offload_open(dev);
938 if (err)
939 printk(KERN_WARNING
940 "Could not initialize offload capabilities\n");
941 }
942
943 link_start(dev);
944 t3_port_intr_enable(adapter, pi->port_id);
945 netif_start_queue(dev);
946 if (!other_ports)
947 schedule_chk_task(adapter);
948
949 return 0;
950}
951
952static int cxgb_close(struct net_device *dev)
953{
954 struct adapter *adapter = dev->priv;
955 struct port_info *p = netdev_priv(dev);
956
957 t3_port_intr_disable(adapter, p->port_id);
958 netif_stop_queue(dev);
959 p->phy.ops->power_down(&p->phy, 1);
960 netif_carrier_off(dev);
961 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
962
963 spin_lock(&adapter->work_lock); /* sync with update task */
964 clear_bit(p->port_id, &adapter->open_device_map);
965 spin_unlock(&adapter->work_lock);
966
967 if (!(adapter->open_device_map & PORT_MASK))
968 cancel_rearming_delayed_workqueue(cxgb3_wq,
969 &adapter->adap_check_task);
970
971 if (!adapter->open_device_map)
972 cxgb_down(adapter);
973
974 return 0;
975}
976
977static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
978{
979 struct adapter *adapter = dev->priv;
980 struct port_info *p = netdev_priv(dev);
981 struct net_device_stats *ns = &p->netstats;
982 const struct mac_stats *pstats;
983
984 spin_lock(&adapter->stats_lock);
985 pstats = t3_mac_update_stats(&p->mac);
986 spin_unlock(&adapter->stats_lock);
987
988 ns->tx_bytes = pstats->tx_octets;
989 ns->tx_packets = pstats->tx_frames;
990 ns->rx_bytes = pstats->rx_octets;
991 ns->rx_packets = pstats->rx_frames;
992 ns->multicast = pstats->rx_mcast_frames;
993
994 ns->tx_errors = pstats->tx_underrun;
995 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
996 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
997 pstats->rx_fifo_ovfl;
998
999 /* detailed rx_errors */
1000 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1001 ns->rx_over_errors = 0;
1002 ns->rx_crc_errors = pstats->rx_fcs_errs;
1003 ns->rx_frame_errors = pstats->rx_symbol_errs;
1004 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1005 ns->rx_missed_errors = pstats->rx_cong_drops;
1006
1007 /* detailed tx_errors */
1008 ns->tx_aborted_errors = 0;
1009 ns->tx_carrier_errors = 0;
1010 ns->tx_fifo_errors = pstats->tx_underrun;
1011 ns->tx_heartbeat_errors = 0;
1012 ns->tx_window_errors = 0;
1013 return ns;
1014}
1015
1016static u32 get_msglevel(struct net_device *dev)
1017{
1018 struct adapter *adapter = dev->priv;
1019
1020 return adapter->msg_enable;
1021}
1022
1023static void set_msglevel(struct net_device *dev, u32 val)
1024{
1025 struct adapter *adapter = dev->priv;
1026
1027 adapter->msg_enable = val;
1028}
1029
1030static char stats_strings[][ETH_GSTRING_LEN] = {
1031 "TxOctetsOK ",
1032 "TxFramesOK ",
1033 "TxMulticastFramesOK",
1034 "TxBroadcastFramesOK",
1035 "TxPauseFrames ",
1036 "TxUnderrun ",
1037 "TxExtUnderrun ",
1038
1039 "TxFrames64 ",
1040 "TxFrames65To127 ",
1041 "TxFrames128To255 ",
1042 "TxFrames256To511 ",
1043 "TxFrames512To1023 ",
1044 "TxFrames1024To1518 ",
1045 "TxFrames1519ToMax ",
1046
1047 "RxOctetsOK ",
1048 "RxFramesOK ",
1049 "RxMulticastFramesOK",
1050 "RxBroadcastFramesOK",
1051 "RxPauseFrames ",
1052 "RxFCSErrors ",
1053 "RxSymbolErrors ",
1054 "RxShortErrors ",
1055 "RxJabberErrors ",
1056 "RxLengthErrors ",
1057 "RxFIFOoverflow ",
1058
1059 "RxFrames64 ",
1060 "RxFrames65To127 ",
1061 "RxFrames128To255 ",
1062 "RxFrames256To511 ",
1063 "RxFrames512To1023 ",
1064 "RxFrames1024To1518 ",
1065 "RxFrames1519ToMax ",
1066
1067 "PhyFIFOErrors ",
1068 "TSO ",
1069 "VLANextractions ",
1070 "VLANinsertions ",
1071 "TxCsumOffload ",
1072 "RxCsumGood ",
1073 "RxDrops ",
1074
1075 "CheckTXEnToggled ",
1076 "CheckResets ",
1077
1078};
1079
1080static int get_stats_count(struct net_device *dev)
1081{
1082 return ARRAY_SIZE(stats_strings);
1083}
1084
1085#define T3_REGMAP_SIZE (3 * 1024)
1086
1087static int get_regs_len(struct net_device *dev)
1088{
1089 return T3_REGMAP_SIZE;
1090}
1091
1092static int get_eeprom_len(struct net_device *dev)
1093{
1094 return EEPROMSIZE;
1095}
1096
1097static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1098{
1099 u32 fw_vers = 0;
1100 struct adapter *adapter = dev->priv;
1101
1102 t3_get_fw_version(adapter, &fw_vers);
1103
1104 strcpy(info->driver, DRV_NAME);
1105 strcpy(info->version, DRV_VERSION);
1106 strcpy(info->bus_info, pci_name(adapter->pdev));
1107 if (!fw_vers)
1108 strcpy(info->fw_version, "N/A");
4aac3899 1109 else {
4d22de3e 1110 snprintf(info->fw_version, sizeof(info->fw_version),
1111 "%s %u.%u.%u",
1112 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1113 G_FW_VERSION_MAJOR(fw_vers),
1114 G_FW_VERSION_MINOR(fw_vers),
1115 G_FW_VERSION_MICRO(fw_vers));
1116 }
1117}
1118
1119static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1120{
1121 if (stringset == ETH_SS_STATS)
1122 memcpy(data, stats_strings, sizeof(stats_strings));
1123}
1124
1125static unsigned long collect_sge_port_stats(struct adapter *adapter,
1126 struct port_info *p, int idx)
1127{
1128 int i;
1129 unsigned long tot = 0;
1130
1131 for (i = 0; i < p->nqsets; ++i)
1132 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1133 return tot;
1134}
1135
1136static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1137 u64 *data)
1138{
1139 struct adapter *adapter = dev->priv;
1140 struct port_info *pi = netdev_priv(dev);
1141 const struct mac_stats *s;
1142
1143 spin_lock(&adapter->stats_lock);
1144 s = t3_mac_update_stats(&pi->mac);
1145 spin_unlock(&adapter->stats_lock);
1146
1147 *data++ = s->tx_octets;
1148 *data++ = s->tx_frames;
1149 *data++ = s->tx_mcast_frames;
1150 *data++ = s->tx_bcast_frames;
1151 *data++ = s->tx_pause;
1152 *data++ = s->tx_underrun;
1153 *data++ = s->tx_fifo_urun;
1154
1155 *data++ = s->tx_frames_64;
1156 *data++ = s->tx_frames_65_127;
1157 *data++ = s->tx_frames_128_255;
1158 *data++ = s->tx_frames_256_511;
1159 *data++ = s->tx_frames_512_1023;
1160 *data++ = s->tx_frames_1024_1518;
1161 *data++ = s->tx_frames_1519_max;
1162
1163 *data++ = s->rx_octets;
1164 *data++ = s->rx_frames;
1165 *data++ = s->rx_mcast_frames;
1166 *data++ = s->rx_bcast_frames;
1167 *data++ = s->rx_pause;
1168 *data++ = s->rx_fcs_errs;
1169 *data++ = s->rx_symbol_errs;
1170 *data++ = s->rx_short;
1171 *data++ = s->rx_jabber;
1172 *data++ = s->rx_too_long;
1173 *data++ = s->rx_fifo_ovfl;
1174
1175 *data++ = s->rx_frames_64;
1176 *data++ = s->rx_frames_65_127;
1177 *data++ = s->rx_frames_128_255;
1178 *data++ = s->rx_frames_256_511;
1179 *data++ = s->rx_frames_512_1023;
1180 *data++ = s->rx_frames_1024_1518;
1181 *data++ = s->rx_frames_1519_max;
1182
1183 *data++ = pi->phy.fifo_errors;
1184
1185 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1186 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1187 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1188 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1189 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1190 *data++ = s->rx_cong_drops;
1191
1192 *data++ = s->num_toggled;
1193 *data++ = s->num_resets;
1194}
1195
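/* Copy the registers in the byte range [start, end] into buf at the matching offsets. */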
1196static inline void reg_block_dump(struct adapter *ap, void *buf,
1197 unsigned int start, unsigned int end)
1198{
1199 u32 *p = buf + start;
1200
1201 for (; start <= end; start += sizeof(u32))
1202 *p++ = t3_read_reg(ap, start);
1203}
1204
1205static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1206 void *buf)
1207{
1208 struct adapter *ap = dev->priv;
1209
1210 /*
1211 * Version scheme:
1212 * bits 0..9: chip version
1213 * bits 10..15: chip revision
1214 * bit 31: set for PCIe cards
1215 */
1216 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1217
1218 /*
1219 * We skip the MAC statistics registers because they are clear-on-read.
1220 * Also reading multi-register stats would need to synchronize with the
1221 * periodic mac stats accumulation. Hard to justify the complexity.
1222 */
1223 memset(buf, 0, T3_REGMAP_SIZE);
1224 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1225 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1226 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1227 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1228 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1229 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1230 XGM_REG(A_XGM_SERDES_STAT3, 1));
1231 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1232 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1233}
1234
1235static int restart_autoneg(struct net_device *dev)
1236{
1237 struct port_info *p = netdev_priv(dev);
1238
1239 if (!netif_running(dev))
1240 return -EAGAIN;
1241 if (p->link_config.autoneg != AUTONEG_ENABLE)
1242 return -EINVAL;
1243 p->phy.ops->autoneg_restart(&p->phy);
1244 return 0;
1245}
1246
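/* Identify the adapter by toggling the GPIO0 LED every 500 ms; 'data' is roughly the blink time in seconds (0 means about 2 seconds). */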
1247static int cxgb3_phys_id(struct net_device *dev, u32 data)
1248{
1249 int i;
1250 struct adapter *adapter = dev->priv;
1251
1252 if (data == 0)
1253 data = 2;
1254
1255 for (i = 0; i < data * 2; i++) {
1256 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1257 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1258 if (msleep_interruptible(500))
1259 break;
1260 }
1261 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1262 F_GPIO0_OUT_VAL);
1263 return 0;
1264}
1265
1266static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1267{
1268 struct port_info *p = netdev_priv(dev);
1269
1270 cmd->supported = p->link_config.supported;
1271 cmd->advertising = p->link_config.advertising;
1272
1273 if (netif_carrier_ok(dev)) {
1274 cmd->speed = p->link_config.speed;
1275 cmd->duplex = p->link_config.duplex;
1276 } else {
1277 cmd->speed = -1;
1278 cmd->duplex = -1;
1279 }
1280
1281 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1282 cmd->phy_address = p->phy.addr;
1283 cmd->transceiver = XCVR_EXTERNAL;
1284 cmd->autoneg = p->link_config.autoneg;
1285 cmd->maxtxpkt = 0;
1286 cmd->maxrxpkt = 0;
1287 return 0;
1288}
1289
1290static int speed_duplex_to_caps(int speed, int duplex)
1291{
1292 int cap = 0;
1293
1294 switch (speed) {
1295 case SPEED_10:
1296 if (duplex == DUPLEX_FULL)
1297 cap = SUPPORTED_10baseT_Full;
1298 else
1299 cap = SUPPORTED_10baseT_Half;
1300 break;
1301 case SPEED_100:
1302 if (duplex == DUPLEX_FULL)
1303 cap = SUPPORTED_100baseT_Full;
1304 else
1305 cap = SUPPORTED_100baseT_Half;
1306 break;
1307 case SPEED_1000:
1308 if (duplex == DUPLEX_FULL)
1309 cap = SUPPORTED_1000baseT_Full;
1310 else
1311 cap = SUPPORTED_1000baseT_Half;
1312 break;
1313 case SPEED_10000:
1314 if (duplex == DUPLEX_FULL)
1315 cap = SUPPORTED_10000baseT_Full;
1316 }
1317 return cap;
1318}
1319
1320#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1321 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1322 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1323 ADVERTISED_10000baseT_Full)
1324
1325static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1326{
1327 struct port_info *p = netdev_priv(dev);
1328 struct link_config *lc = &p->link_config;
1329
1330 if (!(lc->supported & SUPPORTED_Autoneg))
1331 return -EOPNOTSUPP; /* can't change speed/duplex */
1332
1333 if (cmd->autoneg == AUTONEG_DISABLE) {
1334 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1335
1336 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1337 return -EINVAL;
1338 lc->requested_speed = cmd->speed;
1339 lc->requested_duplex = cmd->duplex;
1340 lc->advertising = 0;
1341 } else {
1342 cmd->advertising &= ADVERTISED_MASK;
1343 cmd->advertising &= lc->supported;
1344 if (!cmd->advertising)
1345 return -EINVAL;
1346 lc->requested_speed = SPEED_INVALID;
1347 lc->requested_duplex = DUPLEX_INVALID;
1348 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1349 }
1350 lc->autoneg = cmd->autoneg;
1351 if (netif_running(dev))
1352 t3_link_start(&p->phy, &p->mac, lc);
1353 return 0;
1354}
1355
1356static void get_pauseparam(struct net_device *dev,
1357 struct ethtool_pauseparam *epause)
1358{
1359 struct port_info *p = netdev_priv(dev);
1360
1361 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1362 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1363 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1364}
1365
1366static int set_pauseparam(struct net_device *dev,
1367 struct ethtool_pauseparam *epause)
1368{
1369 struct port_info *p = netdev_priv(dev);
1370 struct link_config *lc = &p->link_config;
1371
1372 if (epause->autoneg == AUTONEG_DISABLE)
1373 lc->requested_fc = 0;
1374 else if (lc->supported & SUPPORTED_Autoneg)
1375 lc->requested_fc = PAUSE_AUTONEG;
1376 else
1377 return -EINVAL;
1378
1379 if (epause->rx_pause)
1380 lc->requested_fc |= PAUSE_RX;
1381 if (epause->tx_pause)
1382 lc->requested_fc |= PAUSE_TX;
1383 if (lc->autoneg == AUTONEG_ENABLE) {
1384 if (netif_running(dev))
1385 t3_link_start(&p->phy, &p->mac, lc);
1386 } else {
1387 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1388 if (netif_running(dev))
1389 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1390 }
1391 return 0;
1392}
1393
1394static u32 get_rx_csum(struct net_device *dev)
1395{
1396 struct port_info *p = netdev_priv(dev);
1397
1398 return p->rx_csum_offload;
1399}
1400
1401static int set_rx_csum(struct net_device *dev, u32 data)
1402{
1403 struct port_info *p = netdev_priv(dev);
1404
1405 p->rx_csum_offload = data;
1406 return 0;
1407}
1408
1409static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1410{
1411 const struct adapter *adapter = dev->priv;
1412 const struct port_info *pi = netdev_priv(dev);
1413 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1414
1415 e->rx_max_pending = MAX_RX_BUFFERS;
1416 e->rx_mini_max_pending = 0;
1417 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1418 e->tx_max_pending = MAX_TXQ_ENTRIES;
1419
1420 e->rx_pending = q->fl_size;
1421 e->rx_mini_pending = q->rspq_size;
1422 e->rx_jumbo_pending = q->jumbo_size;
1423 e->tx_pending = q->txq_size[0];
1424}
1425
1426static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1427{
1428 int i;
05b97b30 1429 struct qset_params *q;
4d22de3e 1430 struct adapter *adapter = dev->priv;
05b97b30 1431 const struct port_info *pi = netdev_priv(dev);
1432
1433 if (e->rx_pending > MAX_RX_BUFFERS ||
1434 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1435 e->tx_pending > MAX_TXQ_ENTRIES ||
1436 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1437 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1438 e->rx_pending < MIN_FL_ENTRIES ||
1439 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1440 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1441 return -EINVAL;
1442
1443 if (adapter->flags & FULL_INIT_DONE)
1444 return -EBUSY;
1445
1446 q = &adapter->params.sge.qset[pi->first_qset];
1447 for (i = 0; i < pi->nqsets; ++i, ++q) {
1448 q->rspq_size = e->rx_mini_pending;
1449 q->fl_size = e->rx_pending;
1450 q->jumbo_size = e->rx_jumbo_pending;
1451 q->txq_size[0] = e->tx_pending;
1452 q->txq_size[1] = e->tx_pending;
1453 q->txq_size[2] = e->tx_pending;
1454 }
1455 return 0;
1456}
1457
1458static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1459{
1460 struct adapter *adapter = dev->priv;
1461 struct qset_params *qsp = &adapter->params.sge.qset[0];
1462 struct sge_qset *qs = &adapter->sge.qs[0];
1463
1464 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1465 return -EINVAL;
1466
1467 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1468 t3_update_qset_coalesce(qs, qsp);
1469 return 0;
1470}
1471
1472static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1473{
1474 struct adapter *adapter = dev->priv;
1475 struct qset_params *q = adapter->params.sge.qset;
1476
1477 c->rx_coalesce_usecs = q->coalesce_usecs;
1478 return 0;
1479}
1480
1481static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1482 u8 * data)
1483{
1484 int i, err = 0;
1485 struct adapter *adapter = dev->priv;
1486
1487 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1488 if (!buf)
1489 return -ENOMEM;
1490
1491 e->magic = EEPROM_MAGIC;
1492 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1493 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1494
1495 if (!err)
1496 memcpy(data, buf + e->offset, e->len);
1497 kfree(buf);
1498 return err;
1499}
1500
1501static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1502 u8 * data)
1503{
1504 u8 *buf;
1505 int err = 0;
1506 u32 aligned_offset, aligned_len, *p;
1507 struct adapter *adapter = dev->priv;
1508
1509 if (eeprom->magic != EEPROM_MAGIC)
1510 return -EINVAL;
1511
1512 aligned_offset = eeprom->offset & ~3;
1513 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1514
1515 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1516 buf = kmalloc(aligned_len, GFP_KERNEL);
1517 if (!buf)
1518 return -ENOMEM;
1519 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1520 if (!err && aligned_len > 4)
1521 err = t3_seeprom_read(adapter,
1522 aligned_offset + aligned_len - 4,
1523 (u32 *) & buf[aligned_len - 4]);
1524 if (err)
1525 goto out;
1526 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1527 } else
1528 buf = data;
1529
1530 err = t3_seeprom_wp(adapter, 0);
1531 if (err)
1532 goto out;
1533
1534 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1535 err = t3_seeprom_write(adapter, aligned_offset, *p);
1536 aligned_offset += 4;
1537 }
1538
1539 if (!err)
1540 err = t3_seeprom_wp(adapter, 1);
1541out:
1542 if (buf != data)
1543 kfree(buf);
1544 return err;
1545}
1546
1547static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1548{
1549 wol->supported = 0;
1550 wol->wolopts = 0;
1551 memset(&wol->sopass, 0, sizeof(wol->sopass));
1552}
1553
1554static const struct ethtool_ops cxgb_ethtool_ops = {
1555 .get_settings = get_settings,
1556 .set_settings = set_settings,
1557 .get_drvinfo = get_drvinfo,
1558 .get_msglevel = get_msglevel,
1559 .set_msglevel = set_msglevel,
1560 .get_ringparam = get_sge_param,
1561 .set_ringparam = set_sge_param,
1562 .get_coalesce = get_coalesce,
1563 .set_coalesce = set_coalesce,
1564 .get_eeprom_len = get_eeprom_len,
1565 .get_eeprom = get_eeprom,
1566 .set_eeprom = set_eeprom,
1567 .get_pauseparam = get_pauseparam,
1568 .set_pauseparam = set_pauseparam,
1569 .get_rx_csum = get_rx_csum,
1570 .set_rx_csum = set_rx_csum,
1571 .get_tx_csum = ethtool_op_get_tx_csum,
1572 .set_tx_csum = ethtool_op_set_tx_csum,
1573 .get_sg = ethtool_op_get_sg,
1574 .set_sg = ethtool_op_set_sg,
1575 .get_link = ethtool_op_get_link,
1576 .get_strings = get_strings,
1577 .phys_id = cxgb3_phys_id,
1578 .nway_reset = restart_autoneg,
1579 .get_stats_count = get_stats_count,
1580 .get_ethtool_stats = get_stats,
1581 .get_regs_len = get_regs_len,
1582 .get_regs = get_regs,
1583 .get_wol = get_wol,
1584 .get_tso = ethtool_op_get_tso,
1585 .set_tso = ethtool_op_set_tso,
1586};
1587
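/* A negative value means "leave unchanged" in the ioctl handlers below, so it always passes the range check. */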
1588static int in_range(int val, int lo, int hi)
1589{
1590 return val < 0 || (val <= hi && val >= lo);
1591}
1592
1593static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1594{
1595 int ret;
1596 u32 cmd;
1597 struct adapter *adapter = dev->priv;
1598
1599 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1600 return -EFAULT;
1601
1602 switch (cmd) {
1603 case CHELSIO_SET_QSET_PARAMS:{
1604 int i;
1605 struct qset_params *q;
1606 struct ch_qset_params t;
1607
1608 if (!capable(CAP_NET_ADMIN))
1609 return -EPERM;
1610 if (copy_from_user(&t, useraddr, sizeof(t)))
1611 return -EFAULT;
1612 if (t.qset_idx >= SGE_QSETS)
1613 return -EINVAL;
1614 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1615 !in_range(t.cong_thres, 0, 255) ||
1616 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1617 MAX_TXQ_ENTRIES) ||
1618 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1619 MAX_TXQ_ENTRIES) ||
1620 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1621 MAX_CTRL_TXQ_ENTRIES) ||
1622 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1623 MAX_RX_BUFFERS)
1624 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1625 MAX_RX_JUMBO_BUFFERS)
1626 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1627 MAX_RSPQ_ENTRIES))
1628 return -EINVAL;
1629 if ((adapter->flags & FULL_INIT_DONE) &&
1630 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1631 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1632 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1633 t.polling >= 0 || t.cong_thres >= 0))
1634 return -EBUSY;
1635
1636 q = &adapter->params.sge.qset[t.qset_idx];
1637
1638 if (t.rspq_size >= 0)
1639 q->rspq_size = t.rspq_size;
1640 if (t.fl_size[0] >= 0)
1641 q->fl_size = t.fl_size[0];
1642 if (t.fl_size[1] >= 0)
1643 q->jumbo_size = t.fl_size[1];
1644 if (t.txq_size[0] >= 0)
1645 q->txq_size[0] = t.txq_size[0];
1646 if (t.txq_size[1] >= 0)
1647 q->txq_size[1] = t.txq_size[1];
1648 if (t.txq_size[2] >= 0)
1649 q->txq_size[2] = t.txq_size[2];
1650 if (t.cong_thres >= 0)
1651 q->cong_thres = t.cong_thres;
1652 if (t.intr_lat >= 0) {
1653 struct sge_qset *qs =
1654 &adapter->sge.qs[t.qset_idx];
1655
1656 q->coalesce_usecs = t.intr_lat;
1657 t3_update_qset_coalesce(qs, q);
1658 }
1659 if (t.polling >= 0) {
1660 if (adapter->flags & USING_MSIX)
1661 q->polling = t.polling;
1662 else {
1663 /* No polling with INTx for T3A */
1664 if (adapter->params.rev == 0 &&
1665 !(adapter->flags & USING_MSI))
1666 t.polling = 0;
1667
1668 for (i = 0; i < SGE_QSETS; i++) {
1669 q = &adapter->params.sge.
1670 qset[i];
1671 q->polling = t.polling;
1672 }
1673 }
1674 }
1675 break;
1676 }
1677 case CHELSIO_GET_QSET_PARAMS:{
1678 struct qset_params *q;
1679 struct ch_qset_params t;
1680
1681 if (copy_from_user(&t, useraddr, sizeof(t)))
1682 return -EFAULT;
1683 if (t.qset_idx >= SGE_QSETS)
1684 return -EINVAL;
1685
1686 q = &adapter->params.sge.qset[t.qset_idx];
1687 t.rspq_size = q->rspq_size;
1688 t.txq_size[0] = q->txq_size[0];
1689 t.txq_size[1] = q->txq_size[1];
1690 t.txq_size[2] = q->txq_size[2];
1691 t.fl_size[0] = q->fl_size;
1692 t.fl_size[1] = q->jumbo_size;
1693 t.polling = q->polling;
1694 t.intr_lat = q->coalesce_usecs;
1695 t.cong_thres = q->cong_thres;
1696
1697 if (copy_to_user(useraddr, &t, sizeof(t)))
1698 return -EFAULT;
1699 break;
1700 }
1701 case CHELSIO_SET_QSET_NUM:{
1702 struct ch_reg edata;
1703 struct port_info *pi = netdev_priv(dev);
1704 unsigned int i, first_qset = 0, other_qsets = 0;
1705
1706 if (!capable(CAP_NET_ADMIN))
1707 return -EPERM;
1708 if (adapter->flags & FULL_INIT_DONE)
1709 return -EBUSY;
1710 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1711 return -EFAULT;
1712 if (edata.val < 1 ||
1713 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1714 return -EINVAL;
1715
1716 for_each_port(adapter, i)
1717 if (adapter->port[i] && adapter->port[i] != dev)
1718 other_qsets += adap2pinfo(adapter, i)->nqsets;
1719
1720 if (edata.val + other_qsets > SGE_QSETS)
1721 return -EINVAL;
1722
1723 pi->nqsets = edata.val;
1724
1725 for_each_port(adapter, i)
1726 if (adapter->port[i]) {
1727 pi = adap2pinfo(adapter, i);
1728 pi->first_qset = first_qset;
1729 first_qset += pi->nqsets;
1730 }
1731 break;
1732 }
1733 case CHELSIO_GET_QSET_NUM:{
1734 struct ch_reg edata;
1735 struct port_info *pi = netdev_priv(dev);
1736
1737 edata.cmd = CHELSIO_GET_QSET_NUM;
1738 edata.val = pi->nqsets;
1739 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1740 return -EFAULT;
1741 break;
1742 }
1743 case CHELSIO_LOAD_FW:{
1744 u8 *fw_data;
1745 struct ch_mem_range t;
1746
1747 if (!capable(CAP_NET_ADMIN))
1748 return -EPERM;
1749 if (copy_from_user(&t, useraddr, sizeof(t)))
1750 return -EFAULT;
1751
1752 fw_data = kmalloc(t.len, GFP_KERNEL);
1753 if (!fw_data)
1754 return -ENOMEM;
1755
1756 if (copy_from_user
1757 (fw_data, useraddr + sizeof(t), t.len)) {
1758 kfree(fw_data);
1759 return -EFAULT;
1760 }
1761
1762 ret = t3_load_fw(adapter, fw_data, t.len);
1763 kfree(fw_data);
1764 if (ret)
1765 return ret;
1766 break;
1767 }
1768 case CHELSIO_SETMTUTAB:{
1769 struct ch_mtus m;
1770 int i;
1771
1772 if (!is_offload(adapter))
1773 return -EOPNOTSUPP;
1774 if (!capable(CAP_NET_ADMIN))
1775 return -EPERM;
1776 if (offload_running(adapter))
1777 return -EBUSY;
1778 if (copy_from_user(&m, useraddr, sizeof(m)))
1779 return -EFAULT;
1780 if (m.nmtus != NMTUS)
1781 return -EINVAL;
1782 if (m.mtus[0] < 81) /* accommodate SACK */
1783 return -EINVAL;
1784
1785 /* MTUs must be in ascending order */
1786 for (i = 1; i < NMTUS; ++i)
1787 if (m.mtus[i] < m.mtus[i - 1])
1788 return -EINVAL;
1789
1790 memcpy(adapter->params.mtus, m.mtus,
1791 sizeof(adapter->params.mtus));
1792 break;
1793 }
1794 case CHELSIO_GET_PM:{
1795 struct tp_params *p = &adapter->params.tp;
1796 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1797
1798 if (!is_offload(adapter))
1799 return -EOPNOTSUPP;
1800 m.tx_pg_sz = p->tx_pg_size;
1801 m.tx_num_pg = p->tx_num_pgs;
1802 m.rx_pg_sz = p->rx_pg_size;
1803 m.rx_num_pg = p->rx_num_pgs;
1804 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1805 if (copy_to_user(useraddr, &m, sizeof(m)))
1806 return -EFAULT;
1807 break;
1808 }
1809 case CHELSIO_SET_PM:{
1810 struct ch_pm m;
1811 struct tp_params *p = &adapter->params.tp;
1812
1813 if (!is_offload(adapter))
1814 return -EOPNOTSUPP;
1815 if (!capable(CAP_NET_ADMIN))
1816 return -EPERM;
1817 if (adapter->flags & FULL_INIT_DONE)
1818 return -EBUSY;
1819 if (copy_from_user(&m, useraddr, sizeof(m)))
1820 return -EFAULT;
d9da466a 1821 if (!is_power_of_2(m.rx_pg_sz) ||
1822 !is_power_of_2(m.tx_pg_sz))
1823 return -EINVAL; /* not power of 2 */
1824 if (!(m.rx_pg_sz & 0x14000))
1825 return -EINVAL; /* not 16KB or 64KB */
1826 if (!(m.tx_pg_sz & 0x1554000))
1827 return -EINVAL;
1828 if (m.tx_num_pg == -1)
1829 m.tx_num_pg = p->tx_num_pgs;
1830 if (m.rx_num_pg == -1)
1831 m.rx_num_pg = p->rx_num_pgs;
1832 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1833 return -EINVAL;
1834 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1835 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1836 return -EINVAL;
1837 p->rx_pg_size = m.rx_pg_sz;
1838 p->tx_pg_size = m.tx_pg_sz;
1839 p->rx_num_pgs = m.rx_num_pg;
1840 p->tx_num_pgs = m.tx_num_pg;
1841 break;
1842 }
1843 case CHELSIO_GET_MEM:{
1844 struct ch_mem_range t;
1845 struct mc7 *mem;
1846 u64 buf[32];
1847
1848 if (!is_offload(adapter))
1849 return -EOPNOTSUPP;
1850 if (!(adapter->flags & FULL_INIT_DONE))
1851 return -EIO; /* need the memory controllers */
1852 if (copy_from_user(&t, useraddr, sizeof(t)))
1853 return -EFAULT;
1854 if ((t.addr & 7) || (t.len & 7))
1855 return -EINVAL;
1856 if (t.mem_id == MEM_CM)
1857 mem = &adapter->cm;
1858 else if (t.mem_id == MEM_PMRX)
1859 mem = &adapter->pmrx;
1860 else if (t.mem_id == MEM_PMTX)
1861 mem = &adapter->pmtx;
1862 else
1863 return -EINVAL;
1864
1865 /*
1866 * Version scheme:
1867 * bits 0..9: chip version
1868 * bits 10..15: chip revision
1869 */
1870 t.version = 3 | (adapter->params.rev << 10);
1871 if (copy_to_user(useraddr, &t, sizeof(t)))
1872 return -EFAULT;
1873
1874 /*
1875 * Read 256 bytes at a time as len can be large and we don't
1876 * want to use huge intermediate buffers.
1877 */
1878 useraddr += sizeof(t); /* advance to start of buffer */
1879 while (t.len) {
1880 unsigned int chunk =
1881 min_t(unsigned int, t.len, sizeof(buf));
1882
1883 ret =
1884 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1885 buf);
1886 if (ret)
1887 return ret;
1888 if (copy_to_user(useraddr, buf, chunk))
1889 return -EFAULT;
1890 useraddr += chunk;
1891 t.addr += chunk;
1892 t.len -= chunk;
1893 }
1894 break;
1895 }
1896 case CHELSIO_SET_TRACE_FILTER:{
1897 struct ch_trace t;
1898 const struct trace_params *tp;
1899
1900 if (!capable(CAP_NET_ADMIN))
1901 return -EPERM;
1902 if (!offload_running(adapter))
1903 return -EAGAIN;
1904 if (copy_from_user(&t, useraddr, sizeof(t)))
1905 return -EFAULT;
1906
1907 tp = (const struct trace_params *)&t.sip;
1908 if (t.config_tx)
1909 t3_config_trace_filter(adapter, tp, 0,
1910 t.invert_match,
1911 t.trace_tx);
1912 if (t.config_rx)
1913 t3_config_trace_filter(adapter, tp, 1,
1914 t.invert_match,
1915 t.trace_rx);
1916 break;
1917 }
1918 default:
1919 return -EOPNOTSUPP;
1920 }
1921 return 0;
1922}
1923
1924static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1925{
1926 int ret, mmd;
1927 struct adapter *adapter = dev->priv;
1928 struct port_info *pi = netdev_priv(dev);
1929 struct mii_ioctl_data *data = if_mii(req);
1930
1931 switch (cmd) {
1932 case SIOCGMIIPHY:
1933 data->phy_id = pi->phy.addr;
1934 /* FALLTHRU */
1935 case SIOCGMIIREG:{
1936 u32 val;
1937 struct cphy *phy = &pi->phy;
1938
1939 if (!phy->mdio_read)
1940 return -EOPNOTSUPP;
1941 if (is_10G(adapter)) {
1942 mmd = data->phy_id >> 8;
1943 if (!mmd)
1944 mmd = MDIO_DEV_PCS;
1945 else if (mmd > MDIO_DEV_XGXS)
1946 return -EINVAL;
1947
1948 ret =
1949 phy->mdio_read(adapter, data->phy_id & 0x1f,
1950 mmd, data->reg_num, &val);
1951 } else
1952 ret =
1953 phy->mdio_read(adapter, data->phy_id & 0x1f,
1954 0, data->reg_num & 0x1f,
1955 &val);
1956 if (!ret)
1957 data->val_out = val;
1958 break;
1959 }
1960 case SIOCSMIIREG:{
1961 struct cphy *phy = &pi->phy;
1962
1963 if (!capable(CAP_NET_ADMIN))
1964 return -EPERM;
1965 if (!phy->mdio_write)
1966 return -EOPNOTSUPP;
1967 if (is_10G(adapter)) {
1968 mmd = data->phy_id >> 8;
1969 if (!mmd)
1970 mmd = MDIO_DEV_PCS;
1971 else if (mmd > MDIO_DEV_XGXS)
1972 return -EINVAL;
1973
1974 ret =
1975 phy->mdio_write(adapter,
1976 data->phy_id & 0x1f, mmd,
1977 data->reg_num,
1978 data->val_in);
1979 } else
1980 ret =
1981 phy->mdio_write(adapter,
1982 data->phy_id & 0x1f, 0,
1983 data->reg_num & 0x1f,
1984 data->val_in);
1985 break;
1986 }
1987 case SIOCCHIOCTL:
1988 return cxgb_extension_ioctl(dev, req->ifr_data);
1989 default:
1990 return -EOPNOTSUPP;
1991 }
1992 return ret;
1993}
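/*
 * A note on the MII ioctls above: 10G ports use clause-45 style addressing,
 * where the upper bits of phy_id select the MMD (defaulting to the PCS and
 * capped at XGXS) and the low 5 bits are the PHY address; 1G ports ignore
 * the MMD and mask the register number down to 5 bits.
 */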
1994
1995static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1996{
1997 int ret;
1998 struct adapter *adapter = dev->priv;
1999 struct port_info *pi = netdev_priv(dev);
2000
2001 if (new_mtu < 81) /* accommodate SACK */
2002 return -EINVAL;
2003 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2004 return ret;
2005 dev->mtu = new_mtu;
2006 init_port_mtus(adapter);
2007 if (adapter->params.rev == 0 && offload_running(adapter))
2008 t3_load_mtus(adapter, adapter->params.mtus,
2009 adapter->params.a_wnd, adapter->params.b_wnd,
2010 adapter->port[0]->mtu);
2011 return 0;
2012}
2013
2014static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2015{
2016 struct adapter *adapter = dev->priv;
2017 struct port_info *pi = netdev_priv(dev);
2018 struct sockaddr *addr = p;
2019
2020 if (!is_valid_ether_addr(addr->sa_data))
2021 return -EINVAL;
2022
2023 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2024 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2025 if (offload_running(adapter))
2026 write_smt_entry(adapter, pi->port_id);
2027 return 0;
2028}
2029
2030/**
2031 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2032 * @adap: the adapter
2033 * @p: the port
2034 *
2035 * Ensures that current Rx processing on any of the queues associated with
2036 * the given port completes before returning. We do this by acquiring and
2037 * releasing the locks of the response queues associated with the port.
2038 */
2039static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2040{
2041 int i;
2042
2043 for (i = 0; i < p->nqsets; i++) {
2044 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2045
2046 spin_lock_irq(&q->lock);
2047 spin_unlock_irq(&q->lock);
2048 }
2049}
2050
2051static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2052{
2053 struct adapter *adapter = dev->priv;
2054 struct port_info *pi = netdev_priv(dev);
2055
2056 pi->vlan_grp = grp;
2057 if (adapter->params.rev > 0)
2058 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2059 else {
2060 /* single control for all ports */
2061 unsigned int i, have_vlans = 0;
2062 for_each_port(adapter, i)
2063 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2064
2065 t3_set_vlan_accel(adapter, 1, have_vlans);
2066 }
2067 t3_synchronize_rx(adapter, pi);
2068}
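/*
 * Revision-0 parts have only a single, adapter-wide VLAN acceleration
 * control, so it stays enabled as long as any port has a VLAN group
 * registered; later revisions expose a per-port enable bit.  The closing
 * t3_synchronize_rx() guarantees no Rx handler still holds the old
 * vlan_grp pointer when this function returns.
 */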
2069
2070#ifdef CONFIG_NET_POLL_CONTROLLER
2071static void cxgb_netpoll(struct net_device *dev)
2072{
2073 struct adapter *adapter = dev->priv;
2074 struct port_info *pi = netdev_priv(dev);
2075 int qidx;
 2076
2077 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2078 struct sge_qset *qs = &adapter->sge.qs[qidx];
2079 void *source;
2080
2081 if (adapter->flags & USING_MSIX)
2082 source = qs;
2083 else
2084 source = adapter;
2085
2086 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2087 }
2088}
2089#endif
2090
2091#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
2092int update_tpsram(struct adapter *adap)
2093{
2094 const struct firmware *tpsram;
2095 char buf[64];
2096 struct device *dev = &adap->pdev->dev;
2097 int ret;
2098 char rev;
2099
2100 rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a';
2101
2102 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
2103 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
2104
2105 ret = request_firmware(&tpsram, buf, dev);
2106 if (ret < 0) {
2107 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
2108 buf);
2109 return ret;
2110 }
2111
2112 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
2113 if (ret)
2114 goto release_tpsram;
2115
2116 ret = t3_set_proto_sram(adap, tpsram->data);
2117 if (ret)
2118 dev_err(dev, "loading protocol SRAM failed\n");
2119
2120release_tpsram:
2121 release_firmware(tpsram);
2122
2123 return ret;
2124}
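/*
 * The image name is built from TPSRAM_NAME, so a rev-B2 adapter asks for
 * something like "t3b_protocol_sram-<major>.<minor>.<micro>.bin" (any other
 * revision gets the "t3a_" variant), fetched through the normal
 * request_firmware() search path.
 */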
2125
2126
2127/*
2128 * Periodic accumulation of MAC statistics.
2129 */
2130static void mac_stats_update(struct adapter *adapter)
2131{
2132 int i;
2133
2134 for_each_port(adapter, i) {
2135 struct net_device *dev = adapter->port[i];
2136 struct port_info *p = netdev_priv(dev);
2137
2138 if (netif_running(dev)) {
2139 spin_lock(&adapter->stats_lock);
2140 t3_mac_update_stats(&p->mac);
2141 spin_unlock(&adapter->stats_lock);
2142 }
2143 }
2144}
2145
2146static void check_link_status(struct adapter *adapter)
2147{
2148 int i;
2149
2150 for_each_port(adapter, i) {
2151 struct net_device *dev = adapter->port[i];
2152 struct port_info *p = netdev_priv(dev);
2153
2154 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2155 t3_link_changed(adapter, i);
2156 }
2157}
2158
2159static void check_t3b2_mac(struct adapter *adapter)
2160{
2161 int i;
2162
2163 if (!rtnl_trylock()) /* synchronize with ifdown */
2164 return;
2165
2166 for_each_port(adapter, i) {
2167 struct net_device *dev = adapter->port[i];
2168 struct port_info *p = netdev_priv(dev);
2169 int status;
2170
2171 if (!netif_running(dev))
2172 continue;
2173
2174 status = 0;
 2175 if (netif_running(dev) && netif_carrier_ok(dev))
2176 status = t3b2_mac_watchdog_task(&p->mac);
2177 if (status == 1)
2178 p->mac.stats.num_toggled++;
2179 else if (status == 2) {
2180 struct cmac *mac = &p->mac;
2181
2182 t3_mac_set_mtu(mac, dev->mtu);
2183 t3_mac_set_address(mac, 0, dev->dev_addr);
2184 cxgb_set_rxmode(dev);
2185 t3_link_start(&p->phy, mac, &p->link_config);
2186 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2187 t3_port_intr_enable(adapter, p->port_id);
2188 p->mac.stats.num_resets++;
2189 }
2190 }
2191 rtnl_unlock();
2192}
2193
2194
2195static void t3_adap_check_task(struct work_struct *work)
2196{
2197 struct adapter *adapter = container_of(work, struct adapter,
2198 adap_check_task.work);
2199 const struct adapter_params *p = &adapter->params;
2200
2201 adapter->check_task_cnt++;
2202
2203 /* Check link status for PHYs without interrupts */
2204 if (p->linkpoll_period)
2205 check_link_status(adapter);
2206
2207 /* Accumulate MAC stats if needed */
2208 if (!p->linkpoll_period ||
2209 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2210 p->stats_update_period) {
2211 mac_stats_update(adapter);
2212 adapter->check_task_cnt = 0;
2213 }
2214
2215 if (p->rev == T3_REV_B2)
2216 check_t3b2_mac(adapter);
2217
2218 /* Schedule the next check update if any port is active. */
2219 spin_lock(&adapter->work_lock);
2220 if (adapter->open_device_map & PORT_MASK)
2221 schedule_chk_task(adapter);
2222 spin_unlock(&adapter->work_lock);
2223}
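/*
 * With the arithmetic above, MAC statistics are folded in roughly every
 * stats_update_period; linkpoll_period appears to be expressed in tenths
 * of a second, hence the division by 10.  When no PHY needs link polling
 * the statistics are accumulated on every run instead.
 */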
2224
2225/*
2226 * Processes external (PHY) interrupts in process context.
2227 */
2228static void ext_intr_task(struct work_struct *work)
2229{
2230 struct adapter *adapter = container_of(work, struct adapter,
2231 ext_intr_handler_task);
2232
2233 t3_phy_intr_handler(adapter);
2234
2235 /* Now reenable external interrupts */
2236 spin_lock_irq(&adapter->work_lock);
2237 if (adapter->slow_intr_mask) {
2238 adapter->slow_intr_mask |= F_T3DBG;
2239 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2240 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2241 adapter->slow_intr_mask);
2242 }
2243 spin_unlock_irq(&adapter->work_lock);
2244}
2245
2246/*
2247 * Interrupt-context handler for external (PHY) interrupts.
2248 */
2249void t3_os_ext_intr_handler(struct adapter *adapter)
2250{
2251 /*
2252 * Schedule a task to handle external interrupts as they may be slow
2253 * and we use a mutex to protect MDIO registers. We disable PHY
2254 * interrupts in the meantime and let the task reenable them when
2255 * it's done.
2256 */
2257 spin_lock(&adapter->work_lock);
2258 if (adapter->slow_intr_mask) {
2259 adapter->slow_intr_mask &= ~F_T3DBG;
2260 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2261 adapter->slow_intr_mask);
2262 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2263 }
2264 spin_unlock(&adapter->work_lock);
2265}
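/*
 * F_T3DBG appears to be the slow-interrupt bit that carries external (PHY)
 * interrupts: it is masked off here so the PHY cannot interrupt again while
 * ext_intr_task() services it in process context under the MDIO mutex, and
 * that task sets the bit (and clears the latched cause) once it is done.
 */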
2266
2267void t3_fatal_err(struct adapter *adapter)
2268{
2269 unsigned int fw_status[4];
2270
2271 if (adapter->flags & FULL_INIT_DONE) {
2272 t3_sge_stop(adapter);
2273 t3_intr_disable(adapter);
2274 }
2275 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2276 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2277 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2278 fw_status[0], fw_status[1],
2279 fw_status[2], fw_status[3]);
2280
2281}
2282
2283static int __devinit cxgb_enable_msix(struct adapter *adap)
2284{
2285 struct msix_entry entries[SGE_QSETS + 1];
2286 int i, err;
2287
2288 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2289 entries[i].entry = i;
2290
2291 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2292 if (!err) {
2293 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2294 adap->msix_info[i].vec = entries[i].vector;
2295 } else if (err > 0)
2296 dev_info(&adap->pdev->dev,
2297 "only %d MSI-X vectors left, not using MSI-X\n", err);
2298 return err;
2299}
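/*
 * One MSI-X vector is requested per SGE queue set plus one extra entry,
 * presumably for the slow-path/error interrupt.  With the pci_enable_msix()
 * semantics of this era a positive return value only reports how many
 * vectors were actually available, so the caller simply falls back to MSI
 * or legacy interrupts in that case.
 */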
2300
2301static void __devinit print_port_info(struct adapter *adap,
2302 const struct adapter_info *ai)
2303{
2304 static const char *pci_variant[] = {
2305 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2306 };
2307
2308 int i;
2309 char buf[80];
2310
2311 if (is_pcie(adap))
2312 snprintf(buf, sizeof(buf), "%s x%d",
2313 pci_variant[adap->params.pci.variant],
2314 adap->params.pci.width);
2315 else
2316 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2317 pci_variant[adap->params.pci.variant],
2318 adap->params.pci.speed, adap->params.pci.width);
2319
2320 for_each_port(adap, i) {
2321 struct net_device *dev = adap->port[i];
2322 const struct port_info *pi = netdev_priv(dev);
2323
2324 if (!test_bit(i, &adap->registered_device_map))
2325 continue;
 2326 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
 2327 dev->name, ai->desc, pi->port_type->desc,
 2328 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2329 (adap->flags & USING_MSIX) ? " MSI-X" :
2330 (adap->flags & USING_MSI) ? " MSI" : "");
2331 if (adap->name == dev->name && adap->params.vpd.mclk)
2332 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2333 adap->name, t3_mc7_size(&adap->cm) >> 20,
2334 t3_mc7_size(&adap->pmtx) >> 20,
2335 t3_mc7_size(&adap->pmrx) >> 20);
2336 }
2337}
2338
2339static int __devinit init_one(struct pci_dev *pdev,
2340 const struct pci_device_id *ent)
2341{
2342 static int version_printed;
2343
2344 int i, err, pci_using_dac = 0;
2345 unsigned long mmio_start, mmio_len;
2346 const struct adapter_info *ai;
2347 struct adapter *adapter = NULL;
2348 struct port_info *pi;
2349
2350 if (!version_printed) {
2351 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2352 ++version_printed;
2353 }
2354
2355 if (!cxgb3_wq) {
2356 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2357 if (!cxgb3_wq) {
2358 printk(KERN_ERR DRV_NAME
2359 ": cannot initialize work queue\n");
2360 return -ENOMEM;
2361 }
2362 }
2363
2364 err = pci_request_regions(pdev, DRV_NAME);
2365 if (err) {
2366 /* Just info, some other driver may have claimed the device. */
2367 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2368 return err;
2369 }
2370
2371 err = pci_enable_device(pdev);
2372 if (err) {
2373 dev_err(&pdev->dev, "cannot enable PCI device\n");
2374 goto out_release_regions;
2375 }
2376
2377 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2378 pci_using_dac = 1;
2379 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2380 if (err) {
2381 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2382 "coherent allocations\n");
2383 goto out_disable_device;
2384 }
2385 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2386 dev_err(&pdev->dev, "no usable DMA configuration\n");
2387 goto out_disable_device;
2388 }
2389
2390 pci_set_master(pdev);
2391
2392 mmio_start = pci_resource_start(pdev, 0);
2393 mmio_len = pci_resource_len(pdev, 0);
2394 ai = t3_get_adapter_info(ent->driver_data);
2395
2396 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2397 if (!adapter) {
2398 err = -ENOMEM;
2399 goto out_disable_device;
2400 }
2401
2402 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2403 if (!adapter->regs) {
2404 dev_err(&pdev->dev, "cannot map device registers\n");
2405 err = -ENOMEM;
2406 goto out_free_adapter;
2407 }
2408
2409 adapter->pdev = pdev;
2410 adapter->name = pci_name(pdev);
2411 adapter->msg_enable = dflt_msg_enable;
2412 adapter->mmio_len = mmio_len;
2413
2414 mutex_init(&adapter->mdio_lock);
2415 spin_lock_init(&adapter->work_lock);
2416 spin_lock_init(&adapter->stats_lock);
2417
2418 INIT_LIST_HEAD(&adapter->adapter_list);
2419 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2420 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2421
2422 for (i = 0; i < ai->nports; ++i) {
2423 struct net_device *netdev;
2424
2425 netdev = alloc_etherdev(sizeof(struct port_info));
2426 if (!netdev) {
2427 err = -ENOMEM;
2428 goto out_free_dev;
2429 }
2430
2431 SET_MODULE_OWNER(netdev);
2432 SET_NETDEV_DEV(netdev, &pdev->dev);
2433
2434 adapter->port[i] = netdev;
2435 pi = netdev_priv(netdev);
2436 pi->rx_csum_offload = 1;
2437 pi->nqsets = 1;
2438 pi->first_qset = i;
2439 pi->activity = 0;
2440 pi->port_id = i;
2441 netif_carrier_off(netdev);
2442 netdev->irq = pdev->irq;
2443 netdev->mem_start = mmio_start;
2444 netdev->mem_end = mmio_start + mmio_len - 1;
2445 netdev->priv = adapter;
2446 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2447 netdev->features |= NETIF_F_LLTX;
2448 if (pci_using_dac)
2449 netdev->features |= NETIF_F_HIGHDMA;
2450
2451 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2452 netdev->vlan_rx_register = vlan_rx_register;
2453
2454 netdev->open = cxgb_open;
2455 netdev->stop = cxgb_close;
2456 netdev->hard_start_xmit = t3_eth_xmit;
2457 netdev->get_stats = cxgb_get_stats;
2458 netdev->set_multicast_list = cxgb_set_rxmode;
2459 netdev->do_ioctl = cxgb_ioctl;
2460 netdev->change_mtu = cxgb_change_mtu;
2461 netdev->set_mac_address = cxgb_set_mac_addr;
2462#ifdef CONFIG_NET_POLL_CONTROLLER
2463 netdev->poll_controller = cxgb_netpoll;
2464#endif
2465 netdev->weight = 64;
2466
2467 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2468 }
2469
2470 pci_set_drvdata(pdev, adapter->port[0]);
2471 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2472 err = -ENODEV;
2473 goto out_free_dev;
2474 }
2475
2476 err = t3_check_tpsram_version(adapter);
2477 if (err == -EINVAL)
2478 err = update_tpsram(adapter);
2479
2480 if (err)
2481 goto out_free_dev;
2482
2483 /*
2484 * The card is now ready to go. If any errors occur during device
2485 * registration we do not fail the whole card but rather proceed only
2486 * with the ports we manage to register successfully. However we must
2487 * register at least one net device.
2488 */
2489 for_each_port(adapter, i) {
2490 err = register_netdev(adapter->port[i]);
2491 if (err)
2492 dev_warn(&pdev->dev,
2493 "cannot register net device %s, skipping\n",
2494 adapter->port[i]->name);
2495 else {
2496 /*
2497 * Change the name we use for messages to the name of
2498 * the first successfully registered interface.
2499 */
2500 if (!adapter->registered_device_map)
2501 adapter->name = adapter->port[i]->name;
2502
2503 __set_bit(i, &adapter->registered_device_map);
2504 }
2505 }
2506 if (!adapter->registered_device_map) {
2507 dev_err(&pdev->dev, "could not register any net devices\n");
2508 goto out_free_dev;
2509 }
2510
2511 /* Driver's ready. Reflect it on LEDs */
2512 t3_led_ready(adapter);
2513
2514 if (is_offload(adapter)) {
2515 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2516 cxgb3_adapter_ofld(adapter);
2517 }
2518
2519 /* See what interrupts we'll be using */
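	/*
	 * The driver-wide "msi" setting picks the mode: values above 1 try
	 * MSI-X first and fall back to MSI if that fails, a value of 1 tries
	 * plain MSI, and anything else leaves the device on legacy INTx.
	 */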
2520 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2521 adapter->flags |= USING_MSIX;
2522 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2523 adapter->flags |= USING_MSI;
2524
 2525 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2526 &cxgb3_attr_group);
2527
2528 print_port_info(adapter, ai);
2529 return 0;
2530
2531out_free_dev:
2532 iounmap(adapter->regs);
2533 for (i = ai->nports - 1; i >= 0; --i)
2534 if (adapter->port[i])
2535 free_netdev(adapter->port[i]);
2536
2537out_free_adapter:
2538 kfree(adapter);
2539
2540out_disable_device:
2541 pci_disable_device(pdev);
2542out_release_regions:
2543 pci_release_regions(pdev);
2544 pci_set_drvdata(pdev, NULL);
2545 return err;
2546}
2547
2548static void __devexit remove_one(struct pci_dev *pdev)
2549{
2550 struct net_device *dev = pci_get_drvdata(pdev);
2551
2552 if (dev) {
2553 int i;
2554 struct adapter *adapter = dev->priv;
2555
2556 t3_sge_stop(adapter);
 2557 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2558 &cxgb3_attr_group);
2559
2560 for_each_port(adapter, i)
2561 if (test_bit(i, &adapter->registered_device_map))
2562 unregister_netdev(adapter->port[i]);
2563
2564 if (is_offload(adapter)) {
2565 cxgb3_adapter_unofld(adapter);
2566 if (test_bit(OFFLOAD_DEVMAP_BIT,
2567 &adapter->open_device_map))
2568 offload_close(&adapter->tdev);
2569 }
2570
2571 t3_free_sge_resources(adapter);
2572 cxgb_disable_msi(adapter);
2573
2574 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2575 if (adapter->dummy_netdev[i]) {
2576 free_netdev(adapter->dummy_netdev[i]);
2577 adapter->dummy_netdev[i] = NULL;
2578 }
2579
2580 for_each_port(adapter, i)
2581 if (adapter->port[i])
2582 free_netdev(adapter->port[i]);
2583
2584 iounmap(adapter->regs);
2585 kfree(adapter);
2586 pci_release_regions(pdev);
2587 pci_disable_device(pdev);
2588 pci_set_drvdata(pdev, NULL);
2589 }
2590}
2591
2592static struct pci_driver driver = {
2593 .name = DRV_NAME,
2594 .id_table = cxgb3_pci_tbl,
2595 .probe = init_one,
2596 .remove = __devexit_p(remove_one),
2597};
2598
2599static int __init cxgb3_init_module(void)
2600{
2601 int ret;
2602
2603 cxgb3_offload_init();
2604
2605 ret = pci_register_driver(&driver);
2606 return ret;
2607}
2608
2609static void __exit cxgb3_cleanup_module(void)
2610{
2611 pci_unregister_driver(&driver);
2612 if (cxgb3_wq)
2613 destroy_workqueue(cxgb3_wq);
2614}
2615
2616module_init(cxgb3_init_module);
2617module_exit(cxgb3_cleanup_module);