/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose link status is to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

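/*
 * Propagate the net device's RX mode (promiscuity, multicast list) to
 * the port's MAC RX filter.
 */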
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

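/*
 * Request the MSI-X data interrupts, one per queue set.  Vector 0 is
 * reserved for async events, so queue set i uses vector i + 1.  On
 * failure all previously requested IRQs are freed before returning.
 */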
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

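/*
 * Add a NAPI handler for each queue set that has been allocated;
 * qs->adap is only set for initialized queue sets, so unused slots
 * are skipped.
 */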
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
				irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

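/*
 * Common show/store helpers for the sysfs attributes defined below.
 * Both run under the rtnl lock to synchronize with ioctls that may
 * shut down the device; the store path additionally validates the
 * value range and requires CAP_NET_ADMIN.
 */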
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
			 char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, attr, buf, format_##name); \
}

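/*
 * Set the number of MC5 filter entries.  Allowed only before full HW
 * initialization; rev 0 parts cannot use filters, and the new value
 * must leave room for the server region plus the minimum TID count
 * required when offload is in use.
 */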
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

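/*
 * Set the number of MC5 server entries, under the same sizing
 * constraints as set_nfilters() above.
 */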
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

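/*
 * Show/store helpers for the TX scheduler (sched0..sched7) attributes.
 * The show path reads the scheduler's byte and cycle counts through
 * the TP TM PIO window and converts them to a rate in Kbps using the
 * core clock; the store path hands the requested rate to
 * t3_config_sched().
 */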
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

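/*
 * Write a port's MAC address into its SMT (source MAC table) entry by
 * sending a CPL_SMT_WRITE_REQ over the offload queue.
 */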
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

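/*
 * Send a management work request to configure a HW packet scheduler.
 * The skb allocation uses __GFP_NOFAIL, so no failure check is needed.
 */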
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}

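/*
 * Bind each queue set to the port it serves via packet scheduler
 * management commands; the -1 arguments leave the scheduler's min/max
 * parameters unchanged.
 */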
static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

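/*
 * Load the firmware image matching the version this driver expects,
 * obtained through request_firmware(), into the adapter.
 */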
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

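/*
 * Return the revision character used in protocol SRAM image names
 * ('b' for T3B, 'c' for T3C parts), or 0 if the revision needs no
 * protocol SRAM image.
 */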
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

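/*
 * Update the TP protocol engine SRAM with the image matching the TP
 * version this driver expects.  The image is validated with
 * t3_check_tpsram() before being written.
 */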
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;
	int must_load;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

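/*
 * Schedule the periodic adapter check task.  The period comes from the
 * link polling interval when link polling is enabled, otherwise from
 * the statistics update period.
 */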
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

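/*
 * Enable the adapter's offload capabilities: bring the adapter up if
 * needed, put the TP in offload mode, activate the offload layer, and
 * program the MTU and SMT tables before notifying registered clients.
 * On failure the non-offload state is restored.
 */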
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

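/*
 * Open handler for a port's net device: brings the adapter up on the
 * first open, marks the port active, optionally enables offload (a
 * failure there only warns), then starts the link, the port interrupt,
 * and the TX queue.
 */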
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		quiesce_rx(adapter);
		return err;
	}

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

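/*
 * Fill in netdevice statistics from the MAC statistics, taking
 * stats_lock to serialize against the periodic statistics update.
 */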
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

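/*
 * Translate a speed/duplex pair to the matching ethtool SUPPORTED_*
 * capability bit; 10Gbps exists in full duplex only.
 */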
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len, *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};

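/*
 * Range check used by the extension ioctls below: a negative value
 * means "leave this parameter unchanged" and always passes the check.
 */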
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

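/*
 * Dispatcher for the driver's private extension ioctls.  The 32-bit
 * command word is read from user space first; commands that modify
 * state check for CAP_NET_ADMIN, and those that resize HW resources
 * fail with -EBUSY once the adapter is fully initialized.
 */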
1662static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1663{
5fbf816f
DLR
1664 struct port_info *pi = netdev_priv(dev);
1665 struct adapter *adapter = pi->adapter;
4d22de3e 1666 u32 cmd;
5fbf816f 1667 int ret;
4d22de3e
DLR
1668
1669 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1670 return -EFAULT;
1671
1672 switch (cmd) {
4d22de3e
DLR
1673 case CHELSIO_SET_QSET_PARAMS:{
1674 int i;
1675 struct qset_params *q;
1676 struct ch_qset_params t;
1677
1678 if (!capable(CAP_NET_ADMIN))
1679 return -EPERM;
1680 if (copy_from_user(&t, useraddr, sizeof(t)))
1681 return -EFAULT;
1682 if (t.qset_idx >= SGE_QSETS)
1683 return -EINVAL;
1684 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1685 !in_range(t.cong_thres, 0, 255) ||
1686 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1687 MAX_TXQ_ENTRIES) ||
1688 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1689 MAX_TXQ_ENTRIES) ||
1690 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1691 MAX_CTRL_TXQ_ENTRIES) ||
1692 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1693 MAX_RX_BUFFERS)
1694 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1695 MAX_RX_JUMBO_BUFFERS)
1696 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1697 MAX_RSPQ_ENTRIES))
1698 return -EINVAL;
1699 if ((adapter->flags & FULL_INIT_DONE) &&
1700 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1701 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1702 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1703 t.polling >= 0 || t.cong_thres >= 0))
1704 return -EBUSY;
1705
1706 q = &adapter->params.sge.qset[t.qset_idx];
1707
1708 if (t.rspq_size >= 0)
1709 q->rspq_size = t.rspq_size;
1710 if (t.fl_size[0] >= 0)
1711 q->fl_size = t.fl_size[0];
1712 if (t.fl_size[1] >= 0)
1713 q->jumbo_size = t.fl_size[1];
1714 if (t.txq_size[0] >= 0)
1715 q->txq_size[0] = t.txq_size[0];
1716 if (t.txq_size[1] >= 0)
1717 q->txq_size[1] = t.txq_size[1];
1718 if (t.txq_size[2] >= 0)
1719 q->txq_size[2] = t.txq_size[2];
1720 if (t.cong_thres >= 0)
1721 q->cong_thres = t.cong_thres;
1722 if (t.intr_lat >= 0) {
1723 struct sge_qset *qs =
1724 &adapter->sge.qs[t.qset_idx];
1725
1726 q->coalesce_usecs = t.intr_lat;
1727 t3_update_qset_coalesce(qs, q);
1728 }
1729 if (t.polling >= 0) {
1730 if (adapter->flags & USING_MSIX)
1731 q->polling = t.polling;
1732 else {
1733 /* No polling with INTx for T3A */
1734 if (adapter->params.rev == 0 &&
1735 !(adapter->flags & USING_MSI))
1736 t.polling = 0;
1737
1738 for (i = 0; i < SGE_QSETS; i++) {
1739 q = &adapter->params.sge.
1740 qset[i];
1741 q->polling = t.polling;
1742 }
1743 }
1744 }
1745 break;
1746 }
1747 case CHELSIO_GET_QSET_PARAMS:{
1748 struct qset_params *q;
1749 struct ch_qset_params t;
1750
1751 if (copy_from_user(&t, useraddr, sizeof(t)))
1752 return -EFAULT;
1753 if (t.qset_idx >= SGE_QSETS)
1754 return -EINVAL;
1755
1756 q = &adapter->params.sge.qset[t.qset_idx];
1757 t.rspq_size = q->rspq_size;
1758 t.txq_size[0] = q->txq_size[0];
1759 t.txq_size[1] = q->txq_size[1];
1760 t.txq_size[2] = q->txq_size[2];
1761 t.fl_size[0] = q->fl_size;
1762 t.fl_size[1] = q->jumbo_size;
1763 t.polling = q->polling;
1764 t.intr_lat = q->coalesce_usecs;
1765 t.cong_thres = q->cong_thres;
1766
1767 if (copy_to_user(useraddr, &t, sizeof(t)))
1768 return -EFAULT;
1769 break;
1770 }
1771 case CHELSIO_SET_QSET_NUM:{
1772 struct ch_reg edata;
4d22de3e
DLR
1773 unsigned int i, first_qset = 0, other_qsets = 0;
1774
1775 if (!capable(CAP_NET_ADMIN))
1776 return -EPERM;
1777 if (adapter->flags & FULL_INIT_DONE)
1778 return -EBUSY;
1779 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1780 return -EFAULT;
1781 if (edata.val < 1 ||
1782 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1783 return -EINVAL;
1784
1785 for_each_port(adapter, i)
1786 if (adapter->port[i] && adapter->port[i] != dev)
1787 other_qsets += adap2pinfo(adapter, i)->nqsets;
1788
1789 if (edata.val + other_qsets > SGE_QSETS)
1790 return -EINVAL;
1791
1792 pi->nqsets = edata.val;
1793
1794 for_each_port(adapter, i)
1795 if (adapter->port[i]) {
1796 pi = adap2pinfo(adapter, i);
1797 pi->first_qset = first_qset;
1798 first_qset += pi->nqsets;
1799 }
1800 break;
1801 }
1802 case CHELSIO_GET_QSET_NUM:{
1803 struct ch_reg edata;
4d22de3e
DLR
1804
1805 edata.cmd = CHELSIO_GET_QSET_NUM;
1806 edata.val = pi->nqsets;
1807 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1808 return -EFAULT;
1809 break;
1810 }
1811 case CHELSIO_LOAD_FW:{
1812 u8 *fw_data;
1813 struct ch_mem_range t;
1814
1815 if (!capable(CAP_NET_ADMIN))
1816 return -EPERM;
1817 if (copy_from_user(&t, useraddr, sizeof(t)))
1818 return -EFAULT;
1819
1820 fw_data = kmalloc(t.len, GFP_KERNEL);
1821 if (!fw_data)
1822 return -ENOMEM;
1823
1824 if (copy_from_user
1825 (fw_data, useraddr + sizeof(t), t.len)) {
1826 kfree(fw_data);
1827 return -EFAULT;
1828 }
1829
1830 ret = t3_load_fw(adapter, fw_data, t.len);
1831 kfree(fw_data);
1832 if (ret)
1833 return ret;
1834 break;
1835 }
1836 case CHELSIO_SETMTUTAB:{
1837 struct ch_mtus m;
1838 int i;
1839
1840 if (!is_offload(adapter))
1841 return -EOPNOTSUPP;
1842 if (!capable(CAP_NET_ADMIN))
1843 return -EPERM;
1844 if (offload_running(adapter))
1845 return -EBUSY;
1846 if (copy_from_user(&m, useraddr, sizeof(m)))
1847 return -EFAULT;
1848 if (m.nmtus != NMTUS)
1849 return -EINVAL;
1850 if (m.mtus[0] < 81) /* accommodate SACK */
1851 return -EINVAL;
1852
1853 /* MTUs must be in ascending order */
1854 for (i = 1; i < NMTUS; ++i)
1855 if (m.mtus[i] < m.mtus[i - 1])
1856 return -EINVAL;
1857
1858 memcpy(adapter->params.mtus, m.mtus,
1859 sizeof(adapter->params.mtus));
1860 break;
1861 }
1862 case CHELSIO_GET_PM:{
1863 struct tp_params *p = &adapter->params.tp;
1864 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1865
1866 if (!is_offload(adapter))
1867 return -EOPNOTSUPP;
1868 m.tx_pg_sz = p->tx_pg_size;
1869 m.tx_num_pg = p->tx_num_pgs;
1870 m.rx_pg_sz = p->rx_pg_size;
1871 m.rx_num_pg = p->rx_num_pgs;
1872 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1873 if (copy_to_user(useraddr, &m, sizeof(m)))
1874 return -EFAULT;
1875 break;
1876 }
1877 case CHELSIO_SET_PM:{
1878 struct ch_pm m;
1879 struct tp_params *p = &adapter->params.tp;
1880
1881 if (!is_offload(adapter))
1882 return -EOPNOTSUPP;
1883 if (!capable(CAP_NET_ADMIN))
1884 return -EPERM;
1885 if (adapter->flags & FULL_INIT_DONE)
1886 return -EBUSY;
1887 if (copy_from_user(&m, useraddr, sizeof(m)))
1888 return -EFAULT;
d9da466a 1889 if (!is_power_of_2(m.rx_pg_sz) ||
1890 !is_power_of_2(m.tx_pg_sz))
4d22de3e
DLR
1891 return -EINVAL; /* not power of 2 */
1892 if (!(m.rx_pg_sz & 0x14000))
1893 return -EINVAL; /* not 16KB or 64KB */
1894 if (!(m.tx_pg_sz & 0x1554000))
1895 return -EINVAL; /* not 16KB/64KB/256KB/1MB/4MB/16MB */
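/* a page count of -1 keeps the current value */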
1896 if (m.tx_num_pg == -1)
1897 m.tx_num_pg = p->tx_num_pgs;
1898 if (m.rx_num_pg == -1)
1899 m.rx_num_pg = p->rx_num_pgs;
1900 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1901 return -EINVAL;
1902 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1903 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1904 return -EINVAL;
1905 p->rx_pg_size = m.rx_pg_sz;
1906 p->tx_pg_size = m.tx_pg_sz;
1907 p->rx_num_pgs = m.rx_num_pg;
1908 p->tx_num_pgs = m.tx_num_pg;
1909 break;
1910 }
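/*
 * Read a range of one of the MC7 memories (CM, PMRX or PMTX) and
 * stream it back to user space.  Requires the memory controllers,
 * i.e. a fully initialized adapter.
 */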
1911 case CHELSIO_GET_MEM:{
1912 struct ch_mem_range t;
1913 struct mc7 *mem;
1914 u64 buf[32];
1915
1916 if (!is_offload(adapter))
1917 return -EOPNOTSUPP;
1918 if (!(adapter->flags & FULL_INIT_DONE))
1919 return -EIO; /* need the memory controllers */
1920 if (copy_from_user(&t, useraddr, sizeof(t)))
1921 return -EFAULT;
1922 if ((t.addr & 7) || (t.len & 7))
1923 return -EINVAL;
1924 if (t.mem_id == MEM_CM)
1925 mem = &adapter->cm;
1926 else if (t.mem_id == MEM_PMRX)
1927 mem = &adapter->pmrx;
1928 else if (t.mem_id == MEM_PMTX)
1929 mem = &adapter->pmtx;
1930 else
1931 return -EINVAL;
1932
1933 /*
1825494a
DLR
1934 * Version scheme:
1935 * bits 0..9: chip version
1936 * bits 10..15: chip revision
1937 */
4d22de3e
DLR
1938 t.version = 3 | (adapter->params.rev << 10);
1939 if (copy_to_user(useraddr, &t, sizeof(t)))
1940 return -EFAULT;
1941
1942 /*
1943 * Read 256 bytes at a time as len can be large and we don't
1944 * want to use huge intermediate buffers.
1945 */
1946 useraddr += sizeof(t); /* advance to start of buffer */
1947 while (t.len) {
1948 unsigned int chunk =
1949 min_t(unsigned int, t.len, sizeof(buf));
1950
1951 ret = t3_mc7_bd_read(mem, t.addr / 8,
1952 chunk / 8, buf);
1954 if (ret)
1955 return ret;
1956 if (copy_to_user(useraddr, buf, chunk))
1957 return -EFAULT;
1958 useraddr += chunk;
1959 t.addr += chunk;
1960 t.len -= chunk;
1961 }
1962 break;
1963 }
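/*
 * Program the Tx and/or Rx packet trace filters.  The cast of &t.sip
 * below relies on the filter fields of struct ch_trace being laid out
 * exactly like struct trace_params.
 */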
1964 case CHELSIO_SET_TRACE_FILTER:{
1965 struct ch_trace t;
1966 const struct trace_params *tp;
1967
1968 if (!capable(CAP_NET_ADMIN))
1969 return -EPERM;
1970 if (!offload_running(adapter))
1971 return -EAGAIN;
1972 if (copy_from_user(&t, useraddr, sizeof(t)))
1973 return -EFAULT;
1974
1975 tp = (const struct trace_params *)&t.sip;
1976 if (t.config_tx)
1977 t3_config_trace_filter(adapter, tp, 0,
1978 t.invert_match,
1979 t.trace_tx);
1980 if (t.config_rx)
1981 t3_config_trace_filter(adapter, tp, 1,
1982 t.invert_match,
1983 t.trace_rx);
1984 break;
1985 }
4d22de3e
DLR
1986 default:
1987 return -EOPNOTSUPP;
1988 }
1989 return 0;
1990}
1991
1992static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1993{
4d22de3e 1994 struct mii_ioctl_data *data = if_mii(req);
5fbf816f
DLR
1995 struct port_info *pi = netdev_priv(dev);
1996 struct adapter *adapter = pi->adapter;
1997 int ret, mmd;
4d22de3e
DLR
1998
1999 switch (cmd) {
2000 case SIOCGMIIPHY:
2001 data->phy_id = pi->phy.addr;
2002 /* FALLTHRU */
2003 case SIOCGMIIREG:{
2004 u32 val;
2005 struct cphy *phy = &pi->phy;
2006
2007 if (!phy->mdio_read)
2008 return -EOPNOTSUPP;
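/*
 * 10G PHYs are clause-45 devices: the MMD address rides in the
 * upper bits of phy_id, defaulting to the PCS device.  1G PHYs
 * use plain clause-22 addressing with 5-bit register numbers.
 */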
2009 if (is_10G(adapter)) {
2010 mmd = data->phy_id >> 8;
2011 if (!mmd)
2012 mmd = MDIO_DEV_PCS;
2013 else if (mmd > MDIO_DEV_XGXS)
2014 return -EINVAL;
2015
2016 ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
2017 mmd, data->reg_num, &val);
2018 } else {
2019 ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
2020 0, data->reg_num & 0x1f, &val);
2021 }
2024 if (!ret)
2025 data->val_out = val;
2026 break;
2027 }
2028 case SIOCSMIIREG:{
2029 struct cphy *phy = &pi->phy;
2030
2031 if (!capable(CAP_NET_ADMIN))
2032 return -EPERM;
2033 if (!phy->mdio_write)
2034 return -EOPNOTSUPP;
2035 if (is_10G(adapter)) {
2036 mmd = data->phy_id >> 8;
2037 if (!mmd)
2038 mmd = MDIO_DEV_PCS;
2039 else if (mmd > MDIO_DEV_XGXS)
2040 return -EINVAL;
2041
2042 ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
2043 mmd, data->reg_num, data->val_in);
2044 } else {
2045 ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
2046 0, data->reg_num & 0x1f, data->val_in);
2047 }
2053 break;
2054 }
2055 case SIOCCHIOCTL:
2056 return cxgb_extension_ioctl(dev, req->ifr_data);
2057 default:
2058 return -EOPNOTSUPP;
2059 }
2060 return ret;
2061}
2062
2063static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2064{
4d22de3e 2065 struct port_info *pi = netdev_priv(dev);
5fbf816f
DLR
2066 struct adapter *adapter = pi->adapter;
2067 int ret;
4d22de3e
DLR
2068
2069 if (new_mtu < 81) /* accommodate SACK */
2070 return -EINVAL;
2071 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2072 return ret;
2073 dev->mtu = new_mtu;
2074 init_port_mtus(adapter);
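/*
 * On rev 0 parts the hardware MTU table is capped by the port MTU,
 * so reload it whenever offload is active.
 */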
2075 if (adapter->params.rev == 0 && offload_running(adapter))
2076 t3_load_mtus(adapter, adapter->params.mtus,
2077 adapter->params.a_wnd, adapter->params.b_wnd,
2078 adapter->port[0]->mtu);
2079 return 0;
2080}
2081
2082static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2083{
4d22de3e 2084 struct port_info *pi = netdev_priv(dev);
5fbf816f 2085 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
2086 struct sockaddr *addr = p;
2087
2088 if (!is_valid_ether_addr(addr->sa_data))
2089 return -EINVAL;
2090
2091 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2092 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2093 if (offload_running(adapter))
2094 write_smt_entry(adapter, pi->port_id);
2095 return 0;
2096}
2097
2098/**
2099 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2100 * @adap: the adapter
2101 * @p: the port
2102 *
2103 * Ensures that current Rx processing on any of the queues associated with
2104 * the given port completes before returning. We do this by acquiring and
2105 * releasing the locks of the response queues associated with the port.
2106 */
2107static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2108{
2109 int i;
2110
2111 for (i = 0; i < p->nqsets; i++) {
2112 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2113
2114 spin_lock_irq(&q->lock);
2115 spin_unlock_irq(&q->lock);
2116 }
2117}
2118
2119static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2120{
4d22de3e 2121 struct port_info *pi = netdev_priv(dev);
5fbf816f 2122 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
2123
2124 pi->vlan_grp = grp;
2125 if (adapter->params.rev > 0) {
2126 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2127 } else {
2128 /* single control for all ports */
2129 unsigned int i, have_vlans = 0;
2130 for_each_port(adapter, i)
2131 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2132
2133 t3_set_vlan_accel(adapter, 1, have_vlans);
2134 }
2135 t3_synchronize_rx(adapter, pi);
2136}
2137
4d22de3e
DLR
2138#ifdef CONFIG_NET_POLL_CONTROLLER
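/*
 * Service all queue sets of a port by invoking the SGE interrupt
 * handler directly; used by netpoll (e.g. netconsole) when regular
 * interrupt delivery cannot be relied upon.
 */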
2139static void cxgb_netpoll(struct net_device *dev)
2140{
890de332 2141 struct port_info *pi = netdev_priv(dev);
5fbf816f 2142 struct adapter *adapter = pi->adapter;
890de332 2143 int qidx;
4d22de3e 2144
890de332
DLR
2145 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2146 struct sge_qset *qs = &adapter->sge.qs[qidx];
2147 void *source;
2148
2149 if (adapter->flags & USING_MSIX)
2150 source = qs;
2151 else
2152 source = adapter;
2153
2154 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2155 }
4d22de3e
DLR
2156}
2157#endif
2158
2159/*
2160 * Periodic accumulation of MAC statistics.
2161 */
2162static void mac_stats_update(struct adapter *adapter)
2163{
2164 int i;
2165
2166 for_each_port(adapter, i) {
2167 struct net_device *dev = adapter->port[i];
2168 struct port_info *p = netdev_priv(dev);
2169
2170 if (netif_running(dev)) {
2171 spin_lock(&adapter->stats_lock);
2172 t3_mac_update_stats(&p->mac);
2173 spin_unlock(&adapter->stats_lock);
2174 }
2175 }
2176}
2177
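/*
 * Poll link state on ports whose PHYs lack an interrupt pin.
 */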
2178static void check_link_status(struct adapter *adapter)
2179{
2180 int i;
2181
2182 for_each_port(adapter, i) {
2183 struct net_device *dev = adapter->port[i];
2184 struct port_info *p = netdev_priv(dev);
2185
2186 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2187 t3_link_changed(adapter, i);
2188 }
2189}
2190
fc90664e
DLR
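/*
 * T3B2 MAC watchdog: detect a hung MAC and either toggle it back to
 * life or, failing that, reprogram the MAC, rx mode and link from
 * scratch.
 */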
2191static void check_t3b2_mac(struct adapter *adapter)
2192{
2193 int i;
2194
f2d961c9
DLR
2195 if (!rtnl_trylock()) /* synchronize with ifdown */
2196 return;
2197
fc90664e
DLR
2198 for_each_port(adapter, i) {
2199 struct net_device *dev = adapter->port[i];
2200 struct port_info *p = netdev_priv(dev);
2201 int status;
2202
2203 if (!netif_running(dev))
2204 continue;
2205
2206 status = 0;
6d6dabac 2207 if (netif_carrier_ok(dev)) /* netif_running() already checked above */
fc90664e
DLR
2208 status = t3b2_mac_watchdog_task(&p->mac);
2209 if (status == 1)
2210 p->mac.stats.num_toggled++;
2211 else if (status == 2) {
2212 struct cmac *mac = &p->mac;
2213
2214 t3_mac_set_mtu(mac, dev->mtu);
2215 t3_mac_set_address(mac, 0, dev->dev_addr);
2216 cxgb_set_rxmode(dev);
2217 t3_link_start(&p->phy, mac, &p->link_config);
2218 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2219 t3_port_intr_enable(adapter, p->port_id);
2220 p->mac.stats.num_resets++;
2221 }
2222 }
2223 rtnl_unlock();
2224}
2225
2226
4d22de3e
DLR
2227static void t3_adap_check_task(struct work_struct *work)
2228{
2229 struct adapter *adapter = container_of(work, struct adapter,
2230 adap_check_task.work);
2231 const struct adapter_params *p = &adapter->params;
2232
2233 adapter->check_task_cnt++;
2234
2235 /* Check link status for PHYs without interrupts */
2236 if (p->linkpoll_period)
2237 check_link_status(adapter);
2238
2239 /* Accumulate MAC stats if needed */
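/* (linkpoll_period is in units of 0.1 s, hence the "/ 10" below) */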
2240 if (!p->linkpoll_period ||
2241 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2242 p->stats_update_period) {
2243 mac_stats_update(adapter);
2244 adapter->check_task_cnt = 0;
2245 }
2246
fc90664e
DLR
2247 if (p->rev == T3_REV_B2)
2248 check_t3b2_mac(adapter);
2249
4d22de3e
DLR
2250 /* Schedule the next check update if any port is active. */
2251 spin_lock(&adapter->work_lock);
2252 if (adapter->open_device_map & PORT_MASK)
2253 schedule_chk_task(adapter);
2254 spin_unlock(&adapter->work_lock);
2255}
2256
2257/*
2258 * Processes external (PHY) interrupts in process context.
2259 */
2260static void ext_intr_task(struct work_struct *work)
2261{
2262 struct adapter *adapter = container_of(work, struct adapter,
2263 ext_intr_handler_task);
2264
2265 t3_phy_intr_handler(adapter);
2266
2267 /* Now reenable external interrupts */
2268 spin_lock_irq(&adapter->work_lock);
2269 if (adapter->slow_intr_mask) {
2270 adapter->slow_intr_mask |= F_T3DBG;
2271 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2272 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2273 adapter->slow_intr_mask);
2274 }
2275 spin_unlock_irq(&adapter->work_lock);
2276}
2277
2278/*
2279 * Interrupt-context handler for external (PHY) interrupts.
2280 */
2281void t3_os_ext_intr_handler(struct adapter *adapter)
2282{
2283 /*
2284 * Schedule a task to handle external interrupts as they may be slow
2285 * and we use a mutex to protect MDIO registers. We disable PHY
2286 * interrupts in the meantime and let the task reenable them when
2287 * it's done.
2288 */
2289 spin_lock(&adapter->work_lock);
2290 if (adapter->slow_intr_mask) {
2291 adapter->slow_intr_mask &= ~F_T3DBG;
2292 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2293 adapter->slow_intr_mask);
2294 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2295 }
2296 spin_unlock(&adapter->work_lock);
2297}
2298
2299void t3_fatal_err(struct adapter *adapter)
2300{
2301 unsigned int fw_status[4];
2302
2303 if (adapter->flags & FULL_INIT_DONE) {
2304 t3_sge_stop(adapter);
c64c2eae
DLR
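/* silence the XGMACs on both ports before masking interrupts */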
2305 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2306 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2307 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2308 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
4d22de3e
DLR
2309 t3_intr_disable(adapter);
2310 }
2311 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2312 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2313 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2314 fw_status[0], fw_status[1],
2315 fw_status[2], fw_status[3]);
2316
2317}
2318
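/*
 * Try to claim one MSI-X vector per SGE queue set plus one for
 * slow-path (asynchronous) events.  A partial grant is declined.
 */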
2319static int __devinit cxgb_enable_msix(struct adapter *adap)
2320{
2321 struct msix_entry entries[SGE_QSETS + 1];
2322 int i, err;
2323
2324 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2325 entries[i].entry = i;
2326
2327 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2328 if (!err) {
2329 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2330 adap->msix_info[i].vec = entries[i].vector;
2331 } else if (err > 0)
2332 dev_info(&adap->pdev->dev,
2333 "only %d MSI-X vectors left, not using MSI-X\n", err);
2334 return err;
2335}
2336
2337static void __devinit print_port_info(struct adapter *adap,
2338 const struct adapter_info *ai)
2339{
2340 static const char *pci_variant[] = {
2341 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2342 };
2343
2344 int i;
2345 char buf[80];
2346
2347 if (is_pcie(adap))
2348 snprintf(buf, sizeof(buf), "%s x%d",
2349 pci_variant[adap->params.pci.variant],
2350 adap->params.pci.width);
2351 else
2352 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2353 pci_variant[adap->params.pci.variant],
2354 adap->params.pci.speed, adap->params.pci.width);
2355
2356 for_each_port(adap, i) {
2357 struct net_device *dev = adap->port[i];
2358 const struct port_info *pi = netdev_priv(dev);
2359
2360 if (!test_bit(i, &adap->registered_device_map))
2361 continue;
8ac3ba68 2362 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
4d22de3e 2363 dev->name, ai->desc, pi->port_type->desc,
8ac3ba68 2364 is_offload(adap) ? "R" : "", adap->params.rev, buf,
4d22de3e
DLR
2365 (adap->flags & USING_MSIX) ? " MSI-X" :
2366 (adap->flags & USING_MSI) ? " MSI" : "");
2367 if (adap->name == dev->name && adap->params.vpd.mclk)
167cdf5f
DLR
2368 printk(KERN_INFO
2369 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
4d22de3e
DLR
2370 adap->name, t3_mc7_size(&adap->cm) >> 20,
2371 t3_mc7_size(&adap->pmtx) >> 20,
167cdf5f
DLR
2372 t3_mc7_size(&adap->pmrx) >> 20,
2373 adap->params.vpd.sn);
4d22de3e
DLR
2374 }
2375}
2376
2377static int __devinit init_one(struct pci_dev *pdev,
2378 const struct pci_device_id *ent)
2379{
2380 static int version_printed;
2381
2382 int i, err, pci_using_dac = 0;
2383 unsigned long mmio_start, mmio_len;
2384 const struct adapter_info *ai;
2385 struct adapter *adapter = NULL;
2386 struct port_info *pi;
2387
2388 if (!version_printed) {
2389 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2390 ++version_printed;
2391 }
2392
2393 if (!cxgb3_wq) {
2394 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2395 if (!cxgb3_wq) {
2396 printk(KERN_ERR DRV_NAME
2397 ": cannot initialize work queue\n");
2398 return -ENOMEM;
2399 }
2400 }
2401
2402 err = pci_request_regions(pdev, DRV_NAME);
2403 if (err) {
2404 /* Just info, some other driver may have claimed the device. */
2405 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2406 return err;
2407 }
2408
2409 err = pci_enable_device(pdev);
2410 if (err) {
2411 dev_err(&pdev->dev, "cannot enable PCI device\n");
2412 goto out_release_regions;
2413 }
2414
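/*
 * Prefer 64-bit DMA and fall back to 32-bit if the platform cannot
 * do 64-bit addressing; pci_using_dac gates NETIF_F_HIGHDMA below.
 */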
2415 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2416 pci_using_dac = 1;
2417 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2418 if (err) {
2419 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2420 "coherent allocations\n");
2421 goto out_disable_device;
2422 }
2423 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2424 dev_err(&pdev->dev, "no usable DMA configuration\n");
2425 goto out_disable_device;
2426 }
2427
2428 pci_set_master(pdev);
2429
2430 mmio_start = pci_resource_start(pdev, 0);
2431 mmio_len = pci_resource_len(pdev, 0);
2432 ai = t3_get_adapter_info(ent->driver_data);
2433
2434 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2435 if (!adapter) {
2436 err = -ENOMEM;
2437 goto out_disable_device;
2438 }
2439
2440 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2441 if (!adapter->regs) {
2442 dev_err(&pdev->dev, "cannot map device registers\n");
2443 err = -ENOMEM;
2444 goto out_free_adapter;
2445 }
2446
2447 adapter->pdev = pdev;
2448 adapter->name = pci_name(pdev);
2449 adapter->msg_enable = dflt_msg_enable;
2450 adapter->mmio_len = mmio_len;
2451
2452 mutex_init(&adapter->mdio_lock);
2453 spin_lock_init(&adapter->work_lock);
2454 spin_lock_init(&adapter->stats_lock);
2455
2456 INIT_LIST_HEAD(&adapter->adapter_list);
2457 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2458 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2459
2460 for (i = 0; i < ai->nports; ++i) {
2461 struct net_device *netdev;
2462
2463 netdev = alloc_etherdev(sizeof(struct port_info));
2464 if (!netdev) {
2465 err = -ENOMEM;
2466 goto out_free_dev;
2467 }
2468
4d22de3e
DLR
2469 SET_NETDEV_DEV(netdev, &pdev->dev);
2470
2471 adapter->port[i] = netdev;
2472 pi = netdev_priv(netdev);
5fbf816f 2473 pi->adapter = adapter;
4d22de3e
DLR
2474 pi->rx_csum_offload = 1;
2475 pi->nqsets = 1;
2476 pi->first_qset = i;
2477 pi->activity = 0;
2478 pi->port_id = i;
2479 netif_carrier_off(netdev);
2480 netdev->irq = pdev->irq;
2481 netdev->mem_start = mmio_start;
2482 netdev->mem_end = mmio_start + mmio_len - 1;
4d22de3e
DLR
2483 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2484 netdev->features |= NETIF_F_LLTX;
2485 if (pci_using_dac)
2486 netdev->features |= NETIF_F_HIGHDMA;
2487
2488 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2489 netdev->vlan_rx_register = vlan_rx_register;
4d22de3e
DLR
2490
2491 netdev->open = cxgb_open;
2492 netdev->stop = cxgb_close;
2493 netdev->hard_start_xmit = t3_eth_xmit;
2494 netdev->get_stats = cxgb_get_stats;
2495 netdev->set_multicast_list = cxgb_set_rxmode;
2496 netdev->do_ioctl = cxgb_ioctl;
2497 netdev->change_mtu = cxgb_change_mtu;
2498 netdev->set_mac_address = cxgb_set_mac_addr;
2499#ifdef CONFIG_NET_POLL_CONTROLLER
2500 netdev->poll_controller = cxgb_netpoll;
2501#endif
4d22de3e
DLR
2502
2503 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2504 }
2505
5fbf816f 2506 pci_set_drvdata(pdev, adapter);
4d22de3e
DLR
2507 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2508 err = -ENODEV;
2509 goto out_free_dev;
2510 }
480fe1a3 2511
4d22de3e
DLR
2512 /*
2513 * The card is now ready to go. If any errors occur during device
2514 * registration we do not fail the whole card but rather proceed only
2515 * with the ports we manage to register successfully. However we must
2516 * register at least one net device.
2517 */
2518 for_each_port(adapter, i) {
2519 err = register_netdev(adapter->port[i]);
2520 if (err)
2521 dev_warn(&pdev->dev,
2522 "cannot register net device %s, skipping\n",
2523 adapter->port[i]->name);
2524 else {
2525 /*
2526 * Change the name we use for messages to the name of
2527 * the first successfully registered interface.
2528 */
2529 if (!adapter->registered_device_map)
2530 adapter->name = adapter->port[i]->name;
2531
2532 __set_bit(i, &adapter->registered_device_map);
2533 }
2534 }
2535 if (!adapter->registered_device_map) {
2536 dev_err(&pdev->dev, "could not register any net devices\n");
2537 goto out_free_dev;
2538 }
2539
2540 /* Driver's ready. Reflect it on LEDs */
2541 t3_led_ready(adapter);
2542
2543 if (is_offload(adapter)) {
2544 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2545 cxgb3_adapter_ofld(adapter);
2546 }
2547
2548 /* See what interrupts we'll be using */
2549 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2550 adapter->flags |= USING_MSIX;
2551 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2552 adapter->flags |= USING_MSI;
2553
0ee8d33c 2554 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
4d22de3e
DLR
2555 &cxgb3_attr_group);
2556
2557 print_port_info(adapter, ai);
2558 return 0;
2559
2560out_free_dev:
2561 iounmap(adapter->regs);
2562 for (i = ai->nports - 1; i >= 0; --i)
2563 if (adapter->port[i])
2564 free_netdev(adapter->port[i]);
2565
2566out_free_adapter:
2567 kfree(adapter);
2568
2569out_disable_device:
2570 pci_disable_device(pdev);
2571out_release_regions:
2572 pci_release_regions(pdev);
2573 pci_set_drvdata(pdev, NULL);
2574 return err;
2575}
2576
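/*
 * Release everything acquired in init_one, in reverse order: stop the
 * SGE, drop the sysfs group and offload state, unregister and free the
 * net devices, then unmap and release the PCI resources.
 */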
2577static void __devexit remove_one(struct pci_dev *pdev)
2578{
5fbf816f 2579 struct adapter *adapter = pci_get_drvdata(pdev);
4d22de3e 2580
5fbf816f 2581 if (adapter) {
4d22de3e 2582 int i;
4d22de3e
DLR
2583
2584 t3_sge_stop(adapter);
0ee8d33c 2585 sysfs_remove_group(&adapter->port[0]->dev.kobj,
4d22de3e
DLR
2586 &cxgb3_attr_group);
2587
4d22de3e
DLR
2588 if (is_offload(adapter)) {
2589 cxgb3_adapter_unofld(adapter);
2590 if (test_bit(OFFLOAD_DEVMAP_BIT,
2591 &adapter->open_device_map))
2592 offload_close(&adapter->tdev);
2593 }
2594
67d92ab7
DLR
2595 for_each_port(adapter, i)
2596 if (test_bit(i, &adapter->registered_device_map))
2597 unregister_netdev(adapter->port[i]);
2598
4d22de3e
DLR
2599 t3_free_sge_resources(adapter);
2600 cxgb_disable_msi(adapter);
2601
4d22de3e
DLR
2602 for_each_port(adapter, i)
2603 if (adapter->port[i])
2604 free_netdev(adapter->port[i]);
2605
2606 iounmap(adapter->regs);
2607 kfree(adapter);
2608 pci_release_regions(pdev);
2609 pci_disable_device(pdev);
2610 pci_set_drvdata(pdev, NULL);
2611 }
2612}
2613
2614static struct pci_driver driver = {
2615 .name = DRV_NAME,
2616 .id_table = cxgb3_pci_tbl,
2617 .probe = init_one,
2618 .remove = __devexit_p(remove_one),
2619};
2620
2621static int __init cxgb3_init_module(void)
2622{
2623 cxgb3_offload_init();
2624
2625 return pci_register_driver(&driver);
2629}
2630
2631static void __exit cxgb3_cleanup_module(void)
2632{
2633 pci_unregister_driver(&driver);
2634 if (cxgb3_wq)
2635 destroy_workqueue(cxgb3_wq);
2636}
2637
2638module_init(cxgb3_init_module);
2639module_exit(cxgb3_cleanup_module);