/* drivers/net/cxgb3/cxgb3_main.c */
/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1, 1),	/* T302E */
	CH_DEVICE(0x22, 1, 2),	/* T310E */
	CH_DEVICE(0x23, 1, 3),	/* T320X */
	CH_DEVICE(0x24, 1, 1),	/* T302X */
	CH_DEVICE(0x25, 1, 3),	/* T320E */
	CH_DEVICE(0x26, 1, 2),	/* T310X */
	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose port's settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

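/*
 * Propagate the current Rx mode of a net device (promiscuity, multicast
 * addresses, etc.) to the port's MAC.
 */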
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

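/*
 * Release whichever of MSI-X or MSI the adapter is using, falling back to
 * legacy pin interrupts.
 */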
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

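/*
 * Request the MSI-X data interrupts, one per queue set.  On failure free any
 * vectors acquired so far and return the error.
 */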
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

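/*
 * Register a NAPI instance for each queue set that has been initialized,
 * i.e., has its adapter backpointer set.
 */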
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

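/*
 * Generic show/store helpers for the sysfs attributes below.  They serialize
 * against ioctls that may shut down the device by taking the rtnl lock, and
 * range-check values written through sysfs.
 */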
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
			 char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

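/*
 * Populate a source MAC table (SMT) entry for the given port with the port's
 * Ethernet address, by sending a CPL_SMT_WRITE_REQ to the hardware.
 */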
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

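/*
 * Set up an SMT entry for each port of the adapter.
 */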
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

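/*
 * Write the MTUs of the (up to two) ports into the hardware MTU table, with
 * port 1's MTU in the upper 16 bits of the register.
 */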
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

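/*
 * Send a management work request to the firmware to configure one of the
 * packet scheduler's bindings, e.g., to bind a queue set to a port.
 */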
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}

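/*
 * Bind each of a port's queue sets to that port in the firmware's packet
 * scheduler.
 */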
static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

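/*
 * Load a new firmware image onto the adapter via request_firmware() and
 * report the result.
 */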
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

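/*
 * Map the chip revision to the character used in protocol SRAM image file
 * names, or 0 if this revision needs no protocol SRAM update.
 */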
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	}
	return rev;
}

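/*
 * Load the protocol SRAM image appropriate for this chip revision, after
 * validating it against the adapter.  Returning 0 without loading anything
 * means this revision needs no update.
 */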
int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;
	int must_load;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL)
			err = upgrade_fw(adap);
		if (err)
			goto out;

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

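/*
 * (Re)arm the periodic adapter check task.  The interval is derived from the
 * link polling period if one is configured, otherwise from the statistics
 * update period.
 */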
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

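/*
 * Bring up the offload capabilities of the adapter: mark the offload device
 * as open, activate the offload layer, program the MTU and SMT tables,
 * expose the traffic-scheduler sysfs attributes, and notify registered
 * clients.
 */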
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

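/*
 * Undo offload_open(): detach registered clients, remove the sysfs
 * attributes, and turn off offload mode, taking the whole adapter down if no
 * ports remain open.
 */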
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

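/*
 * net_device open handler.  Brings up the adapter on first open, then
 * enables the port and, if applicable, its offload capabilities.
 */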
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		quiesce_rx(adapter);
		return err;
	}

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

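/*
 * net_device stop handler.  Quiesces and powers down the port and takes the
 * adapter down once the last port and the offload device are closed.
 */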
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

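/*
 * Fill in the net_device statistics from the accumulated MAC statistics,
 * updating the latter under the adapter's stats lock.
 */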
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};

static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

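/*
 * Sum one of the per-queue-set SGE statistics across all the queue sets of a
 * port.
 */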
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}

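/*
 * Read a contiguous block of registers [start, end] into a register dump
 * buffer at the offsets matching the register addresses.
 */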
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

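/*
 * Identify the adapter by blinking its LED through GPIO0, toggling every
 * half second for the requested number of seconds (2 by default).
 */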
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

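/*
 * Translate a speed/duplex pair into the corresponding ethtool SUPPORTED_*
 * capability bit, or 0 for unsupported combinations.
 */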
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len, *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_stats_count = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
};

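/*
 * Check that a parameter from userspace is within [lo, hi].  Negative values
 * pass the check by convention: they mean "leave this setting unchanged".
 */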
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

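/*
 * Handler for the Chelsio-private ioctls: queue set and scheduler
 * configuration, firmware loading, MTU and memory controller access, and
 * trace filters.
 */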
1655static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1656{
5fbf816f
DLR
1657 struct port_info *pi = netdev_priv(dev);
1658 struct adapter *adapter = pi->adapter;
4d22de3e 1659 u32 cmd;
5fbf816f 1660 int ret;
4d22de3e
DLR
1661
1662 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1663 return -EFAULT;
1664
1665 switch (cmd) {
4d22de3e
DLR
1666 case CHELSIO_SET_QSET_PARAMS:{
1667 int i;
1668 struct qset_params *q;
1669 struct ch_qset_params t;
1670
1671 if (!capable(CAP_NET_ADMIN))
1672 return -EPERM;
1673 if (copy_from_user(&t, useraddr, sizeof(t)))
1674 return -EFAULT;
1675 if (t.qset_idx >= SGE_QSETS)
1676 return -EINVAL;
1677 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1678 !in_range(t.cong_thres, 0, 255) ||
1679 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1680 MAX_TXQ_ENTRIES) ||
1681 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1682 MAX_TXQ_ENTRIES) ||
1683 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1684 MAX_CTRL_TXQ_ENTRIES) ||
1685 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1686 MAX_RX_BUFFERS)
1687 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1688 MAX_RX_JUMBO_BUFFERS)
1689 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1690 MAX_RSPQ_ENTRIES))
1691 return -EINVAL;
1692 if ((adapter->flags & FULL_INIT_DONE) &&
1693 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1694 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1695 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1696 t.polling >= 0 || t.cong_thres >= 0))
1697 return -EBUSY;
1698
1699 q = &adapter->params.sge.qset[t.qset_idx];
1700
1701 if (t.rspq_size >= 0)
1702 q->rspq_size = t.rspq_size;
1703 if (t.fl_size[0] >= 0)
1704 q->fl_size = t.fl_size[0];
1705 if (t.fl_size[1] >= 0)
1706 q->jumbo_size = t.fl_size[1];
1707 if (t.txq_size[0] >= 0)
1708 q->txq_size[0] = t.txq_size[0];
1709 if (t.txq_size[1] >= 0)
1710 q->txq_size[1] = t.txq_size[1];
1711 if (t.txq_size[2] >= 0)
1712 q->txq_size[2] = t.txq_size[2];
1713 if (t.cong_thres >= 0)
1714 q->cong_thres = t.cong_thres;
1715 if (t.intr_lat >= 0) {
1716 struct sge_qset *qs =
1717 &adapter->sge.qs[t.qset_idx];
1718
1719 q->coalesce_usecs = t.intr_lat;
1720 t3_update_qset_coalesce(qs, q);
1721 }
1722 if (t.polling >= 0) {
1723 if (adapter->flags & USING_MSIX)
1724 q->polling = t.polling;
1725 else {
1726 /* No polling with INTx for T3A */
1727 if (adapter->params.rev == 0 &&
1728 !(adapter->flags & USING_MSI))
1729 t.polling = 0;
1730
1731 for (i = 0; i < SGE_QSETS; i++) {
1732 q = &adapter->params.sge.
1733 qset[i];
1734 q->polling = t.polling;
1735 }
1736 }
1737 }
1738 break;
1739 }
1740 case CHELSIO_GET_QSET_PARAMS:{
1741 struct qset_params *q;
1742 struct ch_qset_params t;
1743
1744 if (copy_from_user(&t, useraddr, sizeof(t)))
1745 return -EFAULT;
1746 if (t.qset_idx >= SGE_QSETS)
1747 return -EINVAL;
1748
1749 q = &adapter->params.sge.qset[t.qset_idx];
1750 t.rspq_size = q->rspq_size;
1751 t.txq_size[0] = q->txq_size[0];
1752 t.txq_size[1] = q->txq_size[1];
1753 t.txq_size[2] = q->txq_size[2];
1754 t.fl_size[0] = q->fl_size;
1755 t.fl_size[1] = q->jumbo_size;
1756 t.polling = q->polling;
1757 t.intr_lat = q->coalesce_usecs;
1758 t.cong_thres = q->cong_thres;
1759
1760 if (copy_to_user(useraddr, &t, sizeof(t)))
1761 return -EFAULT;
1762 break;
1763 }
1764 case CHELSIO_SET_QSET_NUM:{
1765 struct ch_reg edata;
1766 struct port_info *pi = netdev_priv(dev);
1767 unsigned int i, first_qset = 0, other_qsets = 0;
1768
1769 if (!capable(CAP_NET_ADMIN))
1770 return -EPERM;
1771 if (adapter->flags & FULL_INIT_DONE)
1772 return -EBUSY;
1773 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1774 return -EFAULT;
1775 if (edata.val < 1 ||
1776 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1777 return -EINVAL;
1778
1779 for_each_port(adapter, i)
1780 if (adapter->port[i] && adapter->port[i] != dev)
1781 other_qsets += adap2pinfo(adapter, i)->nqsets;
1782
1783 if (edata.val + other_qsets > SGE_QSETS)
1784 return -EINVAL;
1785
1786 pi->nqsets = edata.val;
1787
1788 for_each_port(adapter, i)
1789 if (adapter->port[i]) {
1790 pi = adap2pinfo(adapter, i);
1791 pi->first_qset = first_qset;
1792 first_qset += pi->nqsets;
1793 }
1794 break;
1795 }
1796 case CHELSIO_GET_QSET_NUM:{
1797 struct ch_reg edata;
1798 struct port_info *pi = netdev_priv(dev);
1799
1800 edata.cmd = CHELSIO_GET_QSET_NUM;
1801 edata.val = pi->nqsets;
1802 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1803 return -EFAULT;
1804 break;
1805 }
1806 case CHELSIO_LOAD_FW:{
1807 u8 *fw_data;
1808 struct ch_mem_range t;
1809
1810 if (!capable(CAP_NET_ADMIN))
1811 return -EPERM;
1812 if (copy_from_user(&t, useraddr, sizeof(t)))
1813 return -EFAULT;
1814
1815 fw_data = kmalloc(t.len, GFP_KERNEL);
1816 if (!fw_data)
1817 return -ENOMEM;
1818
1819 if (copy_from_user
1820 (fw_data, useraddr + sizeof(t), t.len)) {
1821 kfree(fw_data);
1822 return -EFAULT;
1823 }
1824
1825 ret = t3_load_fw(adapter, fw_data, t.len);
1826 kfree(fw_data);
1827 if (ret)
1828 return ret;
1829 break;
1830 }
1831 case CHELSIO_SETMTUTAB:{
1832 struct ch_mtus m;
1833 int i;
1834
1835 if (!is_offload(adapter))
1836 return -EOPNOTSUPP;
1837 if (!capable(CAP_NET_ADMIN))
1838 return -EPERM;
1839 if (offload_running(adapter))
1840 return -EBUSY;
1841 if (copy_from_user(&m, useraddr, sizeof(m)))
1842 return -EFAULT;
1843 if (m.nmtus != NMTUS)
1844 return -EINVAL;
1845 if (m.mtus[0] < 81) /* accommodate SACK */
1846 return -EINVAL;
1847
1848 /* MTUs must be in ascending order */
1849 for (i = 1; i < NMTUS; ++i)
1850 if (m.mtus[i] < m.mtus[i - 1])
1851 return -EINVAL;
1852
1853 memcpy(adapter->params.mtus, m.mtus,
1854 sizeof(adapter->params.mtus));
1855 break;
1856 }
1857 case CHELSIO_GET_PM:{
1858 struct tp_params *p = &adapter->params.tp;
1859 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1860
1861 if (!is_offload(adapter))
1862 return -EOPNOTSUPP;
1863 m.tx_pg_sz = p->tx_pg_size;
1864 m.tx_num_pg = p->tx_num_pgs;
1865 m.rx_pg_sz = p->rx_pg_size;
1866 m.rx_num_pg = p->rx_num_pgs;
1867 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1868 if (copy_to_user(useraddr, &m, sizeof(m)))
1869 return -EFAULT;
1870 break;
1871 }
1872 case CHELSIO_SET_PM:{
1873 struct ch_pm m;
1874 struct tp_params *p = &adapter->params.tp;
1875
1876 if (!is_offload(adapter))
1877 return -EOPNOTSUPP;
1878 if (!capable(CAP_NET_ADMIN))
1879 return -EPERM;
1880 if (adapter->flags & FULL_INIT_DONE)
1881 return -EBUSY;
1882 if (copy_from_user(&m, useraddr, sizeof(m)))
1883 return -EFAULT;
d9da466a 1884 if (!is_power_of_2(m.rx_pg_sz) ||
1885 !is_power_of_2(m.tx_pg_sz))
4d22de3e
DLR
1886 return -EINVAL; /* not power of 2 */
1887 if (!(m.rx_pg_sz & 0x14000))
1888 return -EINVAL; /* not 16KB or 64KB */
 1889 			if (!(m.tx_pg_sz & 0x1554000))
 1890 				return -EINVAL;	/* not 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */
1891 if (m.tx_num_pg == -1)
1892 m.tx_num_pg = p->tx_num_pgs;
1893 if (m.rx_num_pg == -1)
1894 m.rx_num_pg = p->rx_num_pgs;
1895 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1896 return -EINVAL;
1897 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1898 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1899 return -EINVAL;
1900 p->rx_pg_size = m.rx_pg_sz;
1901 p->tx_pg_size = m.tx_pg_sz;
1902 p->rx_num_pgs = m.rx_num_pg;
1903 p->tx_num_pgs = m.tx_num_pg;
1904 break;
1905 }
1906 case CHELSIO_GET_MEM:{
1907 struct ch_mem_range t;
1908 struct mc7 *mem;
1909 u64 buf[32];
1910
1911 if (!is_offload(adapter))
1912 return -EOPNOTSUPP;
1913 if (!(adapter->flags & FULL_INIT_DONE))
1914 return -EIO; /* need the memory controllers */
1915 if (copy_from_user(&t, useraddr, sizeof(t)))
1916 return -EFAULT;
1917 if ((t.addr & 7) || (t.len & 7))
1918 return -EINVAL;
1919 if (t.mem_id == MEM_CM)
1920 mem = &adapter->cm;
1921 else if (t.mem_id == MEM_PMRX)
1922 mem = &adapter->pmrx;
1923 else if (t.mem_id == MEM_PMTX)
1924 mem = &adapter->pmtx;
1925 else
1926 return -EINVAL;
1927
1928 /*
1929 * Version scheme:
1930 * bits 0..9: chip version
1931 * bits 10..15: chip revision
1932 */
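			/*
			 * Illustrative only (not driver code): with that layout
			 * a reader of t.version could recover the fields as
			 *
			 *	chip_ver = t.version & 0x3ff;	     (bits 0..9)
			 *	chip_rev = (t.version >> 10) & 0x3f; (bits 10..15)
			 */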
1933 t.version = 3 | (adapter->params.rev << 10);
1934 if (copy_to_user(useraddr, &t, sizeof(t)))
1935 return -EFAULT;
1936
1937 /*
1938 * Read 256 bytes at a time as len can be large and we don't
1939 * want to use huge intermediate buffers.
1940 */
1941 useraddr += sizeof(t); /* advance to start of buffer */
1942 while (t.len) {
1943 unsigned int chunk =
1944 min_t(unsigned int, t.len, sizeof(buf));
1945
1946 ret =
1947 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1948 buf);
1949 if (ret)
1950 return ret;
1951 if (copy_to_user(useraddr, buf, chunk))
1952 return -EFAULT;
1953 useraddr += chunk;
1954 t.addr += chunk;
1955 t.len -= chunk;
1956 }
1957 break;
1958 }
1959 case CHELSIO_SET_TRACE_FILTER:{
1960 struct ch_trace t;
1961 const struct trace_params *tp;
1962
1963 if (!capable(CAP_NET_ADMIN))
1964 return -EPERM;
1965 if (!offload_running(adapter))
1966 return -EAGAIN;
1967 if (copy_from_user(&t, useraddr, sizeof(t)))
1968 return -EFAULT;
1969
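			/*
			 * The fields of ch_trace from ->sip onward are expected
			 * to mirror struct trace_params, which is what makes
			 * the cast below safe.
			 */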
1970 tp = (const struct trace_params *)&t.sip;
1971 if (t.config_tx)
1972 t3_config_trace_filter(adapter, tp, 0,
1973 t.invert_match,
1974 t.trace_tx);
1975 if (t.config_rx)
1976 t3_config_trace_filter(adapter, tp, 1,
1977 t.invert_match,
1978 t.trace_rx);
1979 break;
1980 }
1981 default:
1982 return -EOPNOTSUPP;
1983 }
1984 return 0;
1985}
1986
1987static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1988{
4d22de3e 1989 struct mii_ioctl_data *data = if_mii(req);
1990 struct port_info *pi = netdev_priv(dev);
1991 struct adapter *adapter = pi->adapter;
1992 int ret, mmd;
1993
1994 switch (cmd) {
1995 case SIOCGMIIPHY:
1996 data->phy_id = pi->phy.addr;
1997 /* FALLTHRU */
1998 case SIOCGMIIREG:{
1999 u32 val;
2000 struct cphy *phy = &pi->phy;
2001
2002 if (!phy->mdio_read)
2003 return -EOPNOTSUPP;
2004 if (is_10G(adapter)) {
2005 mmd = data->phy_id >> 8;
2006 if (!mmd)
2007 mmd = MDIO_DEV_PCS;
2008 else if (mmd > MDIO_DEV_XGXS)
2009 return -EINVAL;
2010
2011 ret =
2012 phy->mdio_read(adapter, data->phy_id & 0x1f,
2013 mmd, data->reg_num, &val);
2014 } else
2015 ret =
2016 phy->mdio_read(adapter, data->phy_id & 0x1f,
2017 0, data->reg_num & 0x1f,
2018 &val);
2019 if (!ret)
2020 data->val_out = val;
2021 break;
2022 }
2023 case SIOCSMIIREG:{
2024 struct cphy *phy = &pi->phy;
2025
2026 if (!capable(CAP_NET_ADMIN))
2027 return -EPERM;
2028 if (!phy->mdio_write)
2029 return -EOPNOTSUPP;
2030 if (is_10G(adapter)) {
2031 mmd = data->phy_id >> 8;
2032 if (!mmd)
2033 mmd = MDIO_DEV_PCS;
2034 else if (mmd > MDIO_DEV_XGXS)
2035 return -EINVAL;
2036
2037 ret =
2038 phy->mdio_write(adapter,
2039 data->phy_id & 0x1f, mmd,
2040 data->reg_num,
2041 data->val_in);
2042 } else
2043 ret =
2044 phy->mdio_write(adapter,
2045 data->phy_id & 0x1f, 0,
2046 data->reg_num & 0x1f,
2047 data->val_in);
2048 break;
2049 }
2050 case SIOCCHIOCTL:
2051 return cxgb_extension_ioctl(dev, req->ifr_data);
2052 default:
2053 return -EOPNOTSUPP;
2054 }
2055 return ret;
2056}
2057
2058static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2059{
4d22de3e 2060 struct port_info *pi = netdev_priv(dev);
2061 struct adapter *adapter = pi->adapter;
2062 int ret;
2063
2064 if (new_mtu < 81) /* accommodate SACK */
2065 return -EINVAL;
2066 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2067 return ret;
2068 dev->mtu = new_mtu;
2069 init_port_mtus(adapter);
2070 if (adapter->params.rev == 0 && offload_running(adapter))
2071 t3_load_mtus(adapter, adapter->params.mtus,
2072 adapter->params.a_wnd, adapter->params.b_wnd,
2073 adapter->port[0]->mtu);
2074 return 0;
2075}
2076
2077static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2078{
4d22de3e 2079 struct port_info *pi = netdev_priv(dev);
5fbf816f 2080 struct adapter *adapter = pi->adapter;
2081 struct sockaddr *addr = p;
2082
2083 if (!is_valid_ether_addr(addr->sa_data))
2084 return -EINVAL;
2085
2086 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2087 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2088 if (offload_running(adapter))
2089 write_smt_entry(adapter, pi->port_id);
2090 return 0;
2091}
2092
2093/**
2094 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2095 * @adap: the adapter
2096 * @p: the port
2097 *
2098 * Ensures that current Rx processing on any of the queues associated with
2099 * the given port completes before returning. We do this by acquiring and
2100 * releasing the locks of the response queues associated with the port.
2101 */
2102static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2103{
2104 int i;
2105
2106 for (i = 0; i < p->nqsets; i++) {
2107 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2108
2109 spin_lock_irq(&q->lock);
2110 spin_unlock_irq(&q->lock);
2111 }
2112}
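/*
 * A minimal sketch of the quiescence idiom used above, assuming a generic
 * spinlock-protected Rx handler (illustrative only, not driver code):
 *
 *	spin_lock_irq(&q->lock);	// blocks until the current handler,
 *	spin_unlock_irq(&q->lock);	// if any, has dropped the lock
 *
 * Once this has been done for every response queue of the port, no Rx
 * processing that started before the call can still be in flight.
 */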
2113
2114static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2115{
4d22de3e 2116 struct port_info *pi = netdev_priv(dev);
5fbf816f 2117 struct adapter *adapter = pi->adapter;
2118
2119 pi->vlan_grp = grp;
2120 if (adapter->params.rev > 0)
2121 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2122 else {
2123 /* single control for all ports */
2124 unsigned int i, have_vlans = 0;
2125 for_each_port(adapter, i)
2126 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2127
2128 t3_set_vlan_accel(adapter, 1, have_vlans);
2129 }
2130 t3_synchronize_rx(adapter, pi);
2131}
2132
2133#ifdef CONFIG_NET_POLL_CONTROLLER
2134static void cxgb_netpoll(struct net_device *dev)
2135{
890de332 2136 struct port_info *pi = netdev_priv(dev);
5fbf816f 2137 struct adapter *adapter = pi->adapter;
890de332 2138 int qidx;
4d22de3e 2139
2140 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2141 struct sge_qset *qs = &adapter->sge.qs[qidx];
2142 void *source;
2143
2144 if (adapter->flags & USING_MSIX)
2145 source = qs;
2146 else
2147 source = adapter;
2148
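		/*
		 * t3_intr_handler() returns the interrupt service routine the
		 * adapter is currently using; invoke it directly with the
		 * dev_id it expects (the qset for MSI-X, else the adapter).
		 */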
2149 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2150 }
2151}
2152#endif
2153
2154/*
2155 * Periodic accumulation of MAC statistics.
2156 */
2157static void mac_stats_update(struct adapter *adapter)
2158{
2159 int i;
2160
2161 for_each_port(adapter, i) {
2162 struct net_device *dev = adapter->port[i];
2163 struct port_info *p = netdev_priv(dev);
2164
2165 if (netif_running(dev)) {
2166 spin_lock(&adapter->stats_lock);
2167 t3_mac_update_stats(&p->mac);
2168 spin_unlock(&adapter->stats_lock);
2169 }
2170 }
2171}
2172
2173static void check_link_status(struct adapter *adapter)
2174{
2175 int i;
2176
2177 for_each_port(adapter, i) {
2178 struct net_device *dev = adapter->port[i];
2179 struct port_info *p = netdev_priv(dev);
2180
2181 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2182 t3_link_changed(adapter, i);
2183 }
2184}
2185
2186static void check_t3b2_mac(struct adapter *adapter)
2187{
2188 int i;
2189
2190 if (!rtnl_trylock()) /* synchronize with ifdown */
2191 return;
2192
2193 for_each_port(adapter, i) {
2194 struct net_device *dev = adapter->port[i];
2195 struct port_info *p = netdev_priv(dev);
2196 int status;
2197
2198 if (!netif_running(dev))
2199 continue;
2200
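		/*
		 * The watchdog reports 1 when it toggled the MAC out of a
		 * stuck state and 2 when the MAC needs the full
		 * reinitialization performed below.
		 */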
2201 status = 0;
6d6dabac 2202 if (netif_running(dev) && netif_carrier_ok(dev))
2203 status = t3b2_mac_watchdog_task(&p->mac);
2204 if (status == 1)
2205 p->mac.stats.num_toggled++;
2206 else if (status == 2) {
2207 struct cmac *mac = &p->mac;
2208
2209 t3_mac_set_mtu(mac, dev->mtu);
2210 t3_mac_set_address(mac, 0, dev->dev_addr);
2211 cxgb_set_rxmode(dev);
2212 t3_link_start(&p->phy, mac, &p->link_config);
2213 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2214 t3_port_intr_enable(adapter, p->port_id);
2215 p->mac.stats.num_resets++;
2216 }
2217 }
2218 rtnl_unlock();
2219}
2220
2221
2222static void t3_adap_check_task(struct work_struct *work)
2223{
2224 struct adapter *adapter = container_of(work, struct adapter,
2225 adap_check_task.work);
2226 const struct adapter_params *p = &adapter->params;
2227
2228 adapter->check_task_cnt++;
2229
2230 /* Check link status for PHYs without interrupts */
2231 if (p->linkpoll_period)
2232 check_link_status(adapter);
2233
 2234 	/* Accumulate MAC stats if needed (linkpoll_period is in 1/10s) */
2235 if (!p->linkpoll_period ||
2236 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2237 p->stats_update_period) {
2238 mac_stats_update(adapter);
2239 adapter->check_task_cnt = 0;
2240 }
2241
2242 if (p->rev == T3_REV_B2)
2243 check_t3b2_mac(adapter);
2244
2245 /* Schedule the next check update if any port is active. */
2246 spin_lock(&adapter->work_lock);
2247 if (adapter->open_device_map & PORT_MASK)
2248 schedule_chk_task(adapter);
2249 spin_unlock(&adapter->work_lock);
2250}
2251
2252/*
2253 * Processes external (PHY) interrupts in process context.
2254 */
2255static void ext_intr_task(struct work_struct *work)
2256{
2257 struct adapter *adapter = container_of(work, struct adapter,
2258 ext_intr_handler_task);
2259
2260 t3_phy_intr_handler(adapter);
2261
2262 /* Now reenable external interrupts */
2263 spin_lock_irq(&adapter->work_lock);
2264 if (adapter->slow_intr_mask) {
2265 adapter->slow_intr_mask |= F_T3DBG;
2266 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2267 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2268 adapter->slow_intr_mask);
2269 }
2270 spin_unlock_irq(&adapter->work_lock);
2271}
2272
2273/*
2274 * Interrupt-context handler for external (PHY) interrupts.
2275 */
2276void t3_os_ext_intr_handler(struct adapter *adapter)
2277{
2278 /*
2279 * Schedule a task to handle external interrupts as they may be slow
2280 * and we use a mutex to protect MDIO registers. We disable PHY
2281 * interrupts in the meantime and let the task reenable them when
2282 * it's done.
2283 */
2284 spin_lock(&adapter->work_lock);
2285 if (adapter->slow_intr_mask) {
2286 adapter->slow_intr_mask &= ~F_T3DBG;
2287 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2288 adapter->slow_intr_mask);
2289 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2290 }
2291 spin_unlock(&adapter->work_lock);
2292}
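/*
 * In outline, the mask-and-defer pattern above (illustrative summary):
 *
 *	hard IRQ:   clear F_T3DBG from slow_intr_mask, rewrite
 *	            A_PL_INT_ENABLE0, queue ext_intr_handler_task
 *	workqueue:  run t3_phy_intr_handler() (may sleep on the MDIO
 *	            mutex), then restore F_T3DBG and re-arm the interrupt
 *
 * The PHY interrupt therefore cannot re-fire while its slow handling is
 * still in progress.
 */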
2293
2294void t3_fatal_err(struct adapter *adapter)
2295{
2296 unsigned int fw_status[4];
2297
2298 if (adapter->flags & FULL_INIT_DONE) {
2299 t3_sge_stop(adapter);
2300 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2301 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2302 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2303 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2304 t3_intr_disable(adapter);
2305 }
2306 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
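	/* Read four words of firmware status from the CIM control block. */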
2307 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2308 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2309 fw_status[0], fw_status[1],
2310 fw_status[2], fw_status[3]);
2311
2312}
2313
2314static int __devinit cxgb_enable_msix(struct adapter *adap)
2315{
2316 struct msix_entry entries[SGE_QSETS + 1];
2317 int i, err;
2318
2319 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2320 entries[i].entry = i;
2321
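	/*
	 * pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, and a positive count when fewer vectors are available
	 * than requested; we only use MSI-X if the full set is granted.
	 */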
2322 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2323 if (!err) {
2324 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2325 adap->msix_info[i].vec = entries[i].vector;
2326 } else if (err > 0)
2327 dev_info(&adap->pdev->dev,
2328 "only %d MSI-X vectors left, not using MSI-X\n", err);
2329 return err;
2330}
2331
2332static void __devinit print_port_info(struct adapter *adap,
2333 const struct adapter_info *ai)
2334{
2335 static const char *pci_variant[] = {
2336 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2337 };
2338
2339 int i;
2340 char buf[80];
2341
2342 if (is_pcie(adap))
2343 snprintf(buf, sizeof(buf), "%s x%d",
2344 pci_variant[adap->params.pci.variant],
2345 adap->params.pci.width);
2346 else
2347 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2348 pci_variant[adap->params.pci.variant],
2349 adap->params.pci.speed, adap->params.pci.width);
2350
2351 for_each_port(adap, i) {
2352 struct net_device *dev = adap->port[i];
2353 const struct port_info *pi = netdev_priv(dev);
2354
2355 if (!test_bit(i, &adap->registered_device_map))
2356 continue;
8ac3ba68 2357 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
4d22de3e 2358 dev->name, ai->desc, pi->port_type->desc,
8ac3ba68 2359 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2360 (adap->flags & USING_MSIX) ? " MSI-X" :
2361 (adap->flags & USING_MSI) ? " MSI" : "");
2362 if (adap->name == dev->name && adap->params.vpd.mclk)
2363 printk(KERN_INFO
2364 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2365 adap->name, t3_mc7_size(&adap->cm) >> 20,
2366 t3_mc7_size(&adap->pmtx) >> 20,
2367 t3_mc7_size(&adap->pmrx) >> 20,
2368 adap->params.vpd.sn);
2369 }
2370}
2371
2372static int __devinit init_one(struct pci_dev *pdev,
2373 const struct pci_device_id *ent)
2374{
2375 static int version_printed;
2376
2377 int i, err, pci_using_dac = 0;
2378 unsigned long mmio_start, mmio_len;
2379 const struct adapter_info *ai;
2380 struct adapter *adapter = NULL;
2381 struct port_info *pi;
2382
2383 if (!version_printed) {
2384 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2385 ++version_printed;
2386 }
2387
2388 if (!cxgb3_wq) {
2389 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2390 if (!cxgb3_wq) {
2391 printk(KERN_ERR DRV_NAME
2392 ": cannot initialize work queue\n");
2393 return -ENOMEM;
2394 }
2395 }
2396
2397 err = pci_request_regions(pdev, DRV_NAME);
2398 if (err) {
2399 /* Just info, some other driver may have claimed the device. */
2400 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2401 return err;
2402 }
2403
2404 err = pci_enable_device(pdev);
2405 if (err) {
2406 dev_err(&pdev->dev, "cannot enable PCI device\n");
2407 goto out_release_regions;
2408 }
2409
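	/* Prefer 64-bit DMA, falling back to a 32-bit mask if unavailable. */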
2410 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2411 pci_using_dac = 1;
2412 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2413 if (err) {
2414 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2415 "coherent allocations\n");
2416 goto out_disable_device;
2417 }
2418 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2419 dev_err(&pdev->dev, "no usable DMA configuration\n");
2420 goto out_disable_device;
2421 }
2422
2423 pci_set_master(pdev);
2424
2425 mmio_start = pci_resource_start(pdev, 0);
2426 mmio_len = pci_resource_len(pdev, 0);
2427 ai = t3_get_adapter_info(ent->driver_data);
2428
2429 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2430 if (!adapter) {
2431 err = -ENOMEM;
2432 goto out_disable_device;
2433 }
2434
2435 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2436 if (!adapter->regs) {
2437 dev_err(&pdev->dev, "cannot map device registers\n");
2438 err = -ENOMEM;
2439 goto out_free_adapter;
2440 }
2441
2442 adapter->pdev = pdev;
2443 adapter->name = pci_name(pdev);
2444 adapter->msg_enable = dflt_msg_enable;
2445 adapter->mmio_len = mmio_len;
2446
2447 mutex_init(&adapter->mdio_lock);
2448 spin_lock_init(&adapter->work_lock);
2449 spin_lock_init(&adapter->stats_lock);
2450
2451 INIT_LIST_HEAD(&adapter->adapter_list);
2452 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2453 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2454
2455 for (i = 0; i < ai->nports; ++i) {
2456 struct net_device *netdev;
2457
2458 netdev = alloc_etherdev(sizeof(struct port_info));
2459 if (!netdev) {
2460 err = -ENOMEM;
2461 goto out_free_dev;
2462 }
2463
2464 SET_MODULE_OWNER(netdev);
2465 SET_NETDEV_DEV(netdev, &pdev->dev);
2466
2467 adapter->port[i] = netdev;
2468 pi = netdev_priv(netdev);
5fbf816f 2469 pi->adapter = adapter;
2470 pi->rx_csum_offload = 1;
2471 pi->nqsets = 1;
2472 pi->first_qset = i;
2473 pi->activity = 0;
2474 pi->port_id = i;
2475 netif_carrier_off(netdev);
2476 netdev->irq = pdev->irq;
2477 netdev->mem_start = mmio_start;
2478 netdev->mem_end = mmio_start + mmio_len - 1;
2479 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2480 netdev->features |= NETIF_F_LLTX;
2481 if (pci_using_dac)
2482 netdev->features |= NETIF_F_HIGHDMA;
2483
2484 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2485 netdev->vlan_rx_register = vlan_rx_register;
2486
2487 netdev->open = cxgb_open;
2488 netdev->stop = cxgb_close;
2489 netdev->hard_start_xmit = t3_eth_xmit;
2490 netdev->get_stats = cxgb_get_stats;
2491 netdev->set_multicast_list = cxgb_set_rxmode;
2492 netdev->do_ioctl = cxgb_ioctl;
2493 netdev->change_mtu = cxgb_change_mtu;
2494 netdev->set_mac_address = cxgb_set_mac_addr;
2495#ifdef CONFIG_NET_POLL_CONTROLLER
2496 netdev->poll_controller = cxgb_netpoll;
2497#endif
2498
2499 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2500 }
2501
5fbf816f 2502 pci_set_drvdata(pdev, adapter);
2503 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2504 err = -ENODEV;
2505 goto out_free_dev;
2506 }
480fe1a3 2507
2508 /*
2509 * The card is now ready to go. If any errors occur during device
2510 * registration we do not fail the whole card but rather proceed only
2511 * with the ports we manage to register successfully. However we must
2512 * register at least one net device.
2513 */
2514 for_each_port(adapter, i) {
2515 err = register_netdev(adapter->port[i]);
2516 if (err)
2517 dev_warn(&pdev->dev,
2518 "cannot register net device %s, skipping\n",
2519 adapter->port[i]->name);
2520 else {
2521 /*
2522 * Change the name we use for messages to the name of
2523 * the first successfully registered interface.
2524 */
2525 if (!adapter->registered_device_map)
2526 adapter->name = adapter->port[i]->name;
2527
2528 __set_bit(i, &adapter->registered_device_map);
2529 }
2530 }
2531 if (!adapter->registered_device_map) {
2532 dev_err(&pdev->dev, "could not register any net devices\n");
2533 goto out_free_dev;
2534 }
2535
2536 /* Driver's ready. Reflect it on LEDs */
2537 t3_led_ready(adapter);
2538
2539 if (is_offload(adapter)) {
2540 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2541 cxgb3_adapter_ofld(adapter);
2542 }
2543
2544 /* See what interrupts we'll be using */
2545 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2546 adapter->flags |= USING_MSIX;
2547 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2548 adapter->flags |= USING_MSI;
2549
0ee8d33c 2550 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2551 &cxgb3_attr_group);
2552
2553 print_port_info(adapter, ai);
2554 return 0;
2555
2556out_free_dev:
2557 iounmap(adapter->regs);
2558 for (i = ai->nports - 1; i >= 0; --i)
2559 if (adapter->port[i])
2560 free_netdev(adapter->port[i]);
2561
2562out_free_adapter:
2563 kfree(adapter);
2564
2565out_disable_device:
2566 pci_disable_device(pdev);
2567out_release_regions:
2568 pci_release_regions(pdev);
2569 pci_set_drvdata(pdev, NULL);
2570 return err;
2571}
2572
2573static void __devexit remove_one(struct pci_dev *pdev)
2574{
5fbf816f 2575 struct adapter *adapter = pci_get_drvdata(pdev);
4d22de3e 2576
5fbf816f 2577 if (adapter) {
4d22de3e 2578 int i;
2579
2580 t3_sge_stop(adapter);
0ee8d33c 2581 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2582 &cxgb3_attr_group);
2583
2584 for_each_port(adapter, i)
2585 if (test_bit(i, &adapter->registered_device_map))
2586 unregister_netdev(adapter->port[i]);
2587
2588 if (is_offload(adapter)) {
2589 cxgb3_adapter_unofld(adapter);
2590 if (test_bit(OFFLOAD_DEVMAP_BIT,
2591 &adapter->open_device_map))
2592 offload_close(&adapter->tdev);
2593 }
2594
2595 t3_free_sge_resources(adapter);
2596 cxgb_disable_msi(adapter);
2597
2598 for_each_port(adapter, i)
2599 if (adapter->port[i])
2600 free_netdev(adapter->port[i]);
2601
2602 iounmap(adapter->regs);
2603 kfree(adapter);
2604 pci_release_regions(pdev);
2605 pci_disable_device(pdev);
2606 pci_set_drvdata(pdev, NULL);
2607 }
2608}
2609
2610static struct pci_driver driver = {
2611 .name = DRV_NAME,
2612 .id_table = cxgb3_pci_tbl,
2613 .probe = init_one,
2614 .remove = __devexit_p(remove_one),
2615};
2616
2617static int __init cxgb3_init_module(void)
2618{
2619 int ret;
2620
2621 cxgb3_offload_init();
2622
2623 ret = pci_register_driver(&driver);
2624 return ret;
2625}
2626
2627static void __exit cxgb3_cleanup_module(void)
2628{
2629 pci_unregister_driver(&driver);
2630 if (cxgb3_wq)
2631 destroy_workqueue(cxgb3_wq);
2632}
2633
2634module_init(cxgb3_init_module);
2635module_exit(cxgb3_cleanup_module);