cxgb3 - CQ context operations time out too soon.
linux-2.6-block.git: drivers/net/cxgb3/cxgb3_main.c
4d22de3e 1/*
1d68e93d 2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
2e283962 45#include <linux/firmware.h>
d9da466a 46#include <linux/log2.h>
47#include <asm/uaccess.h>
48
49#include "common.h"
50#include "cxgb3_ioctl.h"
51#include "regs.h"
52#include "cxgb3_offload.h"
53#include "version.h"
54
55#include "cxgb3_ctl_defs.h"
56#include "t3_cpl.h"
57#include "firmware_exports.h"
58
59enum {
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
65 MIN_TXQ_ENTRIES = 4,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
68 MIN_FL_ENTRIES = 32
69};
70
71#define PORT_MASK ((1 << MAX_NPORTS) - 1)
72
73#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
76
77#define EEPROM_MAGIC 0x38E2F10C
78
79#define CH_DEVICE(devid, ssid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
81
82static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 1, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1, 1), /* T302E */
85 CH_DEVICE(0x22, 1, 2), /* T310E */
86 CH_DEVICE(0x23, 1, 3), /* T320X */
87 CH_DEVICE(0x24, 1, 1), /* T302X */
88 CH_DEVICE(0x25, 1, 3), /* T320E */
89 CH_DEVICE(0x26, 1, 2), /* T310X */
90 CH_DEVICE(0x30, 1, 2), /* T3B10 */
91 CH_DEVICE(0x31, 1, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1, 1), /* T3B02 */
93 {0,}
94};
95
96MODULE_DESCRIPTION(DRV_DESC);
97MODULE_AUTHOR("Chelsio Communications");
1d68e93d 98MODULE_LICENSE("Dual BSD/GPL");
99MODULE_VERSION(DRV_VERSION);
100MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
101
102static int dflt_msg_enable = DFLT_MSG_ENABLE;
103
104module_param(dflt_msg_enable, int, 0644);
105MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106
107/*
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
111 *
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
115 */
116static int msi = 2;
117
118module_param(msi, int, 0644);
119MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
120
121/*
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
124 */
125
126static int ofld_disable = 0;
127
128module_param(ofld_disable, int, 0644);
129MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
130
131/*
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
138 */
139static struct workqueue_struct *cxgb3_wq;
140
141/**
142 * link_report - show link status and link speed/duplex
 143 * @dev: the port whose settings are to be reported
144 *
145 * Shows the link status, speed, and duplex of a port.
146 */
147static void link_report(struct net_device *dev)
148{
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
151 else {
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
154
155 switch (p->link_config.speed) {
156 case SPEED_10000:
157 s = "10Gbps";
158 break;
159 case SPEED_1000:
160 s = "1000Mbps";
161 break;
162 case SPEED_100:
163 s = "100Mbps";
164 break;
165 }
166
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
169 }
170}
171
172/**
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
 175 * @port_id: the port index whose link status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
180 *
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
184 */
185void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
187{
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
191
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
194 return;
195
196 if (link_stat != netif_carrier_ok(dev)) {
6d6dabac 197 if (link_stat) {
59cf8107 198 t3_mac_enable(mac, MAC_DIRECTION_RX);
4d22de3e 199 netif_carrier_on(dev);
6d6dabac 200 } else {
4d22de3e 201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
205 }
206
207 link_report(dev);
208 }
209}
210
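/*
 * Reprogram a port's MAC receive mode (promiscuity and multicast
 * filtering) from the current net_device state.
 */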
211static void cxgb_set_rxmode(struct net_device *dev)
212{
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
215
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
218}
219
220/**
221 * link_start - enable a port
222 * @dev: the device to enable
223 *
224 * Performs the MAC and PHY actions needed to enable a port.
225 */
226static void link_start(struct net_device *dev)
227{
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
231
232 init_rx_mode(&rm, dev, dev->mc_list);
233 t3_mac_reset(mac);
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
239}
240
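/*
 * Disable MSI-X or MSI, whichever the adapter is currently using, and
 * clear the corresponding adapter flag.
 */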
241static inline void cxgb_disable_msi(struct adapter *adapter)
242{
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
249 }
250}
251
252/*
253 * Interrupt handler for asynchronous events used with MSI-X.
254 */
255static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
256{
257 t3_slow_intr_handler(cookie);
258 return IRQ_HANDLED;
259}
260
261/*
262 * Name the MSI-X interrupts.
263 */
264static void name_msix_vecs(struct adapter *adap)
265{
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
267
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
270
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
274
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
279 }
280 }
281}
282
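/*
 * Request an MSI-X interrupt for each SGE queue set's response queue,
 * unwinding any already-requested vectors if a request fails.
 */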
283static int request_msix_data_irqs(struct adapter *adap)
284{
285 int i, j, err, qidx = 0;
286
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
289
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
293 adap->sge.qs[qidx].
294 rspq.polling), 0,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
297 if (err) {
298 while (--qidx >= 0)
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
301 return err;
302 }
303 qidx++;
304 }
305 }
306 return 0;
307}
308
309/**
310 * setup_rss - configure RSS
311 * @adap: the adapter
312 *
313 * Sets up RSS to distribute packets to multiple receive queues. We
314 * configure the RSS CPU lookup table to distribute to the number of HW
315 * receive queues, and the response queue lookup table to narrow that
316 * down to the response queues actually configured for each port.
317 * We always configure the RSS mapping for two ports since the mapping
318 * table has plenty of entries.
319 */
320static void setup_rss(struct adapter *adap)
321{
322 int i;
323 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
324 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
325 u8 cpus[SGE_QSETS + 1];
326 u16 rspq_map[RSS_TABLE_SIZE];
327
328 for (i = 0; i < SGE_QSETS; ++i)
329 cpus[i] = i;
330 cpus[SGE_QSETS] = 0xff; /* terminator */
331
332 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
333 rspq_map[i] = i % nq0;
334 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
335 }
336
337 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
338 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
339 V_RRCPLCPUSIZE(6), cpus, rspq_map);
340}
341
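/*
 * Register NAPI instances for all queue sets that have been initialized
 * (those with a valid adapter back-pointer); the rest are left alone.
 */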
bea3348e 342static void init_napi(struct adapter *adap)
4d22de3e 343{
bea3348e 344 int i;
4d22de3e 345
346 for (i = 0; i < SGE_QSETS; i++) {
347 struct sge_qset *qs = &adap->sge.qs[i];
4d22de3e 348
349 if (qs->adap)
350 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
351 64);
4d22de3e 352 }
353}
354
355/*
356 * Wait until all NAPI handlers are descheduled. This includes the handlers of
357 * both netdevices representing interfaces and the dummy ones for the extra
358 * queues.
359 */
360static void quiesce_rx(struct adapter *adap)
361{
362 int i;
4d22de3e 363
364 for (i = 0; i < SGE_QSETS; i++)
365 if (adap->sge.qs[i].adap)
366 napi_disable(&adap->sge.qs[i].napi);
367}
4d22de3e 368
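/*
 * Enable NAPI processing on every queue set that has been initialized.
 */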
369static void enable_all_napi(struct adapter *adap)
370{
371 int i;
372 for (i = 0; i < SGE_QSETS; i++)
373 if (adap->sge.qs[i].adap)
374 napi_enable(&adap->sge.qs[i].napi);
375}
376
377/**
378 * setup_sge_qsets - configure SGE Tx/Rx/response queues
379 * @adap: the adapter
380 *
381 * Determines how many sets of SGE queues to use and initializes them.
382 * We support multiple queue sets per port if we have MSI-X, otherwise
383 * just one queue set per port.
384 */
385static int setup_sge_qsets(struct adapter *adap)
386{
bea3348e 387 int i, j, err, irq_idx = 0, qset_idx = 0;
8ac3ba68 388 unsigned int ntxq = SGE_TXQ_PER_SET;
389
390 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
391 irq_idx = -1;
392
393 for_each_port(adap, i) {
394 struct net_device *dev = adap->port[i];
bea3348e 395 struct port_info *pi = netdev_priv(dev);
4d22de3e 396
bea3348e 397 pi->qs = &adap->sge.qs[pi->first_qset];
398 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
399 err = t3_sge_alloc_qset(adap, qset_idx, 1,
400 (adap->flags & USING_MSIX) ? qset_idx + 1 :
401 irq_idx,
bea3348e 402 &adap->params.sge.qset[qset_idx], ntxq, dev);
403 if (err) {
404 t3_free_sge_resources(adap);
405 return err;
406 }
407 }
408 }
409
410 return 0;
411}
412
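/*
 * sysfs attribute helpers: run the supplied show/store callback under the
 * RTNL lock so it cannot race with ioctls that reconfigure or shut down
 * the device.
 */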
413static ssize_t attr_show(struct device *d, struct device_attribute *attr,
414 char *buf,
896392ef 415 ssize_t(*format) (struct net_device *, char *))
416{
417 ssize_t len;
418
419 /* Synchronize with ioctls that may shut down the device */
420 rtnl_lock();
896392ef 421 len = (*format) (to_net_dev(d), buf);
422 rtnl_unlock();
423 return len;
424}
425
426static ssize_t attr_store(struct device *d, struct device_attribute *attr,
427 const char *buf, size_t len,
896392ef 428 ssize_t(*set) (struct net_device *, unsigned int),
429 unsigned int min_val, unsigned int max_val)
430{
431 char *endp;
432 ssize_t ret;
433 unsigned int val;
434
435 if (!capable(CAP_NET_ADMIN))
436 return -EPERM;
437
438 val = simple_strtoul(buf, &endp, 0);
439 if (endp == buf || val < min_val || val > max_val)
440 return -EINVAL;
441
442 rtnl_lock();
896392ef 443 ret = (*set) (to_net_dev(d), val);
444 if (!ret)
445 ret = len;
446 rtnl_unlock();
447 return ret;
448}
449
450#define CXGB3_SHOW(name, val_expr) \
896392ef 451static ssize_t format_##name(struct net_device *dev, char *buf) \
4d22de3e 452{ \
453 struct port_info *pi = netdev_priv(dev); \
454 struct adapter *adap = pi->adapter; \
455 return sprintf(buf, "%u\n", val_expr); \
456} \
457static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
458 char *buf) \
4d22de3e 459{ \
0ee8d33c 460 return attr_show(d, attr, buf, format_##name); \
461}
462
896392ef 463static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
4d22de3e 464{
465 struct port_info *pi = netdev_priv(dev);
466 struct adapter *adap = pi->adapter;
9f238486 467 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
896392ef 468
469 if (adap->flags & FULL_INIT_DONE)
470 return -EBUSY;
471 if (val && adap->params.rev == 0)
472 return -EINVAL;
473 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
474 min_tids)
475 return -EINVAL;
476 adap->params.mc5.nfilters = val;
477 return 0;
478}
479
480static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
481 const char *buf, size_t len)
4d22de3e 482{
0ee8d33c 483 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
484}
485
896392ef 486static ssize_t set_nservers(struct net_device *dev, unsigned int val)
4d22de3e 487{
488 struct port_info *pi = netdev_priv(dev);
489 struct adapter *adap = pi->adapter;
896392ef 490
491 if (adap->flags & FULL_INIT_DONE)
492 return -EBUSY;
493 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
494 MC5_MIN_TIDS)
495 return -EINVAL;
496 adap->params.mc5.nservers = val;
497 return 0;
498}
499
500static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
501 const char *buf, size_t len)
4d22de3e 502{
0ee8d33c 503 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
504}
505
506#define CXGB3_ATTR_R(name, val_expr) \
507CXGB3_SHOW(name, val_expr) \
0ee8d33c 508static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
509
510#define CXGB3_ATTR_RW(name, val_expr, store_method) \
511CXGB3_SHOW(name, val_expr) \
0ee8d33c 512static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
513
514CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
515CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
516CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
517
518static struct attribute *cxgb3_attrs[] = {
519 &dev_attr_cam_size.attr,
520 &dev_attr_nfilters.attr,
521 &dev_attr_nservers.attr,
522 NULL
523};
524
525static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
526
527static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
528 char *buf, int sched)
4d22de3e 529{
530 struct port_info *pi = netdev_priv(to_net_dev(d));
531 struct adapter *adap = pi->adapter;
4d22de3e 532 unsigned int v, addr, bpt, cpt;
5fbf816f 533 ssize_t len;
534
535 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
536 rtnl_lock();
537 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
538 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
539 if (sched & 1)
540 v >>= 16;
541 bpt = (v >> 8) & 0xff;
542 cpt = v & 0xff;
543 if (!cpt)
544 len = sprintf(buf, "disabled\n");
545 else {
546 v = (adap->params.vpd.cclk * 1000) / cpt;
547 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
548 }
549 rtnl_unlock();
550 return len;
551}
552
553static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
554 const char *buf, size_t len, int sched)
4d22de3e 555{
556 struct port_info *pi = netdev_priv(to_net_dev(d));
557 struct adapter *adap = pi->adapter;
558 unsigned int val;
559 char *endp;
560 ssize_t ret;
561
562 if (!capable(CAP_NET_ADMIN))
563 return -EPERM;
564
565 val = simple_strtoul(buf, &endp, 0);
566 if (endp == buf || val > 10000000)
567 return -EINVAL;
568
569 rtnl_lock();
570 ret = t3_config_sched(adap, val, sched);
571 if (!ret)
572 ret = len;
573 rtnl_unlock();
574 return ret;
575}
576
577#define TM_ATTR(name, sched) \
578static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
579 char *buf) \
4d22de3e 580{ \
0ee8d33c 581 return tm_attr_show(d, attr, buf, sched); \
4d22de3e 582} \
583static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
584 const char *buf, size_t len) \
4d22de3e 585{ \
0ee8d33c 586 return tm_attr_store(d, attr, buf, len, sched); \
4d22de3e 587} \
0ee8d33c 588static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
589
590TM_ATTR(sched0, 0);
591TM_ATTR(sched1, 1);
592TM_ATTR(sched2, 2);
593TM_ATTR(sched3, 3);
594TM_ATTR(sched4, 4);
595TM_ATTR(sched5, 5);
596TM_ATTR(sched6, 6);
597TM_ATTR(sched7, 7);
598
599static struct attribute *offload_attrs[] = {
600 &dev_attr_sched0.attr,
601 &dev_attr_sched1.attr,
602 &dev_attr_sched2.attr,
603 &dev_attr_sched3.attr,
604 &dev_attr_sched4.attr,
605 &dev_attr_sched5.attr,
606 &dev_attr_sched6.attr,
607 &dev_attr_sched7.attr,
608 NULL
609};
610
611static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
612
613/*
614 * Sends an sk_buff to an offload queue driver
615 * after dealing with any active network taps.
616 */
617static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
618{
619 int ret;
620
621 local_bh_disable();
622 ret = t3_offload_tx(tdev, skb);
623 local_bh_enable();
624 return ret;
625}
626
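/*
 * Set up one entry of the source MAC table (SMT) with the corresponding
 * port's MAC address by sending a CPL_SMT_WRITE_REQ on the offload path.
 */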
627static int write_smt_entry(struct adapter *adapter, int idx)
628{
629 struct cpl_smt_write_req *req;
630 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
631
632 if (!skb)
633 return -ENOMEM;
634
635 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
636 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
637 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
638 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
639 req->iff = idx;
640 memset(req->src_mac1, 0, sizeof(req->src_mac1));
641 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
642 skb->priority = 1;
643 offload_tx(&adapter->tdev, skb);
644 return 0;
645}
646
647static int init_smt(struct adapter *adapter)
648{
649 int i;
650
651 for_each_port(adapter, i)
652 write_smt_entry(adapter, i);
653 return 0;
654}
655
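/*
 * Write the per-port MTUs into the TP MTU port table register: port 0 in
 * the low 16 bits, port 1 (if present) in the high 16 bits.
 */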
656static void init_port_mtus(struct adapter *adapter)
657{
658 unsigned int mtus = adapter->port[0]->mtu;
659
660 if (adapter->port[1])
661 mtus |= adapter->port[1]->mtu << 16;
662 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
663}
664
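/*
 * Issue a firmware management work request that binds a queue set to a
 * Tx packet scheduler and port (used by bind_qsets() below).
 */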
665static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
666 int hi, int port)
667{
668 struct sk_buff *skb;
669 struct mngt_pktsched_wr *req;
670
671 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
672 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
673 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
674 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
675 req->sched = sched;
676 req->idx = qidx;
677 req->min = lo;
678 req->max = hi;
679 req->binding = port;
680 t3_mgmt_tx(adap, skb);
681}
682
683static void bind_qsets(struct adapter *adap)
684{
685 int i, j;
686
687 for_each_port(adap, i) {
688 const struct port_info *pi = adap2pinfo(adap, i);
689
690 for (j = 0; j < pi->nqsets; ++j)
691 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
692 -1, i);
693 }
694}
695
7f672cf5 696#define FW_FNAME "t3fw-%d.%d.%d.bin"
47330077 697#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
698
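/*
 * Fetch the firmware image matching the driver's expected version from
 * userspace via request_firmware() and program it with t3_load_fw().
 */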
699static int upgrade_fw(struct adapter *adap)
700{
701 int ret;
702 char buf[64];
703 const struct firmware *fw;
704 struct device *dev = &adap->pdev->dev;
705
706 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
7f672cf5 707 FW_VERSION_MINOR, FW_VERSION_MICRO);
708 ret = request_firmware(&fw, buf, dev);
709 if (ret < 0) {
710 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
711 buf);
712 return ret;
713 }
714 ret = t3_load_fw(adap, fw->data, fw->size);
715 release_firmware(fw);
716
717 if (ret == 0)
718 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
719 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
720 else
721 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
722 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
723
724 return ret;
725}
726
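/*
 * Map the chip revision to the character used in protocol SRAM image file
 * names; only T3B parts (rev B/B2) currently map to a character.
 */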
727static inline char t3rev2char(struct adapter *adapter)
728{
729 char rev = 0;
730
731 switch(adapter->params.rev) {
732 case T3_REV_B:
733 case T3_REV_B2:
734 rev = 'b';
735 break;
736 }
737 return rev;
738}
739
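/*
 * Load the protocol engine (TP) SRAM image for this chip revision from
 * userspace, validate it, and write it to the adapter.
 */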
740int update_tpsram(struct adapter *adap)
741{
742 const struct firmware *tpsram;
743 char buf[64];
744 struct device *dev = &adap->pdev->dev;
745 int ret;
746 char rev;
747
748 rev = t3rev2char(adap);
749 if (!rev)
750 return 0;
751
752 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
753 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
754
755 ret = request_firmware(&tpsram, buf, dev);
756 if (ret < 0) {
757 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
758 buf);
759 return ret;
760 }
761
762 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
763 if (ret)
764 goto release_tpsram;
765
766 ret = t3_set_proto_sram(adap, tpsram->data);
767 if (ret == 0)
768 dev_info(dev,
769 "successful update of protocol engine "
770 "to %d.%d.%d\n",
771 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
772 else
 773 dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
774 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
775 if (ret)
776 dev_err(dev, "loading protocol SRAM failed\n");
777
778release_tpsram:
779 release_firmware(tpsram);
780
781 return ret;
782}
783
784/**
785 * cxgb_up - enable the adapter
786 * @adapter: adapter being enabled
787 *
788 * Called when the first port is enabled, this function performs the
789 * actions necessary to make an adapter operational, such as completing
790 * the initialization of HW modules, and enabling interrupts.
791 *
792 * Must be called with the rtnl lock held.
793 */
794static int cxgb_up(struct adapter *adap)
795{
c54f5c24 796 int err;
47330077 797 int must_load;
798
799 if (!(adap->flags & FULL_INIT_DONE)) {
800 err = t3_check_fw_version(adap, &must_load);
801 if (err == -EINVAL) {
2e283962 802 err = upgrade_fw(adap);
803 if (err && must_load)
804 goto out;
805 }
4d22de3e 806
807 err = t3_check_tpsram_version(adap, &must_load);
808 if (err == -EINVAL) {
809 err = update_tpsram(adap);
810 if (err && must_load)
811 goto out;
812 }
813
814 err = t3_init_hw(adap, 0);
815 if (err)
816 goto out;
817
6cdbd77e 818 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
bea3348e 819
820 err = setup_sge_qsets(adap);
821 if (err)
822 goto out;
823
824 setup_rss(adap);
bea3348e 825 init_napi(adap);
826 adap->flags |= FULL_INIT_DONE;
827 }
828
829 t3_intr_clear(adap);
830
831 if (adap->flags & USING_MSIX) {
832 name_msix_vecs(adap);
833 err = request_irq(adap->msix_info[0].vec,
834 t3_async_intr_handler, 0,
835 adap->msix_info[0].desc, adap);
836 if (err)
837 goto irq_err;
838
839 if (request_msix_data_irqs(adap)) {
840 free_irq(adap->msix_info[0].vec, adap);
841 goto irq_err;
842 }
843 } else if ((err = request_irq(adap->pdev->irq,
844 t3_intr_handler(adap,
845 adap->sge.qs[0].rspq.
846 polling),
847 (adap->flags & USING_MSI) ?
848 0 : IRQF_SHARED,
849 adap->name, adap)))
850 goto irq_err;
851
bea3348e 852 enable_all_napi(adap);
853 t3_sge_start(adap);
854 t3_intr_enable(adap);
855
856 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
857 bind_qsets(adap);
858 adap->flags |= QUEUES_BOUND;
859
860out:
861 return err;
862irq_err:
863 CH_ERR(adap, "request_irq failed, err %d\n", err);
864 goto out;
865}
866
867/*
868 * Release resources when all the ports and offloading have been stopped.
869 */
870static void cxgb_down(struct adapter *adapter)
871{
872 t3_sge_stop(adapter);
873 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
874 t3_intr_disable(adapter);
875 spin_unlock_irq(&adapter->work_lock);
876
877 if (adapter->flags & USING_MSIX) {
878 int i, n = 0;
879
880 free_irq(adapter->msix_info[0].vec, adapter);
881 for_each_port(adapter, i)
882 n += adap2pinfo(adapter, i)->nqsets;
883
884 for (i = 0; i < n; ++i)
885 free_irq(adapter->msix_info[i + 1].vec,
886 &adapter->sge.qs[i]);
887 } else
888 free_irq(adapter->pdev->irq, adapter);
889
890 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
891 quiesce_rx(adapter);
892}
893
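/*
 * (Re)arm the periodic adapter check task.  The interval is the link poll
 * period when link polling is in use, otherwise the statistics update
 * period.
 */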
894static void schedule_chk_task(struct adapter *adap)
895{
896 unsigned int timeo;
897
898 timeo = adap->params.linkpoll_period ?
899 (HZ * adap->params.linkpoll_period) / 10 :
900 adap->params.stats_update_period * HZ;
901 if (timeo)
902 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
903}
904
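/*
 * Bring up offload operation: bring the adapter up if no ports are open
 * yet, switch TP into offload mode, initialize MTU and SMT state, and
 * notify registered offload clients.
 */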
905static int offload_open(struct net_device *dev)
906{
907 struct port_info *pi = netdev_priv(dev);
908 struct adapter *adapter = pi->adapter;
909 struct t3cdev *tdev = dev2t3cdev(dev);
4d22de3e 910 int adap_up = adapter->open_device_map & PORT_MASK;
c54f5c24 911 int err;
912
913 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
914 return 0;
915
916 if (!adap_up && (err = cxgb_up(adapter)) < 0)
917 return err;
918
919 t3_tp_set_offload_mode(adapter, 1);
920 tdev->lldev = adapter->port[0];
921 err = cxgb3_offload_activate(adapter);
922 if (err)
923 goto out;
924
925 init_port_mtus(adapter);
926 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
927 adapter->params.b_wnd,
928 adapter->params.rev == 0 ?
929 adapter->port[0]->mtu : 0xffff);
930 init_smt(adapter);
931
932 /* Never mind if the next step fails */
0ee8d33c 933 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
934
935 /* Call back all registered clients */
936 cxgb3_add_clients(tdev);
937
938out:
939 /* restore them in case the offload module has changed them */
940 if (err) {
941 t3_tp_set_offload_mode(adapter, 0);
942 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
943 cxgb3_set_dummy_ops(tdev);
944 }
945 return err;
946}
947
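/*
 * Shut down offload operation: notify clients, detach the offload device,
 * and take the adapter down if no ports remain open.
 */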
948static int offload_close(struct t3cdev *tdev)
949{
950 struct adapter *adapter = tdev2adap(tdev);
951
952 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
953 return 0;
954
955 /* Call back all registered clients */
956 cxgb3_remove_clients(tdev);
957
0ee8d33c 958 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
959
960 tdev->lldev = NULL;
961 cxgb3_set_dummy_ops(tdev);
962 t3_tp_set_offload_mode(adapter, 0);
963 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
964
965 if (!adapter->open_device_map)
966 cxgb_down(adapter);
967
968 cxgb3_offload_deactivate(adapter);
969 return 0;
970}
971
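/*
 * net_device open handler: bring the adapter up on the first open, enable
 * offload if available, then start the link and the port's interrupts.
 */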
972static int cxgb_open(struct net_device *dev)
973{
4d22de3e 974 struct port_info *pi = netdev_priv(dev);
5fbf816f 975 struct adapter *adapter = pi->adapter;
4d22de3e 976 int other_ports = adapter->open_device_map & PORT_MASK;
5fbf816f 977 int err;
4d22de3e 978
979 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
980 quiesce_rx(adapter);
4d22de3e 981 return err;
bea3348e 982 }
983
984 set_bit(pi->port_id, &adapter->open_device_map);
8ac3ba68 985 if (is_offload(adapter) && !ofld_disable) {
986 err = offload_open(dev);
987 if (err)
988 printk(KERN_WARNING
989 "Could not initialize offload capabilities\n");
990 }
991
992 link_start(dev);
993 t3_port_intr_enable(adapter, pi->port_id);
994 netif_start_queue(dev);
995 if (!other_ports)
996 schedule_chk_task(adapter);
997
998 return 0;
999}
1000
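/*
 * net_device stop handler: quiesce the port, drop it from the open device
 * map, and take the adapter down when the last port closes.
 */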
1001static int cxgb_close(struct net_device *dev)
1002{
1003 struct port_info *pi = netdev_priv(dev);
1004 struct adapter *adapter = pi->adapter;
4d22de3e 1005
5fbf816f 1006 t3_port_intr_disable(adapter, pi->port_id);
4d22de3e 1007 netif_stop_queue(dev);
5fbf816f 1008 pi->phy.ops->power_down(&pi->phy, 1);
4d22de3e 1009 netif_carrier_off(dev);
5fbf816f 1010 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1011
1012 spin_lock(&adapter->work_lock); /* sync with update task */
5fbf816f 1013 clear_bit(pi->port_id, &adapter->open_device_map);
1014 spin_unlock(&adapter->work_lock);
1015
1016 if (!(adapter->open_device_map & PORT_MASK))
1017 cancel_rearming_delayed_workqueue(cxgb3_wq,
1018 &adapter->adap_check_task);
1019
1020 if (!adapter->open_device_map)
1021 cxgb_down(adapter);
1022
1023 return 0;
1024}
1025
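/*
 * Fill the net_device_stats structure from the accumulated MAC statistics.
 */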
1026static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1027{
1028 struct port_info *pi = netdev_priv(dev);
1029 struct adapter *adapter = pi->adapter;
1030 struct net_device_stats *ns = &pi->netstats;
1031 const struct mac_stats *pstats;
1032
1033 spin_lock(&adapter->stats_lock);
5fbf816f 1034 pstats = t3_mac_update_stats(&pi->mac);
1035 spin_unlock(&adapter->stats_lock);
1036
1037 ns->tx_bytes = pstats->tx_octets;
1038 ns->tx_packets = pstats->tx_frames;
1039 ns->rx_bytes = pstats->rx_octets;
1040 ns->rx_packets = pstats->rx_frames;
1041 ns->multicast = pstats->rx_mcast_frames;
1042
1043 ns->tx_errors = pstats->tx_underrun;
1044 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1045 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1046 pstats->rx_fifo_ovfl;
1047
1048 /* detailed rx_errors */
1049 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1050 ns->rx_over_errors = 0;
1051 ns->rx_crc_errors = pstats->rx_fcs_errs;
1052 ns->rx_frame_errors = pstats->rx_symbol_errs;
1053 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1054 ns->rx_missed_errors = pstats->rx_cong_drops;
1055
1056 /* detailed tx_errors */
1057 ns->tx_aborted_errors = 0;
1058 ns->tx_carrier_errors = 0;
1059 ns->tx_fifo_errors = pstats->tx_underrun;
1060 ns->tx_heartbeat_errors = 0;
1061 ns->tx_window_errors = 0;
1062 return ns;
1063}
1064
1065static u32 get_msglevel(struct net_device *dev)
1066{
1067 struct port_info *pi = netdev_priv(dev);
1068 struct adapter *adapter = pi->adapter;
1069
1070 return adapter->msg_enable;
1071}
1072
1073static void set_msglevel(struct net_device *dev, u32 val)
1074{
1075 struct port_info *pi = netdev_priv(dev);
1076 struct adapter *adapter = pi->adapter;
1077
1078 adapter->msg_enable = val;
1079}
1080
1081static char stats_strings[][ETH_GSTRING_LEN] = {
1082 "TxOctetsOK ",
1083 "TxFramesOK ",
1084 "TxMulticastFramesOK",
1085 "TxBroadcastFramesOK",
1086 "TxPauseFrames ",
1087 "TxUnderrun ",
1088 "TxExtUnderrun ",
1089
1090 "TxFrames64 ",
1091 "TxFrames65To127 ",
1092 "TxFrames128To255 ",
1093 "TxFrames256To511 ",
1094 "TxFrames512To1023 ",
1095 "TxFrames1024To1518 ",
1096 "TxFrames1519ToMax ",
1097
1098 "RxOctetsOK ",
1099 "RxFramesOK ",
1100 "RxMulticastFramesOK",
1101 "RxBroadcastFramesOK",
1102 "RxPauseFrames ",
1103 "RxFCSErrors ",
1104 "RxSymbolErrors ",
1105 "RxShortErrors ",
1106 "RxJabberErrors ",
1107 "RxLengthErrors ",
1108 "RxFIFOoverflow ",
1109
1110 "RxFrames64 ",
1111 "RxFrames65To127 ",
1112 "RxFrames128To255 ",
1113 "RxFrames256To511 ",
1114 "RxFrames512To1023 ",
1115 "RxFrames1024To1518 ",
1116 "RxFrames1519ToMax ",
1117
1118 "PhyFIFOErrors ",
1119 "TSO ",
1120 "VLANextractions ",
1121 "VLANinsertions ",
1122 "TxCsumOffload ",
1123 "RxCsumGood ",
1124 "RxDrops ",
1125
1126 "CheckTXEnToggled ",
1127 "CheckResets ",
1128
1129};
1130
1131static int get_stats_count(struct net_device *dev)
1132{
1133 return ARRAY_SIZE(stats_strings);
1134}
1135
1136#define T3_REGMAP_SIZE (3 * 1024)
1137
1138static int get_regs_len(struct net_device *dev)
1139{
1140 return T3_REGMAP_SIZE;
1141}
1142
1143static int get_eeprom_len(struct net_device *dev)
1144{
1145 return EEPROMSIZE;
1146}
1147
1148static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1149{
1150 struct port_info *pi = netdev_priv(dev);
1151 struct adapter *adapter = pi->adapter;
4d22de3e 1152 u32 fw_vers = 0;
47330077 1153 u32 tp_vers = 0;
1154
1155 t3_get_fw_version(adapter, &fw_vers);
47330077 1156 t3_get_tp_version(adapter, &tp_vers);
1157
1158 strcpy(info->driver, DRV_NAME);
1159 strcpy(info->version, DRV_VERSION);
1160 strcpy(info->bus_info, pci_name(adapter->pdev));
1161 if (!fw_vers)
1162 strcpy(info->fw_version, "N/A");
4aac3899 1163 else {
4d22de3e 1164 snprintf(info->fw_version, sizeof(info->fw_version),
47330077 1165 "%s %u.%u.%u TP %u.%u.%u",
1166 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1167 G_FW_VERSION_MAJOR(fw_vers),
1168 G_FW_VERSION_MINOR(fw_vers),
1169 G_FW_VERSION_MICRO(fw_vers),
1170 G_TP_VERSION_MAJOR(tp_vers),
1171 G_TP_VERSION_MINOR(tp_vers),
1172 G_TP_VERSION_MICRO(tp_vers));
4aac3899 1173 }
4d22de3e
DLR
1174}
1175
1176static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1177{
1178 if (stringset == ETH_SS_STATS)
1179 memcpy(data, stats_strings, sizeof(stats_strings));
1180}
1181
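/*
 * Sum one per-queue-set SGE statistic over all queue sets of a port.
 */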
1182static unsigned long collect_sge_port_stats(struct adapter *adapter,
1183 struct port_info *p, int idx)
1184{
1185 int i;
1186 unsigned long tot = 0;
1187
1188 for (i = 0; i < p->nqsets; ++i)
1189 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1190 return tot;
1191}
1192
1193static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1194 u64 *data)
1195{
4d22de3e 1196 struct port_info *pi = netdev_priv(dev);
5fbf816f 1197 struct adapter *adapter = pi->adapter;
1198 const struct mac_stats *s;
1199
1200 spin_lock(&adapter->stats_lock);
1201 s = t3_mac_update_stats(&pi->mac);
1202 spin_unlock(&adapter->stats_lock);
1203
1204 *data++ = s->tx_octets;
1205 *data++ = s->tx_frames;
1206 *data++ = s->tx_mcast_frames;
1207 *data++ = s->tx_bcast_frames;
1208 *data++ = s->tx_pause;
1209 *data++ = s->tx_underrun;
1210 *data++ = s->tx_fifo_urun;
1211
1212 *data++ = s->tx_frames_64;
1213 *data++ = s->tx_frames_65_127;
1214 *data++ = s->tx_frames_128_255;
1215 *data++ = s->tx_frames_256_511;
1216 *data++ = s->tx_frames_512_1023;
1217 *data++ = s->tx_frames_1024_1518;
1218 *data++ = s->tx_frames_1519_max;
1219
1220 *data++ = s->rx_octets;
1221 *data++ = s->rx_frames;
1222 *data++ = s->rx_mcast_frames;
1223 *data++ = s->rx_bcast_frames;
1224 *data++ = s->rx_pause;
1225 *data++ = s->rx_fcs_errs;
1226 *data++ = s->rx_symbol_errs;
1227 *data++ = s->rx_short;
1228 *data++ = s->rx_jabber;
1229 *data++ = s->rx_too_long;
1230 *data++ = s->rx_fifo_ovfl;
1231
1232 *data++ = s->rx_frames_64;
1233 *data++ = s->rx_frames_65_127;
1234 *data++ = s->rx_frames_128_255;
1235 *data++ = s->rx_frames_256_511;
1236 *data++ = s->rx_frames_512_1023;
1237 *data++ = s->rx_frames_1024_1518;
1238 *data++ = s->rx_frames_1519_max;
1239
1240 *data++ = pi->phy.fifo_errors;
1241
1242 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1243 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1244 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1245 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1246 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1247 *data++ = s->rx_cong_drops;
1248
1249 *data++ = s->num_toggled;
1250 *data++ = s->num_resets;
1251}
1252
1253static inline void reg_block_dump(struct adapter *ap, void *buf,
1254 unsigned int start, unsigned int end)
1255{
1256 u32 *p = buf + start;
1257
1258 for (; start <= end; start += sizeof(u32))
1259 *p++ = t3_read_reg(ap, start);
1260}
1261
1262static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1263 void *buf)
1264{
1265 struct port_info *pi = netdev_priv(dev);
1266 struct adapter *ap = pi->adapter;
1267
1268 /*
1269 * Version scheme:
1270 * bits 0..9: chip version
1271 * bits 10..15: chip revision
1272 * bit 31: set for PCIe cards
1273 */
1274 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1275
1276 /*
1277 * We skip the MAC statistics registers because they are clear-on-read.
1278 * Also reading multi-register stats would need to synchronize with the
1279 * periodic mac stats accumulation. Hard to justify the complexity.
1280 */
1281 memset(buf, 0, T3_REGMAP_SIZE);
1282 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1283 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1284 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1285 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1286 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1287 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1288 XGM_REG(A_XGM_SERDES_STAT3, 1));
1289 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1290 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1291}
1292
1293static int restart_autoneg(struct net_device *dev)
1294{
1295 struct port_info *p = netdev_priv(dev);
1296
1297 if (!netif_running(dev))
1298 return -EAGAIN;
1299 if (p->link_config.autoneg != AUTONEG_ENABLE)
1300 return -EINVAL;
1301 p->phy.ops->autoneg_restart(&p->phy);
1302 return 0;
1303}
1304
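/*
 * ethtool identify handler: blink GPIO0 once a second for the requested
 * number of seconds (2 if unspecified), then leave it driven high.
 */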
1305static int cxgb3_phys_id(struct net_device *dev, u32 data)
1306{
1307 struct port_info *pi = netdev_priv(dev);
1308 struct adapter *adapter = pi->adapter;
4d22de3e 1309 int i;
1310
1311 if (data == 0)
1312 data = 2;
1313
1314 for (i = 0; i < data * 2; i++) {
1315 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1316 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1317 if (msleep_interruptible(500))
1318 break;
1319 }
1320 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1321 F_GPIO0_OUT_VAL);
1322 return 0;
1323}
1324
1325static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1326{
1327 struct port_info *p = netdev_priv(dev);
1328
1329 cmd->supported = p->link_config.supported;
1330 cmd->advertising = p->link_config.advertising;
1331
1332 if (netif_carrier_ok(dev)) {
1333 cmd->speed = p->link_config.speed;
1334 cmd->duplex = p->link_config.duplex;
1335 } else {
1336 cmd->speed = -1;
1337 cmd->duplex = -1;
1338 }
1339
1340 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1341 cmd->phy_address = p->phy.addr;
1342 cmd->transceiver = XCVR_EXTERNAL;
1343 cmd->autoneg = p->link_config.autoneg;
1344 cmd->maxtxpkt = 0;
1345 cmd->maxrxpkt = 0;
1346 return 0;
1347}
1348
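/*
 * Translate a speed/duplex pair into the corresponding ethtool SUPPORTED_*
 * capability bit.
 */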
1349static int speed_duplex_to_caps(int speed, int duplex)
1350{
1351 int cap = 0;
1352
1353 switch (speed) {
1354 case SPEED_10:
1355 if (duplex == DUPLEX_FULL)
1356 cap = SUPPORTED_10baseT_Full;
1357 else
1358 cap = SUPPORTED_10baseT_Half;
1359 break;
1360 case SPEED_100:
1361 if (duplex == DUPLEX_FULL)
1362 cap = SUPPORTED_100baseT_Full;
1363 else
1364 cap = SUPPORTED_100baseT_Half;
1365 break;
1366 case SPEED_1000:
1367 if (duplex == DUPLEX_FULL)
1368 cap = SUPPORTED_1000baseT_Full;
1369 else
1370 cap = SUPPORTED_1000baseT_Half;
1371 break;
1372 case SPEED_10000:
1373 if (duplex == DUPLEX_FULL)
1374 cap = SUPPORTED_10000baseT_Full;
1375 }
1376 return cap;
1377}
1378
1379#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1380 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1381 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1382 ADVERTISED_10000baseT_Full)
1383
1384static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1385{
1386 struct port_info *p = netdev_priv(dev);
1387 struct link_config *lc = &p->link_config;
1388
1389 if (!(lc->supported & SUPPORTED_Autoneg))
1390 return -EOPNOTSUPP; /* can't change speed/duplex */
1391
1392 if (cmd->autoneg == AUTONEG_DISABLE) {
1393 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1394
1395 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1396 return -EINVAL;
1397 lc->requested_speed = cmd->speed;
1398 lc->requested_duplex = cmd->duplex;
1399 lc->advertising = 0;
1400 } else {
1401 cmd->advertising &= ADVERTISED_MASK;
1402 cmd->advertising &= lc->supported;
1403 if (!cmd->advertising)
1404 return -EINVAL;
1405 lc->requested_speed = SPEED_INVALID;
1406 lc->requested_duplex = DUPLEX_INVALID;
1407 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1408 }
1409 lc->autoneg = cmd->autoneg;
1410 if (netif_running(dev))
1411 t3_link_start(&p->phy, &p->mac, lc);
1412 return 0;
1413}
1414
1415static void get_pauseparam(struct net_device *dev,
1416 struct ethtool_pauseparam *epause)
1417{
1418 struct port_info *p = netdev_priv(dev);
1419
1420 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1421 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1422 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1423}
1424
1425static int set_pauseparam(struct net_device *dev,
1426 struct ethtool_pauseparam *epause)
1427{
1428 struct port_info *p = netdev_priv(dev);
1429 struct link_config *lc = &p->link_config;
1430
1431 if (epause->autoneg == AUTONEG_DISABLE)
1432 lc->requested_fc = 0;
1433 else if (lc->supported & SUPPORTED_Autoneg)
1434 lc->requested_fc = PAUSE_AUTONEG;
1435 else
1436 return -EINVAL;
1437
1438 if (epause->rx_pause)
1439 lc->requested_fc |= PAUSE_RX;
1440 if (epause->tx_pause)
1441 lc->requested_fc |= PAUSE_TX;
1442 if (lc->autoneg == AUTONEG_ENABLE) {
1443 if (netif_running(dev))
1444 t3_link_start(&p->phy, &p->mac, lc);
1445 } else {
1446 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1447 if (netif_running(dev))
1448 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1449 }
1450 return 0;
1451}
1452
1453static u32 get_rx_csum(struct net_device *dev)
1454{
1455 struct port_info *p = netdev_priv(dev);
1456
1457 return p->rx_csum_offload;
1458}
1459
1460static int set_rx_csum(struct net_device *dev, u32 data)
1461{
1462 struct port_info *p = netdev_priv(dev);
1463
1464 p->rx_csum_offload = data;
1465 return 0;
1466}
1467
1468static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1469{
1470 struct port_info *pi = netdev_priv(dev);
1471 struct adapter *adapter = pi->adapter;
05b97b30 1472 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1473
1474 e->rx_max_pending = MAX_RX_BUFFERS;
1475 e->rx_mini_max_pending = 0;
1476 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1477 e->tx_max_pending = MAX_TXQ_ENTRIES;
1478
1479 e->rx_pending = q->fl_size;
1480 e->rx_mini_pending = q->rspq_size;
1481 e->rx_jumbo_pending = q->jumbo_size;
1482 e->tx_pending = q->txq_size[0];
1483}
1484
1485static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1486{
1487 struct port_info *pi = netdev_priv(dev);
1488 struct adapter *adapter = pi->adapter;
05b97b30 1489 struct qset_params *q;
5fbf816f 1490 int i;
1491
1492 if (e->rx_pending > MAX_RX_BUFFERS ||
1493 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1494 e->tx_pending > MAX_TXQ_ENTRIES ||
1495 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1496 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1497 e->rx_pending < MIN_FL_ENTRIES ||
1498 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1499 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1500 return -EINVAL;
1501
1502 if (adapter->flags & FULL_INIT_DONE)
1503 return -EBUSY;
1504
1505 q = &adapter->params.sge.qset[pi->first_qset];
1506 for (i = 0; i < pi->nqsets; ++i, ++q) {
1507 q->rspq_size = e->rx_mini_pending;
1508 q->fl_size = e->rx_pending;
1509 q->jumbo_size = e->rx_jumbo_pending;
1510 q->txq_size[0] = e->tx_pending;
1511 q->txq_size[1] = e->tx_pending;
1512 q->txq_size[2] = e->tx_pending;
1513 }
1514 return 0;
1515}
1516
1517static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1518{
1519 struct port_info *pi = netdev_priv(dev);
1520 struct adapter *adapter = pi->adapter;
1521 struct qset_params *qsp = &adapter->params.sge.qset[0];
1522 struct sge_qset *qs = &adapter->sge.qs[0];
1523
1524 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1525 return -EINVAL;
1526
1527 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1528 t3_update_qset_coalesce(qs, qsp);
1529 return 0;
1530}
1531
1532static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1533{
1534 struct port_info *pi = netdev_priv(dev);
1535 struct adapter *adapter = pi->adapter;
1536 struct qset_params *q = adapter->params.sge.qset;
1537
1538 c->rx_coalesce_usecs = q->coalesce_usecs;
1539 return 0;
1540}
1541
1542static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1543 u8 * data)
1544{
1545 struct port_info *pi = netdev_priv(dev);
1546 struct adapter *adapter = pi->adapter;
4d22de3e 1547 int i, err = 0;
1548
1549 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1550 if (!buf)
1551 return -ENOMEM;
1552
1553 e->magic = EEPROM_MAGIC;
1554 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1555 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1556
1557 if (!err)
1558 memcpy(data, buf + e->offset, e->len);
1559 kfree(buf);
1560 return err;
1561}
1562
1563static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1564 u8 * data)
1565{
1566 struct port_info *pi = netdev_priv(dev);
1567 struct adapter *adapter = pi->adapter;
1568 u32 aligned_offset, aligned_len, *p;
4d22de3e 1569 u8 *buf;
c54f5c24 1570 int err;
1571
1572 if (eeprom->magic != EEPROM_MAGIC)
1573 return -EINVAL;
1574
1575 aligned_offset = eeprom->offset & ~3;
1576 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1577
1578 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1579 buf = kmalloc(aligned_len, GFP_KERNEL);
1580 if (!buf)
1581 return -ENOMEM;
1582 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1583 if (!err && aligned_len > 4)
1584 err = t3_seeprom_read(adapter,
1585 aligned_offset + aligned_len - 4,
1586 (u32 *) & buf[aligned_len - 4]);
1587 if (err)
1588 goto out;
1589 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1590 } else
1591 buf = data;
1592
1593 err = t3_seeprom_wp(adapter, 0);
1594 if (err)
1595 goto out;
1596
1597 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1598 err = t3_seeprom_write(adapter, aligned_offset, *p);
1599 aligned_offset += 4;
1600 }
1601
1602 if (!err)
1603 err = t3_seeprom_wp(adapter, 1);
1604out:
1605 if (buf != data)
1606 kfree(buf);
1607 return err;
1608}
1609
1610static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1611{
1612 wol->supported = 0;
1613 wol->wolopts = 0;
1614 memset(&wol->sopass, 0, sizeof(wol->sopass));
1615}
1616
1617static const struct ethtool_ops cxgb_ethtool_ops = {
1618 .get_settings = get_settings,
1619 .set_settings = set_settings,
1620 .get_drvinfo = get_drvinfo,
1621 .get_msglevel = get_msglevel,
1622 .set_msglevel = set_msglevel,
1623 .get_ringparam = get_sge_param,
1624 .set_ringparam = set_sge_param,
1625 .get_coalesce = get_coalesce,
1626 .set_coalesce = set_coalesce,
1627 .get_eeprom_len = get_eeprom_len,
1628 .get_eeprom = get_eeprom,
1629 .set_eeprom = set_eeprom,
1630 .get_pauseparam = get_pauseparam,
1631 .set_pauseparam = set_pauseparam,
1632 .get_rx_csum = get_rx_csum,
1633 .set_rx_csum = set_rx_csum,
1634 .get_tx_csum = ethtool_op_get_tx_csum,
1635 .set_tx_csum = ethtool_op_set_tx_csum,
1636 .get_sg = ethtool_op_get_sg,
1637 .set_sg = ethtool_op_set_sg,
1638 .get_link = ethtool_op_get_link,
1639 .get_strings = get_strings,
1640 .phys_id = cxgb3_phys_id,
1641 .nway_reset = restart_autoneg,
1642 .get_stats_count = get_stats_count,
1643 .get_ethtool_stats = get_stats,
1644 .get_regs_len = get_regs_len,
1645 .get_regs = get_regs,
1646 .get_wol = get_wol,
1647 .get_tso = ethtool_op_get_tso,
1648 .set_tso = ethtool_op_set_tso,
1649};
1650
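/*
 * Range-check helper for the extension ioctl parameters; negative values
 * mean "leave this parameter unchanged" and are always accepted.
 */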
1651static int in_range(int val, int lo, int hi)
1652{
1653 return val < 0 || (val <= hi && val >= lo);
1654}
1655
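/*
 * Handler for the driver-private CHELSIO_* extension commands: queue set
 * tuning, firmware loading, MTU table and memory configuration, adapter
 * memory reads, and trace filter setup.
 */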
1656static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1657{
1658 struct port_info *pi = netdev_priv(dev);
1659 struct adapter *adapter = pi->adapter;
4d22de3e 1660 u32 cmd;
5fbf816f 1661 int ret;
1662
1663 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1664 return -EFAULT;
1665
1666 switch (cmd) {
1667 case CHELSIO_SET_QSET_PARAMS:{
1668 int i;
1669 struct qset_params *q;
1670 struct ch_qset_params t;
1671
1672 if (!capable(CAP_NET_ADMIN))
1673 return -EPERM;
1674 if (copy_from_user(&t, useraddr, sizeof(t)))
1675 return -EFAULT;
1676 if (t.qset_idx >= SGE_QSETS)
1677 return -EINVAL;
1678 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1679 !in_range(t.cong_thres, 0, 255) ||
1680 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1681 MAX_TXQ_ENTRIES) ||
1682 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1683 MAX_TXQ_ENTRIES) ||
1684 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1685 MAX_CTRL_TXQ_ENTRIES) ||
1686 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1687 MAX_RX_BUFFERS)
1688 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1689 MAX_RX_JUMBO_BUFFERS)
1690 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1691 MAX_RSPQ_ENTRIES))
1692 return -EINVAL;
1693 if ((adapter->flags & FULL_INIT_DONE) &&
1694 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1695 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1696 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1697 t.polling >= 0 || t.cong_thres >= 0))
1698 return -EBUSY;
1699
1700 q = &adapter->params.sge.qset[t.qset_idx];
1701
1702 if (t.rspq_size >= 0)
1703 q->rspq_size = t.rspq_size;
1704 if (t.fl_size[0] >= 0)
1705 q->fl_size = t.fl_size[0];
1706 if (t.fl_size[1] >= 0)
1707 q->jumbo_size = t.fl_size[1];
1708 if (t.txq_size[0] >= 0)
1709 q->txq_size[0] = t.txq_size[0];
1710 if (t.txq_size[1] >= 0)
1711 q->txq_size[1] = t.txq_size[1];
1712 if (t.txq_size[2] >= 0)
1713 q->txq_size[2] = t.txq_size[2];
1714 if (t.cong_thres >= 0)
1715 q->cong_thres = t.cong_thres;
1716 if (t.intr_lat >= 0) {
1717 struct sge_qset *qs =
1718 &adapter->sge.qs[t.qset_idx];
1719
1720 q->coalesce_usecs = t.intr_lat;
1721 t3_update_qset_coalesce(qs, q);
1722 }
1723 if (t.polling >= 0) {
1724 if (adapter->flags & USING_MSIX)
1725 q->polling = t.polling;
1726 else {
1727 /* No polling with INTx for T3A */
1728 if (adapter->params.rev == 0 &&
1729 !(adapter->flags & USING_MSI))
1730 t.polling = 0;
1731
1732 for (i = 0; i < SGE_QSETS; i++) {
1733 q = &adapter->params.sge.
1734 qset[i];
1735 q->polling = t.polling;
1736 }
1737 }
1738 }
1739 break;
1740 }
1741 case CHELSIO_GET_QSET_PARAMS:{
1742 struct qset_params *q;
1743 struct ch_qset_params t;
1744
1745 if (copy_from_user(&t, useraddr, sizeof(t)))
1746 return -EFAULT;
1747 if (t.qset_idx >= SGE_QSETS)
1748 return -EINVAL;
1749
1750 q = &adapter->params.sge.qset[t.qset_idx];
1751 t.rspq_size = q->rspq_size;
1752 t.txq_size[0] = q->txq_size[0];
1753 t.txq_size[1] = q->txq_size[1];
1754 t.txq_size[2] = q->txq_size[2];
1755 t.fl_size[0] = q->fl_size;
1756 t.fl_size[1] = q->jumbo_size;
1757 t.polling = q->polling;
1758 t.intr_lat = q->coalesce_usecs;
1759 t.cong_thres = q->cong_thres;
1760
1761 if (copy_to_user(useraddr, &t, sizeof(t)))
1762 return -EFAULT;
1763 break;
1764 }
1765 case CHELSIO_SET_QSET_NUM:{
1766 struct ch_reg edata;
1767 struct port_info *pi = netdev_priv(dev);
1768 unsigned int i, first_qset = 0, other_qsets = 0;
1769
1770 if (!capable(CAP_NET_ADMIN))
1771 return -EPERM;
1772 if (adapter->flags & FULL_INIT_DONE)
1773 return -EBUSY;
1774 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1775 return -EFAULT;
1776 if (edata.val < 1 ||
1777 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1778 return -EINVAL;
1779
1780 for_each_port(adapter, i)
1781 if (adapter->port[i] && adapter->port[i] != dev)
1782 other_qsets += adap2pinfo(adapter, i)->nqsets;
1783
1784 if (edata.val + other_qsets > SGE_QSETS)
1785 return -EINVAL;
1786
1787 pi->nqsets = edata.val;
1788
1789 for_each_port(adapter, i)
1790 if (adapter->port[i]) {
1791 pi = adap2pinfo(adapter, i);
1792 pi->first_qset = first_qset;
1793 first_qset += pi->nqsets;
1794 }
1795 break;
1796 }
1797 case CHELSIO_GET_QSET_NUM:{
1798 struct ch_reg edata;
1799 struct port_info *pi = netdev_priv(dev);
1800
1801 edata.cmd = CHELSIO_GET_QSET_NUM;
1802 edata.val = pi->nqsets;
1803 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1804 return -EFAULT;
1805 break;
1806 }
1807 case CHELSIO_LOAD_FW:{
1808 u8 *fw_data;
1809 struct ch_mem_range t;
1810
1811 if (!capable(CAP_NET_ADMIN))
1812 return -EPERM;
1813 if (copy_from_user(&t, useraddr, sizeof(t)))
1814 return -EFAULT;
1815
1816 fw_data = kmalloc(t.len, GFP_KERNEL);
1817 if (!fw_data)
1818 return -ENOMEM;
1819
1820 if (copy_from_user
1821 (fw_data, useraddr + sizeof(t), t.len)) {
1822 kfree(fw_data);
1823 return -EFAULT;
1824 }
1825
1826 ret = t3_load_fw(adapter, fw_data, t.len);
1827 kfree(fw_data);
1828 if (ret)
1829 return ret;
1830 break;
1831 }
1832 case CHELSIO_SETMTUTAB:{
1833 struct ch_mtus m;
1834 int i;
1835
1836 if (!is_offload(adapter))
1837 return -EOPNOTSUPP;
1838 if (!capable(CAP_NET_ADMIN))
1839 return -EPERM;
1840 if (offload_running(adapter))
1841 return -EBUSY;
1842 if (copy_from_user(&m, useraddr, sizeof(m)))
1843 return -EFAULT;
1844 if (m.nmtus != NMTUS)
1845 return -EINVAL;
1846 if (m.mtus[0] < 81) /* accommodate SACK */
1847 return -EINVAL;
1848
1849 /* MTUs must be in ascending order */
1850 for (i = 1; i < NMTUS; ++i)
1851 if (m.mtus[i] < m.mtus[i - 1])
1852 return -EINVAL;
1853
1854 memcpy(adapter->params.mtus, m.mtus,
1855 sizeof(adapter->params.mtus));
1856 break;
1857 }
1858 case CHELSIO_GET_PM:{
1859 struct tp_params *p = &adapter->params.tp;
1860 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1861
1862 if (!is_offload(adapter))
1863 return -EOPNOTSUPP;
1864 m.tx_pg_sz = p->tx_pg_size;
1865 m.tx_num_pg = p->tx_num_pgs;
1866 m.rx_pg_sz = p->rx_pg_size;
1867 m.rx_num_pg = p->rx_num_pgs;
1868 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1869 if (copy_to_user(useraddr, &m, sizeof(m)))
1870 return -EFAULT;
1871 break;
1872 }
1873 case CHELSIO_SET_PM:{
1874 struct ch_pm m;
1875 struct tp_params *p = &adapter->params.tp;
1876
1877 if (!is_offload(adapter))
1878 return -EOPNOTSUPP;
1879 if (!capable(CAP_NET_ADMIN))
1880 return -EPERM;
1881 if (adapter->flags & FULL_INIT_DONE)
1882 return -EBUSY;
1883 if (copy_from_user(&m, useraddr, sizeof(m)))
1884 return -EFAULT;
d9da466a 1885 if (!is_power_of_2(m.rx_pg_sz) ||
1886 !is_power_of_2(m.tx_pg_sz))
1887 return -EINVAL; /* not power of 2 */
1888 if (!(m.rx_pg_sz & 0x14000))
1889 return -EINVAL; /* not 16KB or 64KB */
1890 if (!(m.tx_pg_sz & 0x1554000))
 1891 return -EINVAL; /* not a power of 4 between 16KB and 16MB */
1892 if (m.tx_num_pg == -1)
1893 m.tx_num_pg = p->tx_num_pgs;
1894 if (m.rx_num_pg == -1)
1895 m.rx_num_pg = p->rx_num_pgs;
1896 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1897 return -EINVAL;
1898 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1899 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1900 return -EINVAL;
1901 p->rx_pg_size = m.rx_pg_sz;
1902 p->tx_pg_size = m.tx_pg_sz;
1903 p->rx_num_pgs = m.rx_num_pg;
1904 p->tx_num_pgs = m.tx_num_pg;
1905 break;
1906 }
1907 case CHELSIO_GET_MEM:{
1908 struct ch_mem_range t;
1909 struct mc7 *mem;
1910 u64 buf[32];
1911
1912 if (!is_offload(adapter))
1913 return -EOPNOTSUPP;
1914 if (!(adapter->flags & FULL_INIT_DONE))
1915 return -EIO; /* need the memory controllers */
1916 if (copy_from_user(&t, useraddr, sizeof(t)))
1917 return -EFAULT;
1918 if ((t.addr & 7) || (t.len & 7))
1919 return -EINVAL;
1920 if (t.mem_id == MEM_CM)
1921 mem = &adapter->cm;
1922 else if (t.mem_id == MEM_PMRX)
1923 mem = &adapter->pmrx;
1924 else if (t.mem_id == MEM_PMTX)
1925 mem = &adapter->pmtx;
1926 else
1927 return -EINVAL;
1928
1929 /*
1930 * Version scheme:
1931 * bits 0..9: chip version
1932 * bits 10..15: chip revision
1933 */
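 /* For example, a chip with params.rev == 2 reports 3 | (2 << 10) == 0x803. */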
1934 t.version = 3 | (adapter->params.rev << 10);
1935 if (copy_to_user(useraddr, &t, sizeof(t)))
1936 return -EFAULT;
1937
1938 /*
1939 * Read 256 bytes at a time as len can be large and we don't
1940 * want to use huge intermediate buffers.
1941 */
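 /* buf holds 32 u64s, i.e. exactly the 256 bytes mentioned above. */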
1942 useraddr += sizeof(t); /* advance to start of buffer */
1943 while (t.len) {
1944 unsigned int chunk =
1945 min_t(unsigned int, t.len, sizeof(buf));
1946
1947 ret =
1948 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1949 buf);
1950 if (ret)
1951 return ret;
1952 if (copy_to_user(useraddr, buf, chunk))
1953 return -EFAULT;
1954 useraddr += chunk;
1955 t.addr += chunk;
1956 t.len -= chunk;
1957 }
1958 break;
1959 }
1960 case CHELSIO_SET_TRACE_FILTER:{
1961 struct ch_trace t;
1962 const struct trace_params *tp;
1963
1964 if (!capable(CAP_NET_ADMIN))
1965 return -EPERM;
1966 if (!offload_running(adapter))
1967 return -EAGAIN;
1968 if (copy_from_user(&t, useraddr, sizeof(t)))
1969 return -EFAULT;
1970
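 /*
  * The cast below relies on the ch_trace fields from sip onwards being
  * laid out like struct trace_params, so the user-supplied filter can be
  * handed to t3_config_trace_filter() directly.
  */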
1971 tp = (const struct trace_params *)&t.sip;
1972 if (t.config_tx)
1973 t3_config_trace_filter(adapter, tp, 0,
1974 t.invert_match,
1975 t.trace_tx);
1976 if (t.config_rx)
1977 t3_config_trace_filter(adapter, tp, 1,
1978 t.invert_match,
1979 t.trace_rx);
1980 break;
1981 }
1982 default:
1983 return -EOPNOTSUPP;
1984 }
1985 return 0;
1986}
1987
1988static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1989{
4d22de3e 1990 struct mii_ioctl_data *data = if_mii(req);
1991 struct port_info *pi = netdev_priv(dev);
1992 struct adapter *adapter = pi->adapter;
1993 int ret, mmd;
1994
1995 switch (cmd) {
1996 case SIOCGMIIPHY:
1997 data->phy_id = pi->phy.addr;
1998 /* FALLTHRU */
1999 case SIOCGMIIREG:{
2000 u32 val;
2001 struct cphy *phy = &pi->phy;
2002
2003 if (!phy->mdio_read)
2004 return -EOPNOTSUPP;
2005 if (is_10G(adapter)) {
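 /*
  * For 10G PHYs the MDIO device (MMD) address travels in the upper
  * byte of phy_id; default to the PCS device when none is given.
  */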
2006 mmd = data->phy_id >> 8;
2007 if (!mmd)
2008 mmd = MDIO_DEV_PCS;
2009 else if (mmd > MDIO_DEV_XGXS)
2010 return -EINVAL;
2011
2012 ret =
2013 phy->mdio_read(adapter, data->phy_id & 0x1f,
2014 mmd, data->reg_num, &val);
2015 } else
2016 ret =
2017 phy->mdio_read(adapter, data->phy_id & 0x1f,
2018 0, data->reg_num & 0x1f,
2019 &val);
2020 if (!ret)
2021 data->val_out = val;
2022 break;
2023 }
2024 case SIOCSMIIREG:{
2025 struct cphy *phy = &pi->phy;
2026
2027 if (!capable(CAP_NET_ADMIN))
2028 return -EPERM;
2029 if (!phy->mdio_write)
2030 return -EOPNOTSUPP;
2031 if (is_10G(adapter)) {
2032 mmd = data->phy_id >> 8;
2033 if (!mmd)
2034 mmd = MDIO_DEV_PCS;
2035 else if (mmd > MDIO_DEV_XGXS)
2036 return -EINVAL;
2037
2038 ret =
2039 phy->mdio_write(adapter,
2040 data->phy_id & 0x1f, mmd,
2041 data->reg_num,
2042 data->val_in);
2043 } else
2044 ret =
2045 phy->mdio_write(adapter,
2046 data->phy_id & 0x1f, 0,
2047 data->reg_num & 0x1f,
2048 data->val_in);
2049 break;
2050 }
2051 case SIOCCHIOCTL:
2052 return cxgb_extension_ioctl(dev, req->ifr_data);
2053 default:
2054 return -EOPNOTSUPP;
2055 }
2056 return ret;
2057}
2058
2059static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2060{
4d22de3e 2061 struct port_info *pi = netdev_priv(dev);
2062 struct adapter *adapter = pi->adapter;
2063 int ret;
2064
2065 if (new_mtu < 81) /* accommodate SACK */
2066 return -EINVAL;
2067 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2068 return ret;
2069 dev->mtu = new_mtu;
2070 init_port_mtus(adapter);
2071 if (adapter->params.rev == 0 && offload_running(adapter))
2072 t3_load_mtus(adapter, adapter->params.mtus,
2073 adapter->params.a_wnd, adapter->params.b_wnd,
2074 adapter->port[0]->mtu);
2075 return 0;
2076}
2077
2078static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2079{
4d22de3e 2080 struct port_info *pi = netdev_priv(dev);
5fbf816f 2081 struct adapter *adapter = pi->adapter;
2082 struct sockaddr *addr = p;
2083
2084 if (!is_valid_ether_addr(addr->sa_data))
2085 return -EINVAL;
2086
2087 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2088 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2089 if (offload_running(adapter))
2090 write_smt_entry(adapter, pi->port_id);
2091 return 0;
2092}
2093
2094/**
2095 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2096 * @adap: the adapter
2097 * @p: the port
2098 *
2099 * Ensures that current Rx processing on any of the queues associated with
2100 * the given port completes before returning. We do this by acquiring and
2101 * releasing the locks of the response queues associated with the port.
2102 */
2103static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2104{
2105 int i;
2106
2107 for (i = 0; i < p->nqsets; i++) {
2108 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2109
2110 spin_lock_irq(&q->lock);
2111 spin_unlock_irq(&q->lock);
2112 }
2113}
2114
2115static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2116{
4d22de3e 2117 struct port_info *pi = netdev_priv(dev);
5fbf816f 2118 struct adapter *adapter = pi->adapter;
2119
2120 pi->vlan_grp = grp;
2121 if (adapter->params.rev > 0)
2122 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2123 else {
2124 /* single control for all ports */
2125 unsigned int i, have_vlans = 0;
2126 for_each_port(adapter, i)
2127 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2128
2129 t3_set_vlan_accel(adapter, 1, have_vlans);
2130 }
2131 t3_synchronize_rx(adapter, pi);
2132}
2133
2134#ifdef CONFIG_NET_POLL_CONTROLLER
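/*
 * Netpoll entry point: invoke the interrupt handler directly for each of the
 * port's queue sets, passing the queue set (MSI-X) or the adapter (MSI/legacy
 * interrupts) as the cookie.
 */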
2135static void cxgb_netpoll(struct net_device *dev)
2136{
890de332 2137 struct port_info *pi = netdev_priv(dev);
5fbf816f 2138 struct adapter *adapter = pi->adapter;
890de332 2139 int qidx;
4d22de3e 2140
2141 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2142 struct sge_qset *qs = &adapter->sge.qs[qidx];
2143 void *source;
2144
2145 if (adapter->flags & USING_MSIX)
2146 source = qs;
2147 else
2148 source = adapter;
2149
2150 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2151 }
2152}
2153#endif
2154
2155/*
2156 * Periodic accumulation of MAC statistics.
2157 */
2158static void mac_stats_update(struct adapter *adapter)
2159{
2160 int i;
2161
2162 for_each_port(adapter, i) {
2163 struct net_device *dev = adapter->port[i];
2164 struct port_info *p = netdev_priv(dev);
2165
2166 if (netif_running(dev)) {
2167 spin_lock(&adapter->stats_lock);
2168 t3_mac_update_stats(&p->mac);
2169 spin_unlock(&adapter->stats_lock);
2170 }
2171 }
2172}
2173
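/*
 * Poll the link state of ports whose PHYs do not generate interrupts and
 * report any changes.
 */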
2174static void check_link_status(struct adapter *adapter)
2175{
2176 int i;
2177
2178 for_each_port(adapter, i) {
2179 struct net_device *dev = adapter->port[i];
2180 struct port_info *p = netdev_priv(dev);
2181
2182 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2183 t3_link_changed(adapter, i);
2184 }
2185}
2186
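/*
 * Periodic watchdog for T3B2 MACs.  t3b2_mac_watchdog_task() returns 1 when
 * the MAC was merely toggled (counted in num_toggled) and 2 when it was
 * reset, in which case the MTU, MAC address, Rx mode and link are
 * reprogrammed below (counted in num_resets).
 */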
2187static void check_t3b2_mac(struct adapter *adapter)
2188{
2189 int i;
2190
2191 if (!rtnl_trylock()) /* synchronize with ifdown */
2192 return;
2193
2194 for_each_port(adapter, i) {
2195 struct net_device *dev = adapter->port[i];
2196 struct port_info *p = netdev_priv(dev);
2197 int status;
2198
2199 if (!netif_running(dev))
2200 continue;
2201
2202 status = 0;
6d6dabac 2203 if (netif_running(dev) && netif_carrier_ok(dev))
2204 status = t3b2_mac_watchdog_task(&p->mac);
2205 if (status == 1)
2206 p->mac.stats.num_toggled++;
2207 else if (status == 2) {
2208 struct cmac *mac = &p->mac;
2209
2210 t3_mac_set_mtu(mac, dev->mtu);
2211 t3_mac_set_address(mac, 0, dev->dev_addr);
2212 cxgb_set_rxmode(dev);
2213 t3_link_start(&p->phy, mac, &p->link_config);
2214 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2215 t3_port_intr_enable(adapter, p->port_id);
2216 p->mac.stats.num_resets++;
2217 }
2218 }
2219 rtnl_unlock();
2220}
2221
2222
2223static void t3_adap_check_task(struct work_struct *work)
2224{
2225 struct adapter *adapter = container_of(work, struct adapter,
2226 adap_check_task.work);
2227 const struct adapter_params *p = &adapter->params;
2228
2229 adapter->check_task_cnt++;
2230
2231 /* Check link status for PHYs without interrupts */
2232 if (p->linkpoll_period)
2233 check_link_status(adapter);
2234
2235 /* Accumulate MAC stats if needed */
2236 if (!p->linkpoll_period ||
2237 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2238 p->stats_update_period) {
2239 mac_stats_update(adapter);
2240 adapter->check_task_cnt = 0;
2241 }
2242
2243 if (p->rev == T3_REV_B2)
2244 check_t3b2_mac(adapter);
2245
2246 /* Schedule the next check update if any port is active. */
2247 spin_lock(&adapter->work_lock);
2248 if (adapter->open_device_map & PORT_MASK)
2249 schedule_chk_task(adapter);
2250 spin_unlock(&adapter->work_lock);
2251}
2252
2253/*
2254 * Processes external (PHY) interrupts in process context.
2255 */
2256static void ext_intr_task(struct work_struct *work)
2257{
2258 struct adapter *adapter = container_of(work, struct adapter,
2259 ext_intr_handler_task);
2260
2261 t3_phy_intr_handler(adapter);
2262
2263 /* Now reenable external interrupts */
2264 spin_lock_irq(&adapter->work_lock);
2265 if (adapter->slow_intr_mask) {
2266 adapter->slow_intr_mask |= F_T3DBG;
2267 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2268 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2269 adapter->slow_intr_mask);
2270 }
2271 spin_unlock_irq(&adapter->work_lock);
2272}
2273
2274/*
2275 * Interrupt-context handler for external (PHY) interrupts.
2276 */
2277void t3_os_ext_intr_handler(struct adapter *adapter)
2278{
2279 /*
2280 * Schedule a task to handle external interrupts as they may be slow
2281 * and we use a mutex to protect MDIO registers. We disable PHY
2282 * interrupts in the meantime and let the task reenable them when
2283 * it's done.
2284 */
2285 spin_lock(&adapter->work_lock);
2286 if (adapter->slow_intr_mask) {
2287 adapter->slow_intr_mask &= ~F_T3DBG;
2288 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2289 adapter->slow_intr_mask);
2290 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2291 }
2292 spin_unlock(&adapter->work_lock);
2293}
2294
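/*
 * Handle a fatal hardware error: if the adapter is fully initialized, stop
 * the SGE, shut down both MACs' Tx and Rx paths and disable interrupts, then
 * log the firmware status words to aid diagnosis.
 */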
2295void t3_fatal_err(struct adapter *adapter)
2296{
2297 unsigned int fw_status[4];
2298
2299 if (adapter->flags & FULL_INIT_DONE) {
2300 t3_sge_stop(adapter);
2301 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2302 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2303 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2304 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2305 t3_intr_disable(adapter);
2306 }
2307 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2308 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2309 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2310 fw_status[0], fw_status[1],
2311 fw_status[2], fw_status[3]);
2312
2313}
2314
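/*
 * Try to switch to MSI-X: request one vector per SGE queue set plus one
 * extra, and record the assigned vectors on success.  A positive return from
 * pci_enable_msix() means fewer vectors than requested are available, in
 * which case we give up on MSI-X.
 */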
2315static int __devinit cxgb_enable_msix(struct adapter *adap)
2316{
2317 struct msix_entry entries[SGE_QSETS + 1];
2318 int i, err;
2319
2320 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2321 entries[i].entry = i;
2322
2323 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2324 if (!err) {
2325 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2326 adap->msix_info[i].vec = entries[i].vector;
2327 } else if (err > 0)
2328 dev_info(&adap->pdev->dev,
2329 "only %d MSI-X vectors left, not using MSI-X\n", err);
2330 return err;
2331}
2332
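/*
 * Print a one-line summary for every registered port (adapter and PHY type,
 * chip revision, bus parameters, interrupt mode) and, for the first
 * registered port of an adapter with a memory clock configured, the
 * CM/PMTX/PMRX sizes and serial number.
 */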
2333static void __devinit print_port_info(struct adapter *adap,
2334 const struct adapter_info *ai)
2335{
2336 static const char *pci_variant[] = {
2337 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2338 };
2339
2340 int i;
2341 char buf[80];
2342
2343 if (is_pcie(adap))
2344 snprintf(buf, sizeof(buf), "%s x%d",
2345 pci_variant[adap->params.pci.variant],
2346 adap->params.pci.width);
2347 else
2348 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2349 pci_variant[adap->params.pci.variant],
2350 adap->params.pci.speed, adap->params.pci.width);
2351
2352 for_each_port(adap, i) {
2353 struct net_device *dev = adap->port[i];
2354 const struct port_info *pi = netdev_priv(dev);
2355
2356 if (!test_bit(i, &adap->registered_device_map))
2357 continue;
8ac3ba68 2358 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
4d22de3e 2359 dev->name, ai->desc, pi->port_type->desc,
8ac3ba68 2360 is_offload(adap) ? "R" : "", adap->params.rev, buf,
4d22de3e
DLR
2361 (adap->flags & USING_MSIX) ? " MSI-X" :
2362 (adap->flags & USING_MSI) ? " MSI" : "");
2363 if (adap->name == dev->name && adap->params.vpd.mclk)
2364 printk(KERN_INFO
2365 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2366 adap->name, t3_mc7_size(&adap->cm) >> 20,
2367 t3_mc7_size(&adap->pmtx) >> 20,
2368 t3_mc7_size(&adap->pmrx) >> 20,
2369 adap->params.vpd.sn);
2370 }
2371}
2372
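/*
 * PCI probe routine: claim the device, set up the DMA masks, map the register
 * BAR, allocate the adapter and its net devices, register the ports and pick
 * an interrupt mode (MSI-X, MSI or legacy INTx).
 */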
2373static int __devinit init_one(struct pci_dev *pdev,
2374 const struct pci_device_id *ent)
2375{
2376 static int version_printed;
2377
2378 int i, err, pci_using_dac = 0;
2379 unsigned long mmio_start, mmio_len;
2380 const struct adapter_info *ai;
2381 struct adapter *adapter = NULL;
2382 struct port_info *pi;
2383
2384 if (!version_printed) {
2385 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2386 ++version_printed;
2387 }
2388
2389 if (!cxgb3_wq) {
2390 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2391 if (!cxgb3_wq) {
2392 printk(KERN_ERR DRV_NAME
2393 ": cannot initialize work queue\n");
2394 return -ENOMEM;
2395 }
2396 }
2397
2398 err = pci_request_regions(pdev, DRV_NAME);
2399 if (err) {
2400 /* Just info, some other driver may have claimed the device. */
2401 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2402 return err;
2403 }
2404
2405 err = pci_enable_device(pdev);
2406 if (err) {
2407 dev_err(&pdev->dev, "cannot enable PCI device\n");
2408 goto out_release_regions;
2409 }
2410
2411 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2412 pci_using_dac = 1;
2413 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2414 if (err) {
2415 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2416 "coherent allocations\n");
2417 goto out_disable_device;
2418 }
2419 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2420 dev_err(&pdev->dev, "no usable DMA configuration\n");
2421 goto out_disable_device;
2422 }
2423
2424 pci_set_master(pdev);
2425
2426 mmio_start = pci_resource_start(pdev, 0);
2427 mmio_len = pci_resource_len(pdev, 0);
2428 ai = t3_get_adapter_info(ent->driver_data);
2429
2430 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2431 if (!adapter) {
2432 err = -ENOMEM;
2433 goto out_disable_device;
2434 }
2435
2436 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2437 if (!adapter->regs) {
2438 dev_err(&pdev->dev, "cannot map device registers\n");
2439 err = -ENOMEM;
2440 goto out_free_adapter;
2441 }
2442
2443 adapter->pdev = pdev;
2444 adapter->name = pci_name(pdev);
2445 adapter->msg_enable = dflt_msg_enable;
2446 adapter->mmio_len = mmio_len;
2447
2448 mutex_init(&adapter->mdio_lock);
2449 spin_lock_init(&adapter->work_lock);
2450 spin_lock_init(&adapter->stats_lock);
2451
2452 INIT_LIST_HEAD(&adapter->adapter_list);
2453 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2454 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2455
2456 for (i = 0; i < ai->nports; ++i) {
2457 struct net_device *netdev;
2458
2459 netdev = alloc_etherdev(sizeof(struct port_info));
2460 if (!netdev) {
2461 err = -ENOMEM;
2462 goto out_free_dev;
2463 }
2464
2465 SET_MODULE_OWNER(netdev);
2466 SET_NETDEV_DEV(netdev, &pdev->dev);
2467
2468 adapter->port[i] = netdev;
2469 pi = netdev_priv(netdev);
5fbf816f 2470 pi->adapter = adapter;
2471 pi->rx_csum_offload = 1;
2472 pi->nqsets = 1;
2473 pi->first_qset = i;
2474 pi->activity = 0;
2475 pi->port_id = i;
2476 netif_carrier_off(netdev);
2477 netdev->irq = pdev->irq;
2478 netdev->mem_start = mmio_start;
2479 netdev->mem_end = mmio_start + mmio_len - 1;
2480 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2481 netdev->features |= NETIF_F_LLTX;
2482 if (pci_using_dac)
2483 netdev->features |= NETIF_F_HIGHDMA;
2484
2485 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2486 netdev->vlan_rx_register = vlan_rx_register;
2487
2488 netdev->open = cxgb_open;
2489 netdev->stop = cxgb_close;
2490 netdev->hard_start_xmit = t3_eth_xmit;
2491 netdev->get_stats = cxgb_get_stats;
2492 netdev->set_multicast_list = cxgb_set_rxmode;
2493 netdev->do_ioctl = cxgb_ioctl;
2494 netdev->change_mtu = cxgb_change_mtu;
2495 netdev->set_mac_address = cxgb_set_mac_addr;
2496#ifdef CONFIG_NET_POLL_CONTROLLER
2497 netdev->poll_controller = cxgb_netpoll;
2498#endif
2499
2500 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2501 }
2502
5fbf816f 2503 pci_set_drvdata(pdev, adapter);
2504 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2505 err = -ENODEV;
2506 goto out_free_dev;
2507 }
480fe1a3 2508
2509 /*
2510 * The card is now ready to go. If any errors occur during device
2511 * registration we do not fail the whole card but rather proceed only
2512 * with the ports we manage to register successfully. However we must
2513 * register at least one net device.
2514 */
2515 for_each_port(adapter, i) {
2516 err = register_netdev(adapter->port[i]);
2517 if (err)
2518 dev_warn(&pdev->dev,
2519 "cannot register net device %s, skipping\n",
2520 adapter->port[i]->name);
2521 else {
2522 /*
2523 * Change the name we use for messages to the name of
2524 * the first successfully registered interface.
2525 */
2526 if (!adapter->registered_device_map)
2527 adapter->name = adapter->port[i]->name;
2528
2529 __set_bit(i, &adapter->registered_device_map);
2530 }
2531 }
2532 if (!adapter->registered_device_map) {
2533 dev_err(&pdev->dev, "could not register any net devices\n");
2534 goto out_free_dev;
2535 }
2536
2537 /* Driver's ready. Reflect it on LEDs */
2538 t3_led_ready(adapter);
2539
2540 if (is_offload(adapter)) {
2541 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2542 cxgb3_adapter_ofld(adapter);
2543 }
2544
2545 /* See what interrupts we'll be using */
2546 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2547 adapter->flags |= USING_MSIX;
2548 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2549 adapter->flags |= USING_MSI;
2550
0ee8d33c 2551 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2552 &cxgb3_attr_group);
2553
2554 print_port_info(adapter, ai);
2555 return 0;
2556
2557out_free_dev:
2558 iounmap(adapter->regs);
2559 for (i = ai->nports - 1; i >= 0; --i)
2560 if (adapter->port[i])
2561 free_netdev(adapter->port[i]);
2562
2563out_free_adapter:
2564 kfree(adapter);
2565
2566out_disable_device:
2567 pci_disable_device(pdev);
2568out_release_regions:
2569 pci_release_regions(pdev);
2570 pci_set_drvdata(pdev, NULL);
2571 return err;
2572}
2573
2574static void __devexit remove_one(struct pci_dev *pdev)
2575{
5fbf816f 2576 struct adapter *adapter = pci_get_drvdata(pdev);
4d22de3e 2577
5fbf816f 2578 if (adapter) {
4d22de3e 2579 int i;
2580
2581 t3_sge_stop(adapter);
0ee8d33c 2582 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2583 &cxgb3_attr_group);
2584
2585 for_each_port(adapter, i)
2586 if (test_bit(i, &adapter->registered_device_map))
2587 unregister_netdev(adapter->port[i]);
2588
2589 if (is_offload(adapter)) {
2590 cxgb3_adapter_unofld(adapter);
2591 if (test_bit(OFFLOAD_DEVMAP_BIT,
2592 &adapter->open_device_map))
2593 offload_close(&adapter->tdev);
2594 }
2595
2596 t3_free_sge_resources(adapter);
2597 cxgb_disable_msi(adapter);
2598
2599 for_each_port(adapter, i)
2600 if (adapter->port[i])
2601 free_netdev(adapter->port[i]);
2602
2603 iounmap(adapter->regs);
2604 kfree(adapter);
2605 pci_release_regions(pdev);
2606 pci_disable_device(pdev);
2607 pci_set_drvdata(pdev, NULL);
2608 }
2609}
2610
2611static struct pci_driver driver = {
2612 .name = DRV_NAME,
2613 .id_table = cxgb3_pci_tbl,
2614 .probe = init_one,
2615 .remove = __devexit_p(remove_one),
2616};
2617
2618static int __init cxgb3_init_module(void)
2619{
2620 int ret;
2621
2622 cxgb3_offload_init();
2623
2624 ret = pci_register_driver(&driver);
2625 return ret;
2626}
2627
2628static void __exit cxgb3_cleanup_module(void)
2629{
2630 pci_unregister_driver(&driver);
2631 if (cxgb3_wq)
2632 destroy_workqueue(cxgb3_wq);
2633}
2634
2635module_init(cxgb3_init_module);
2636module_exit(cxgb3_cleanup_module);