/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
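
/*
 * Illustrative use (module parameters are set at load time):
 * "modprobe cxgb3 msi=1" keeps the driver off MSI-X and lets it fall
 * back to MSI or legacy pin interrupts.
 */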

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 * link_report - show link status and link speed/duplex
 * @dev: the net device whose link is to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes.  The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
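
/*
 * These descriptions are what request_irq() registers below, so they are
 * what later shows up in /proc/interrupts, e.g. "eth0 (queue 0)" for the
 * first data queue of the first port (the device name is illustrative).
 */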

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
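
/*
 * Worked example of the mapping above: with nq0 = 4 queue sets on port 0
 * and nq1 = 2 on port 1, the first half of rspq_map cycles through
 * 0,1,2,3,0,1,2,3,... and the second half through 4,5,4,5,..., so each
 * port's hash results land only on that port's own response queues.
 */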

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
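
/*
 * The attributes built on these helpers hang off the port's net device, so
 * (path shown for illustration, assuming the group is attached to the
 * netdev's sysfs directory) something like
 * "echo 4096 > /sys/class/net/eth0/nfilters" ends up in attr_store() with
 * the matching set_*() callback.
 */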

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
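
/*
 * Reading the math backwards (assuming cclk is in kHz, which the *1000
 * suggests): cpt is clocks per token and bpt bytes per token, so
 * (cclk * 1000) / cpt is tokens per second and tokens/sec * bpt is bytes
 * per second; dividing by 125 converts bytes/s to Kbits/s (x8 for bits,
 * /1000 for kilo).  E.g. bpt = 255, cpt = 100 on a 200 MHz core clock
 * gives (200000000 / 100) * 255 / 125 = 4080000 Kbps, about 4 Gbps.
 */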

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 * cxgb_up - enable the adapter
 * @adapter: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;
	int must_load;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
		(HZ * adap->params.linkpoll_period) / 10 :
		adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
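
/*
 * The /10 suggests linkpoll_period is in tenths of a second: e.g.
 * linkpoll_period = 5 yields HZ * 5 / 10 = HZ/2 jiffies, so the check
 * task runs every half second; with link polling disabled it falls back
 * to the whole-second stats update period.
 */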

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		quiesce_rx(adapter);
		return err;
	}

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
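
/*
 * E.g. speed_duplex_to_caps(SPEED_100, DUPLEX_FULL) returns
 * SUPPORTED_100baseT_Full; unsupported combinations (such as 10G half
 * duplex) fall through and return 0, which set_settings() below then
 * rejects.
 */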

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len, *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
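
/*
 * Alignment example: a write of len 6 at offset 5 becomes aligned_offset 4
 * and aligned_len (6 + 1 + 3) & ~3 = 8, i.e. two whole 32-bit words
 * covering bytes 4..11; the first and last words are read back first so
 * the bytes outside the requested range are preserved.
 */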

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};

static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
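
/*
 * Note the deliberate twist: negative values pass the check.  The ioctl
 * structures below use -1 as "leave this parameter unchanged", so e.g.
 * in_range(-1, 4, 16384) is true while in_range(0, 4, 16384) is not.
 */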

static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;
		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = { .cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
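		/*
		 * The magic masks pick out the supported page sizes:
		 * 0x14000 = 16KB | 64KB for rx, and 0x1554000 covers the
		 * powers of 4 from 16KB up to 16MB for tx (0x4000,
		 * 0x10000, 0x40000, ..., 0x1000000).
		 */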
1891 if (!(m.rx_pg_sz & 0x14000))
1892 return -EINVAL; /* not 16KB or 64KB */
1893 if (!(m.tx_pg_sz & 0x1554000))
1894 return -EINVAL; /* not a power of 4 between 16KB and 16MB */
1895 if (m.tx_num_pg == -1)
1896 m.tx_num_pg = p->tx_num_pgs;
1897 if (m.rx_num_pg == -1)
1898 m.rx_num_pg = p->rx_num_pgs;
1899 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1900 return -EINVAL;
1901 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1902 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1903 return -EINVAL;
1904 p->rx_pg_size = m.rx_pg_sz;
1905 p->tx_pg_size = m.tx_pg_sz;
1906 p->rx_num_pgs = m.rx_num_pg;
1907 p->tx_num_pgs = m.tx_num_pg;
1908 break;
1909 }
1910 case CHELSIO_GET_MEM:{
1911 struct ch_mem_range t;
1912 struct mc7 *mem;
1913 u64 buf[32];
1914
1915 if (!is_offload(adapter))
1916 return -EOPNOTSUPP;
1917 if (!(adapter->flags & FULL_INIT_DONE))
1918 return -EIO; /* need the memory controllers */
1919 if (copy_from_user(&t, useraddr, sizeof(t)))
1920 return -EFAULT;
1921 if ((t.addr & 7) || (t.len & 7))
1922 return -EINVAL;
1923 if (t.mem_id == MEM_CM)
1924 mem = &adapter->cm;
1925 else if (t.mem_id == MEM_PMRX)
1926 mem = &adapter->pmrx;
1927 else if (t.mem_id == MEM_PMTX)
1928 mem = &adapter->pmtx;
1929 else
1930 return -EINVAL;
1931
1932 /*
1933 * Version scheme:
1934 * bits 0..9: chip version
1935 * bits 10..15: chip revision
1936 */
1937 t.version = 3 | (adapter->params.rev << 10);
1938 if (copy_to_user(useraddr, &t, sizeof(t)))
1939 return -EFAULT;
1940
1941 /*
1942 * Read 256 bytes at a time as len can be large and we don't
1943 * want to use huge intermediate buffers.
1944 */
1945 useraddr += sizeof(t); /* advance to start of buffer */
1946 while (t.len) {
1947 unsigned int chunk =
1948 min_t(unsigned int, t.len, sizeof(buf));
1949
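/*
 * MC7 is addressed in 64-bit words, hence the /8 on both the
 * address and the length (both were verified 8-byte aligned
 * above).
 */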
1950 ret = t3_mc7_bd_read(mem, t.addr / 8,
1951 chunk / 8,
1952 buf);
1953 if (ret)
1954 return ret;
1955 if (copy_to_user(useraddr, buf, chunk))
1956 return -EFAULT;
1957 useraddr += chunk;
1958 t.addr += chunk;
1959 t.len -= chunk;
1960 }
1961 break;
1962 }
1963 case CHELSIO_SET_TRACE_FILTER:{
1964 struct ch_trace t;
1965 const struct trace_params *tp;
1966
1967 if (!capable(CAP_NET_ADMIN))
1968 return -EPERM;
1969 if (!offload_running(adapter))
1970 return -EAGAIN;
1971 if (copy_from_user(&t, useraddr, sizeof(t)))
1972 return -EFAULT;
1973
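/*
 * This cast relies on the filter members of struct ch_trace
 * (from sip onward) having the same layout as struct
 * trace_params, so the embedded copy can be used directly
 * instead of being duplicated.
 */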
1974 tp = (const struct trace_params *)&t.sip;
1975 if (t.config_tx)
1976 t3_config_trace_filter(adapter, tp, 0,
1977 t.invert_match,
1978 t.trace_tx);
1979 if (t.config_rx)
1980 t3_config_trace_filter(adapter, tp, 1,
1981 t.invert_match,
1982 t.trace_rx);
1983 break;
1984 }
1985 default:
1986 return -EOPNOTSUPP;
1987 }
1988 return 0;
1989}
1990
1991static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1992{
4d22de3e 1993 struct mii_ioctl_data *data = if_mii(req);
1994 struct port_info *pi = netdev_priv(dev);
1995 struct adapter *adapter = pi->adapter;
1996 int ret, mmd;
1997
1998 switch (cmd) {
1999 case SIOCGMIIPHY:
2000 data->phy_id = pi->phy.addr;
2001 /* FALLTHRU */
2002 case SIOCGMIIREG:{
2003 u32 val;
2004 struct cphy *phy = &pi->phy;
2005
2006 if (!phy->mdio_read)
2007 return -EOPNOTSUPP;
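/*
 * For 10G PHYs the high byte of phy_id selects the clause-45
 * MMD to access; an unspecified (zero) MMD defaults to the
 * PCS device.
 */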
2008 if (is_10G(adapter)) {
2009 mmd = data->phy_id >> 8;
2010 if (!mmd)
2011 mmd = MDIO_DEV_PCS;
2012 else if (mmd > MDIO_DEV_XGXS)
2013 return -EINVAL;
2014
2015 ret = phy->mdio_read(adapter,
2016 data->phy_id & 0x1f,
2017 mmd, data->reg_num, &val);
2018 } else
2019 ret = phy->mdio_read(adapter,
2020 data->phy_id & 0x1f, 0,
2021 data->reg_num & 0x1f,
2022 &val);
2023 if (!ret)
2024 data->val_out = val;
2025 break;
2026 }
2027 case SIOCSMIIREG:{
2028 struct cphy *phy = &pi->phy;
2029
2030 if (!capable(CAP_NET_ADMIN))
2031 return -EPERM;
2032 if (!phy->mdio_write)
2033 return -EOPNOTSUPP;
2034 if (is_10G(adapter)) {
2035 mmd = data->phy_id >> 8;
2036 if (!mmd)
2037 mmd = MDIO_DEV_PCS;
2038 else if (mmd > MDIO_DEV_XGXS)
2039 return -EINVAL;
2040
2041 ret = phy->mdio_write(adapter,
2042 data->phy_id & 0x1f,
2043 mmd,
2044 data->reg_num,
2045 data->val_in);
2046 } else
2047 ret = phy->mdio_write(adapter,
2048 data->phy_id & 0x1f,
2049 0,
2050 data->reg_num & 0x1f,
2051 data->val_in);
2052 break;
2053 }
2054 case SIOCCHIOCTL:
2055 return cxgb_extension_ioctl(dev, req->ifr_data);
2056 default:
2057 return -EOPNOTSUPP;
2058 }
2059 return ret;
2060}
2061
2062static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2063{
4d22de3e 2064 struct port_info *pi = netdev_priv(dev);
2065 struct adapter *adapter = pi->adapter;
2066 int ret;
2067
2068 if (new_mtu < 81) /* accommodate SACK */
2069 return -EINVAL;
2070 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2071 return ret;
2072 dev->mtu = new_mtu;
2073 init_port_mtus(adapter);
2074 if (adapter->params.rev == 0 && offload_running(adapter))
2075 t3_load_mtus(adapter, adapter->params.mtus,
2076 adapter->params.a_wnd, adapter->params.b_wnd,
2077 adapter->port[0]->mtu);
2078 return 0;
2079}
2080
2081static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2082{
4d22de3e 2083 struct port_info *pi = netdev_priv(dev);
5fbf816f 2084 struct adapter *adapter = pi->adapter;
2085 struct sockaddr *addr = p;
2086
2087 if (!is_valid_ether_addr(addr->sa_data))
2088 return -EINVAL;
2089
2090 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2091 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2092 if (offload_running(adapter))
2093 write_smt_entry(adapter, pi->port_id);
2094 return 0;
2095}
2096
2097/**
2098 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2099 * @adap: the adapter
2100 * @p: the port
2101 *
2102 * Ensures that current Rx processing on any of the queues associated with
2103 * the given port completes before returning. We do this by acquiring and
2104 * releasing the locks of the response queues associated with the port.
2105 */
2106static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2107{
2108 int i;
2109
2110 for (i = 0; i < p->nqsets; i++) {
2111 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2112
2113 spin_lock_irq(&q->lock);
2114 spin_unlock_irq(&q->lock);
2115 }
2116}
2117
2118static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2119{
4d22de3e 2120 struct port_info *pi = netdev_priv(dev);
5fbf816f 2121 struct adapter *adapter = pi->adapter;
2122
2123 pi->vlan_grp = grp;
2124 if (adapter->params.rev > 0)
2125 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2126 else {
2127 /* single control for all ports */
2128 unsigned int i, have_vlans = 0;
2129 for_each_port(adapter, i)
2130 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2131
2132 t3_set_vlan_accel(adapter, 1, have_vlans);
2133 }
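/*
 * Wait for any Rx processing still in flight to finish, so no
 * response-queue handler can be left looking at a stale vlan_grp.
 */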
2134 t3_synchronize_rx(adapter, pi);
2135}
2136
2137#ifdef CONFIG_NET_POLL_CONTROLLER
2138static void cxgb_netpoll(struct net_device *dev)
2139{
890de332 2140 struct port_info *pi = netdev_priv(dev);
5fbf816f 2141 struct adapter *adapter = pi->adapter;
890de332 2142 int qidx;
4d22de3e 2143
2144 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2145 struct sge_qset *qs = &adapter->sge.qs[qidx];
2146 void *source;
2147
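/*
 * Hand the interrupt handler the same dev_id it would get from
 * request_irq: the queue set under MSI-X, the adapter otherwise.
 */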
2148 if (adapter->flags & USING_MSIX)
2149 source = qs;
2150 else
2151 source = adapter;
2152
2153 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2154 }
2155}
2156#endif
2157
2158/*
2159 * Periodic accumulation of MAC statistics.
2160 */
2161static void mac_stats_update(struct adapter *adapter)
2162{
2163 int i;
2164
2165 for_each_port(adapter, i) {
2166 struct net_device *dev = adapter->port[i];
2167 struct port_info *p = netdev_priv(dev);
2168
2169 if (netif_running(dev)) {
2170 spin_lock(&adapter->stats_lock);
2171 t3_mac_update_stats(&p->mac);
2172 spin_unlock(&adapter->stats_lock);
2173 }
2174 }
2175}
2176
2177static void check_link_status(struct adapter *adapter)
2178{
2179 int i;
2180
2181 for_each_port(adapter, i) {
2182 struct net_device *dev = adapter->port[i];
2183 struct port_info *p = netdev_priv(dev);
2184
2185 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2186 t3_link_changed(adapter, i);
2187 }
2188}
2189
2190static void check_t3b2_mac(struct adapter *adapter)
2191{
2192 int i;
2193
2194 if (!rtnl_trylock()) /* synchronize with ifdown */
2195 return;
2196
2197 for_each_port(adapter, i) {
2198 struct net_device *dev = adapter->port[i];
2199 struct port_info *p = netdev_priv(dev);
2200 int status;
2201
2202 if (!netif_running(dev))
2203 continue;
2204
2205 status = 0;
6d6dabac 2206 if (netif_carrier_ok(dev))
2207 status = t3b2_mac_watchdog_task(&p->mac);
2208 if (status == 1)
2209 p->mac.stats.num_toggled++;
2210 else if (status == 2) {
2211 struct cmac *mac = &p->mac;
2212
2213 t3_mac_set_mtu(mac, dev->mtu);
2214 t3_mac_set_address(mac, 0, dev->dev_addr);
2215 cxgb_set_rxmode(dev);
2216 t3_link_start(&p->phy, mac, &p->link_config);
2217 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2218 t3_port_intr_enable(adapter, p->port_id);
2219 p->mac.stats.num_resets++;
2220 }
2221 }
2222 rtnl_unlock();
2223}
2224
2225
2226static void t3_adap_check_task(struct work_struct *work)
2227{
2228 struct adapter *adapter = container_of(work, struct adapter,
2229 adap_check_task.work);
2230 const struct adapter_params *p = &adapter->params;
2231
2232 adapter->check_task_cnt++;
2233
2234 /* Check link status for PHYs without interrupts */
2235 if (p->linkpoll_period)
2236 check_link_status(adapter);
2237
2238 /* Accumulate MAC stats if needed; linkpoll_period is in 0.1s units */
2239 if (!p->linkpoll_period ||
2240 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2241 p->stats_update_period) {
2242 mac_stats_update(adapter);
2243 adapter->check_task_cnt = 0;
2244 }
2245
2246 if (p->rev == T3_REV_B2)
2247 check_t3b2_mac(adapter);
2248
2249 /* Schedule the next check update if any port is active. */
2250 spin_lock(&adapter->work_lock);
2251 if (adapter->open_device_map & PORT_MASK)
2252 schedule_chk_task(adapter);
2253 spin_unlock(&adapter->work_lock);
2254}
2255
2256/*
2257 * Processes external (PHY) interrupts in process context.
2258 */
2259static void ext_intr_task(struct work_struct *work)
2260{
2261 struct adapter *adapter = container_of(work, struct adapter,
2262 ext_intr_handler_task);
2263
2264 t3_phy_intr_handler(adapter);
2265
2266 /* Now reenable external interrupts */
2267 spin_lock_irq(&adapter->work_lock);
2268 if (adapter->slow_intr_mask) {
2269 adapter->slow_intr_mask |= F_T3DBG;
2270 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2271 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2272 adapter->slow_intr_mask);
2273 }
2274 spin_unlock_irq(&adapter->work_lock);
2275}
2276
2277/*
2278 * Interrupt-context handler for external (PHY) interrupts.
2279 */
2280void t3_os_ext_intr_handler(struct adapter *adapter)
2281{
2282 /*
2283 * Schedule a task to handle external interrupts as they may be slow
2284 * and we use a mutex to protect MDIO registers. We disable PHY
2285 * interrupts in the meantime and let the task reenable them when
2286 * it's done.
2287 */
2288 spin_lock(&adapter->work_lock);
2289 if (adapter->slow_intr_mask) {
2290 adapter->slow_intr_mask &= ~F_T3DBG;
2291 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2292 adapter->slow_intr_mask);
2293 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2294 }
2295 spin_unlock(&adapter->work_lock);
2296}
2297
2298void t3_fatal_err(struct adapter *adapter)
2299{
2300 unsigned int fw_status[4];
2301
2302 if (adapter->flags & FULL_INIT_DONE) {
2303 t3_sge_stop(adapter);
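/*
 * Quiesce the Tx and Rx sides of both ports' XGMACs before
 * masking interrupts, so the hardware stops moving data.
 */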
2304 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2305 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2306 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2307 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2308 t3_intr_disable(adapter);
2309 }
2310 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2311 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2312 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2313 fw_status[0], fw_status[1],
2314 fw_status[2], fw_status[3]);
2315
2316}
2317
2318/**
2319 * t3_io_error_detected - called when PCI error is detected
2320 * @pdev: Pointer to PCI device
2321 * @state: The current pci connection state
2322 *
2323 * This function is called after a PCI bus error affecting
2324 * this device has been detected.
2325 */
2326static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2327 pci_channel_state_t state)
2328{
2329 struct net_device *dev = pci_get_drvdata(pdev);
2330 struct port_info *pi = netdev_priv(dev);
2331 struct adapter *adapter = pi->adapter;
2332 int i;
2333
2334 /* Stop all ports */
2335 for_each_port(adapter, i) {
2336 struct net_device *netdev = adapter->port[i];
2337
2338 if (netif_running(netdev))
2339 cxgb_close(netdev);
2340 }
2341
2342 if (is_offload(adapter) &&
2343 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2344 offload_close(&adapter->tdev);
2345
2346 /* Free sge resources */
2347 t3_free_sge_resources(adapter);
2348
2349 adapter->flags &= ~FULL_INIT_DONE;
2350
2351 pci_disable_device(pdev);
2352
2353 /* Request a slot reset. */
2354 return PCI_ERS_RESULT_NEED_RESET;
2355}
2356
2357/**
2358 * t3_io_slot_reset - called after the pci bus has been reset.
2359 * @pdev: Pointer to PCI device
2360 *
2361 * Restart the card from scratch, as if from a cold-boot.
2362 */
2363static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2364{
2365 struct net_device *dev = pci_get_drvdata(pdev);
2366 struct port_info *pi = netdev_priv(dev);
2367 struct adapter *adapter = pi->adapter;
2368
2369 if (pci_enable_device(pdev)) {
2370 dev_err(&pdev->dev,
2371 "Cannot re-enable PCI device after reset.\n");
2372 return PCI_ERS_RESULT_DISCONNECT;
2373 }
2374 pci_set_master(pdev);
2375
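/*
 * Redo the one-time adapter preparation as on a cold boot; the
 * ports themselves are reopened later in t3_io_resume().
 */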
2376 t3_prep_adapter(adapter, adapter->params.info, 1);
2377
2378 return PCI_ERS_RESULT_RECOVERED;
2379}
2380
2381/**
2382 * t3_io_resume - called when traffic can start flowing again.
2383 * @pdev: Pointer to PCI device
2384 *
2385 * This callback is called when the error recovery driver tells us that
2386 * it's OK to resume normal operation.
2387 */
2388static void t3_io_resume(struct pci_dev *pdev)
2389{
2390 struct net_device *dev = pci_get_drvdata(pdev);
2391 struct port_info *pi = netdev_priv(dev);
2392 struct adapter *adapter = pi->adapter;
2393 int i;
2394
2395 /* Restart the ports */
2396 for_each_port(adapter, i) {
2397 struct net_device *netdev = adapter->port[i];
2398
2399 if (netif_running(netdev)) {
2400 if (cxgb_open(netdev)) {
2401 dev_err(&pdev->dev,
2402 "can't bring device back up"
2403 " after reset\n");
2404 continue;
2405 }
2406 netif_device_attach(netdev);
2407 }
2408 }
2409
2410 if (is_offload(adapter)) {
2411 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2412 if (offload_open(dev))
2413 printk(KERN_WARNING
2414 "Could not bring back offload capabilities\n");
2415 }
2416}
2417
2418static struct pci_error_handlers t3_err_handler = {
2419 .error_detected = t3_io_error_detected,
2420 .slot_reset = t3_io_slot_reset,
2421 .resume = t3_io_resume,
2422};
2423
2424static int __devinit cxgb_enable_msix(struct adapter *adap)
2425{
2426 struct msix_entry entries[SGE_QSETS + 1];
2427 int i, err;
2428
2429 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2430 entries[i].entry = i;
2431
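/*
 * pci_enable_msix() returns 0 on success; a positive return is
 * the number of vectors that could have been allocated, which is
 * reported below before falling back to MSI/INTx.
 */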
2432 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2433 if (!err) {
2434 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2435 adap->msix_info[i].vec = entries[i].vector;
2436 } else if (err > 0)
2437 dev_info(&adap->pdev->dev,
2438 "only %d MSI-X vectors left, not using MSI-X\n", err);
2439 return err;
2440}
2441
2442static void __devinit print_port_info(struct adapter *adap,
2443 const struct adapter_info *ai)
2444{
2445 static const char *pci_variant[] = {
2446 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2447 };
2448
2449 int i;
2450 char buf[80];
2451
2452 if (is_pcie(adap))
2453 snprintf(buf, sizeof(buf), "%s x%d",
2454 pci_variant[adap->params.pci.variant],
2455 adap->params.pci.width);
2456 else
2457 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2458 pci_variant[adap->params.pci.variant],
2459 adap->params.pci.speed, adap->params.pci.width);
2460
2461 for_each_port(adap, i) {
2462 struct net_device *dev = adap->port[i];
2463 const struct port_info *pi = netdev_priv(dev);
2464
2465 if (!test_bit(i, &adap->registered_device_map))
2466 continue;
8ac3ba68 2467 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
4d22de3e 2468 dev->name, ai->desc, pi->port_type->desc,
8ac3ba68 2469 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2470 (adap->flags & USING_MSIX) ? " MSI-X" :
2471 (adap->flags & USING_MSI) ? " MSI" : "");
2472 if (adap->name == dev->name && adap->params.vpd.mclk)
2473 printk(KERN_INFO
2474 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2475 adap->name, t3_mc7_size(&adap->cm) >> 20,
2476 t3_mc7_size(&adap->pmtx) >> 20,
2477 t3_mc7_size(&adap->pmrx) >> 20,
2478 adap->params.vpd.sn);
2479 }
2480}
2481
2482static int __devinit init_one(struct pci_dev *pdev,
2483 const struct pci_device_id *ent)
2484{
2485 static int version_printed;
2486
2487 int i, err, pci_using_dac = 0;
2488 unsigned long mmio_start, mmio_len;
2489 const struct adapter_info *ai;
2490 struct adapter *adapter = NULL;
2491 struct port_info *pi;
2492
2493 if (!version_printed) {
2494 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2495 ++version_printed;
2496 }
2497
2498 if (!cxgb3_wq) {
2499 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2500 if (!cxgb3_wq) {
2501 printk(KERN_ERR DRV_NAME
2502 ": cannot initialize work queue\n");
2503 return -ENOMEM;
2504 }
2505 }
2506
2507 err = pci_request_regions(pdev, DRV_NAME);
2508 if (err) {
2509 /* Just info, some other driver may have claimed the device. */
2510 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2511 return err;
2512 }
2513
2514 err = pci_enable_device(pdev);
2515 if (err) {
2516 dev_err(&pdev->dev, "cannot enable PCI device\n");
2517 goto out_release_regions;
2518 }
2519
2520 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2521 pci_using_dac = 1;
2522 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2523 if (err) {
2524 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2525 "coherent allocations\n");
2526 goto out_disable_device;
2527 }
2528 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2529 dev_err(&pdev->dev, "no usable DMA configuration\n");
2530 goto out_disable_device;
2531 }
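/*
 * Prefer 64-bit DMA and fall back to 32-bit masks otherwise;
 * pci_using_dac gates NETIF_F_HIGHDMA on the netdevs below.
 */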
2532
2533 pci_set_master(pdev);
2534
2535 mmio_start = pci_resource_start(pdev, 0);
2536 mmio_len = pci_resource_len(pdev, 0);
2537 ai = t3_get_adapter_info(ent->driver_data);
2538
2539 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2540 if (!adapter) {
2541 err = -ENOMEM;
2542 goto out_disable_device;
2543 }
2544
2545 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2546 if (!adapter->regs) {
2547 dev_err(&pdev->dev, "cannot map device registers\n");
2548 err = -ENOMEM;
2549 goto out_free_adapter;
2550 }
2551
2552 adapter->pdev = pdev;
2553 adapter->name = pci_name(pdev);
2554 adapter->msg_enable = dflt_msg_enable;
2555 adapter->mmio_len = mmio_len;
2556
2557 mutex_init(&adapter->mdio_lock);
2558 spin_lock_init(&adapter->work_lock);
2559 spin_lock_init(&adapter->stats_lock);
2560
2561 INIT_LIST_HEAD(&adapter->adapter_list);
2562 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2563 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2564
2565 for (i = 0; i < ai->nports; ++i) {
2566 struct net_device *netdev;
2567
2568 netdev = alloc_etherdev(sizeof(struct port_info));
2569 if (!netdev) {
2570 err = -ENOMEM;
2571 goto out_free_dev;
2572 }
2573
2574 SET_NETDEV_DEV(netdev, &pdev->dev);
2575
2576 adapter->port[i] = netdev;
2577 pi = netdev_priv(netdev);
5fbf816f 2578 pi->adapter = adapter;
2579 pi->rx_csum_offload = 1;
2580 pi->nqsets = 1;
2581 pi->first_qset = i;
2582 pi->activity = 0;
2583 pi->port_id = i;
2584 netif_carrier_off(netdev);
2585 netdev->irq = pdev->irq;
2586 netdev->mem_start = mmio_start;
2587 netdev->mem_end = mmio_start + mmio_len - 1;
2588 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2589 netdev->features |= NETIF_F_LLTX;
2590 if (pci_using_dac)
2591 netdev->features |= NETIF_F_HIGHDMA;
2592
2593 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2594 netdev->vlan_rx_register = vlan_rx_register;
2595
2596 netdev->open = cxgb_open;
2597 netdev->stop = cxgb_close;
2598 netdev->hard_start_xmit = t3_eth_xmit;
2599 netdev->get_stats = cxgb_get_stats;
2600 netdev->set_multicast_list = cxgb_set_rxmode;
2601 netdev->do_ioctl = cxgb_ioctl;
2602 netdev->change_mtu = cxgb_change_mtu;
2603 netdev->set_mac_address = cxgb_set_mac_addr;
2604#ifdef CONFIG_NET_POLL_CONTROLLER
2605 netdev->poll_controller = cxgb_netpoll;
2606#endif
2607
2608 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2609 }
2610
5fbf816f 2611 pci_set_drvdata(pdev, adapter);
2612 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2613 err = -ENODEV;
2614 goto out_free_dev;
2615 }
480fe1a3 2616
2617 /*
2618 * The card is now ready to go. If any errors occur during device
2619 * registration we do not fail the whole card but rather proceed only
2620 * with the ports we manage to register successfully. However we must
2621 * register at least one net device.
2622 */
2623 for_each_port(adapter, i) {
2624 err = register_netdev(adapter->port[i]);
2625 if (err)
2626 dev_warn(&pdev->dev,
2627 "cannot register net device %s, skipping\n",
2628 adapter->port[i]->name);
2629 else {
2630 /*
2631 * Change the name we use for messages to the name of
2632 * the first successfully registered interface.
2633 */
2634 if (!adapter->registered_device_map)
2635 adapter->name = adapter->port[i]->name;
2636
2637 __set_bit(i, &adapter->registered_device_map);
2638 }
2639 }
2640 if (!adapter->registered_device_map) {
2641 dev_err(&pdev->dev, "could not register any net devices\n");
2642 goto out_free_dev;
2643 }
2644
2645 /* Driver's ready. Reflect it on LEDs */
2646 t3_led_ready(adapter);
2647
2648 if (is_offload(adapter)) {
2649 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2650 cxgb3_adapter_ofld(adapter);
2651 }
2652
2653 /* See what interrupts we'll be using */
2654 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2655 adapter->flags |= USING_MSIX;
2656 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2657 adapter->flags |= USING_MSI;
2658
0ee8d33c 2659 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2660 &cxgb3_attr_group);
2661
2662 print_port_info(adapter, ai);
2663 return 0;
2664
2665out_free_dev:
2666 iounmap(adapter->regs);
2667 for (i = ai->nports - 1; i >= 0; --i)
2668 if (adapter->port[i])
2669 free_netdev(adapter->port[i]);
2670
2671out_free_adapter:
2672 kfree(adapter);
2673
2674out_disable_device:
2675 pci_disable_device(pdev);
2676out_release_regions:
2677 pci_release_regions(pdev);
2678 pci_set_drvdata(pdev, NULL);
2679 return err;
2680}
2681
2682static void __devexit remove_one(struct pci_dev *pdev)
2683{
5fbf816f 2684 struct adapter *adapter = pci_get_drvdata(pdev);
4d22de3e 2685
5fbf816f 2686 if (adapter) {
4d22de3e 2687 int i;
2688
2689 t3_sge_stop(adapter);
0ee8d33c 2690 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2691 &cxgb3_attr_group);
2692
2693 if (is_offload(adapter)) {
2694 cxgb3_adapter_unofld(adapter);
2695 if (test_bit(OFFLOAD_DEVMAP_BIT,
2696 &adapter->open_device_map))
2697 offload_close(&adapter->tdev);
2698 }
2699
2700 for_each_port(adapter, i)
2701 if (test_bit(i, &adapter->registered_device_map))
2702 unregister_netdev(adapter->port[i]);
2703
2704 t3_free_sge_resources(adapter);
2705 cxgb_disable_msi(adapter);
2706
2707 for_each_port(adapter, i)
2708 if (adapter->port[i])
2709 free_netdev(adapter->port[i]);
2710
2711 iounmap(adapter->regs);
2712 kfree(adapter);
2713 pci_release_regions(pdev);
2714 pci_disable_device(pdev);
2715 pci_set_drvdata(pdev, NULL);
2716 }
2717}
2718
2719static struct pci_driver driver = {
2720 .name = DRV_NAME,
2721 .id_table = cxgb3_pci_tbl,
2722 .probe = init_one,
2723 .remove = __devexit_p(remove_one),
91a6b50c 2724 .err_handler = &t3_err_handler,
2725};
2726
2727static int __init cxgb3_init_module(void)
2728{
2729 int ret;
2730
2731 cxgb3_offload_init();
2732
2733 ret = pci_register_driver(&driver);
2734 return ret;
2735}
2736
2737static void __exit cxgb3_cleanup_module(void)
2738{
2739 pci_unregister_driver(&driver);
2740 if (cxgb3_wq)
2741 destroy_workqueue(cxgb3_wq);
2742}
2743
2744module_init(cxgb3_init_module);
2745module_exit(cxgb3_cleanup_module);