/* drivers/net/cxgb3/cxgb3_main.c */
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	{0,}
};

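/*
 * Note (annotation, not in the original source): the trailing CH_DEVICE
 * argument lands in the PCI table's driver_data field and is used at probe
 * time as an index into a per-board parameter array, which is why several
 * device IDs can share the same index (e.g. T302E and T302X both use 1).
 */
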
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

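/*
 * Usage sketch (module-loading examples, not part of the original source):
 *
 *	modprobe cxgb3 msi=0	# force legacy pin interrupts
 *	modprobe cxgb3 msi=1	# allow MSI but never MSI-X
 *
 * The default, msi=2, lets the driver pick MSI-X when the platform
 * supports it.
 */
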
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose link settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS + pi->mac.offset);
		t3_write_reg(adap, A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap, A_XGM_INT_ENABLE + pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else
		netif_carrier_off(dev);

	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the PHY reporting the module change
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

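/*
 * Naming example (annotation, not in the original source): on an adapter
 * with two ports of two queue sets each, entry 0 carries the adapter name
 * for slow-path events and the data vectors are named eth0-0, eth0-1,
 * eth1-2, eth1-3 -- the qset number is global, hence first_qset + i.
 */
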
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return i;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

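/*
 * Mapping example (hypothetical configuration, not in the original source):
 * with nq0 = 2 and nq1 = 1, the first half of rspq_map cycles 0,1,0,1,...
 * for port 0 while the second half is pinned to entry 2, i.e. port 1's
 * single qset offset by nq0.
 */
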
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
		     ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_stop_sge_timers(adap);
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

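/*
 * Usage sketch (assuming standard sysfs paths; not part of the original
 * source): these attributes appear under the port's net device, e.g.
 *
 *	cat /sys/class/net/ethX/cam_size
 *	echo 8192 > /sys/class/net/ethX/nfilters	# rejected after full init
 */
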
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
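	/*
	 * Rate arithmetic (annotation, not in the original source): cclk is
	 * the core clock in kHz, so cclk * 1000 / cpt gives scheduler ticks
	 * per second; bpt is bytes per tick, and dividing bytes/sec by 125
	 * converts to kilobits per second.
	 */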
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

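/*
 * Usage sketch (hypothetical values, not part of the original source): each
 * schedN attribute reads back "disabled" or the configured rate, and accepts
 * a new rate in Kbps (up to the 10000000 cap checked above), e.g.
 *
 *	echo 100000 > /sys/class/net/ethX/sched0	# cap scheduler 0 at ~100 Mbps
 */
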
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);

	return ret;
}

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
		(HZ * adap->params.linkpoll_period) / 10 :
		adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
		pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
		pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
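	/*
	 * Worked example (hypothetical rev value, not in the original
	 * source): a revision-4 chip on PCIe encodes as
	 * 3 | (4 << 10) | (1 << 31) = 0x80001003.
	 */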

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
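
/*
 * Example (annotation, not in the original source):
 * speed_duplex_to_caps(SPEED_100, DUPLEX_HALF) yields
 * SUPPORTED_100baseT_Half, while an unsupported combination such as
 * SPEED_10000 at half duplex falls through and yields 0.
 */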

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		int i;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1844
1845 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1846 buf = kmalloc(aligned_len, GFP_KERNEL);
1847 if (!buf)
1848 return -ENOMEM;
05e5c116 1849 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
4d22de3e
DLR
1850 if (!err && aligned_len > 4)
1851 err = t3_seeprom_read(adapter,
1852 aligned_offset + aligned_len - 4,
05e5c116 1853 (__le32 *) & buf[aligned_len - 4]);
4d22de3e
DLR
1854 if (err)
1855 goto out;
1856 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1857 } else
1858 buf = data;
1859
1860 err = t3_seeprom_wp(adapter, 0);
1861 if (err)
1862 goto out;
1863
05e5c116 1864 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
4d22de3e
DLR
1865 err = t3_seeprom_write(adapter, aligned_offset, *p);
1866 aligned_offset += 4;
1867 }
1868
1869 if (!err)
1870 err = t3_seeprom_wp(adapter, 1);
1871out:
1872 if (buf != data)
1873 kfree(buf);
1874 return err;
1875}
1876
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};

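/*
 * Bounds-check an ioctl parameter.  Negative values are accepted as they
 * mean "leave this setting unchanged" in the extension ioctls below.
 */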
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))
					return -EINVAL;
			}

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0)
			set_qset_lro(dev, t.qset_idx, t.lro);

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

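		/*
		 * Report the IRQ servicing the qset.  With MSI-X the +1
		 * skips vector 0, which appears to be reserved for the
		 * slow-path (non-data) interrupt.
		 */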
		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
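		/*
		 * 0x1554000 has bits 14, 16, 18, 20, 22 and 24 set, so the
		 * tx page size must be one of 16KB, 64KB, 256KB, 1MB, 4MB
		 * or 16MB.
		 */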
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match, t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match, t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

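/*
 * MII ioctls.  For 10G PHYs the upper byte of phy_id selects the clause-45
 * MMD to access (defaulting to the PCS device when zero); 1G PHYs use plain
 * clause-22 addressing.
 */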
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret, mmd;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return -EINVAL;

			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     mmd, data->reg_num, &val);
		} else
			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     0, data->reg_num & 0x1f, &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return -EINVAL;

			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      mmd, data->reg_num,
					      data->val_in);
		} else
			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
					      0, data->reg_num & 0x1f,
					      data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}

static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

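/*
 * Poll all of the port's queues without interrupts.  The dev_id cookie
 * handed to the interrupt handler must match what was passed to
 * request_irq(): the queue set itself for MSI-X, the adapter otherwise.
 */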
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling)(0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		spin_lock_irq(&adapter->work_lock);
		if (p->link_fault) {
			spin_unlock_irq(&adapter->work_lock);
			continue;
		}
		spin_unlock_irq(&adapter->work_lock);

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}

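/*
 * Periodic watchdog for the T3B2 XGMAC.  A t3b2_mac_watchdog_task() return
 * of 1 is taken to mean the MAC was toggled and 2 that it needs a full
 * reinit (MTU, address, rx mode, link restart); the exact return-code
 * semantics live in the MAC layer.
 */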
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}


static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMACs to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

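		/*
		 * Each queue set owns two free lists, and the status word
		 * carries one empty bit per free list: even bits map to FL0
		 * and odd bits to FL1 of successive queue sets, which is why
		 * i toggles between 0 and 1 and qs only advances after the
		 * odd bit.
		 */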
		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

static void link_fault_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       link_fault_handler_task);
	int i;

	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];
		struct port_info *pi = netdev_priv(netdev);

		if (pi->link_fault)
			t3_link_fault(adapter, i);
	}
}

void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	queue_work(cxgb3_wq, &adapter->link_fault_handler_task);
	spin_unlock(&adapter->work_lock);
}

static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
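/*
 * For instance, assuming SGE_QSETS is 8: a two-port adapter brought up with
 * 9 MSI-X vectors (nqsets = 8) on an 8-CPU machine halves nqsets to 4 per
 * port, since 2 * 8 exceeds the 8 available qsets.
 */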
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

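/*
 * With old-style pci_enable_msix() a positive return value reports how many
 * vectors could have been allocated, so the loop below retries with that
 * count until the call succeeds (0) or fails outright (< 0).  We still need
 * at least one vector per port plus one for the slow path.
 */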
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i, err;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
		vectors = err;

	if (!err && vectors < (adap->params.nports + 1))
		err = -1;

	if (!err) {
		for (i = 0; i < vectors; ++i)
			adap->msix_info[i].vec = entries[i].vector;
		adap->msix_nvectors = vectors;
	}

	return err;
}

static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->link_fault_handler_task, link_fault_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);