cxgb3: Fix potential msi-x vector leak
drivers/net/cxgb3/cxgb3_main.c

/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
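
/*
 * Example (illustrative): force legacy pin interrupts with
 *
 *     modprobe cxgb3 msi=0
 *
 * The 0644 permission also exposes the knob at
 * /sys/module/cxgb3/parameters/msi, though the value is only consulted
 * when an adapter's interrupts are being set up.
 */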

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose link is to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                            pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else
                netif_carrier_off(dev);

        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the PHY reporting the module change
 *      @port_id: the port index whose PHY module changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
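/*
 * Vector 0 takes the adapter's name; each data vector is named
 * "<netdev>-<qset index>", e.g. "eth0-0", "eth0-1" (names illustrative),
 * which is what appears in /proc/interrupts.
 */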
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
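                        /*
                         * On failure, unwind: free every data IRQ
                         * requested so far so no MSI-X vector is
                         * left claimed.
                         */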
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

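        /* Expect one reply per request: 16 SMT + 2048 L2T + 2048 RTE
         * writes plus the final SET_TCB_FIELD sent above. */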
        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return i;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

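        /*
         * The first half of the table maps to port 0's queue sets, the
         * second half to port 1's.  E.g. with nq0 = nq1 = 2 the map reads
         * 0,1,0,1,... in the first half and 2,3,2,3,... in the second.
         */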
        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      set_qset_lro - Turn a queue set's LRO capability on and off
 *      @dev: the device the qset is attached to
 *      @qset_idx: the queue set index
 *      @val: the LRO switch
 *
 *      Sets LRO on or off for a particular queue set.
 *      The device's features flag is updated to reflect the LRO
 *      capability when all queues belonging to the device are
 *      in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
                     ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t (*format)(struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format)(to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set)(to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
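
/*
 * These attributes are registered against the port net devices (outside
 * this excerpt), so they appear in sysfs and can be driven from the shell,
 * e.g. (paths illustrative):
 *
 *     cat /sys/class/net/eth0/cam_size
 *     echo 8192 > /sys/class/net/eth0/nfilters
 *
 * subject to the CAP_NET_ADMIN and FULL_INIT_DONE checks above.
 */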

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
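                /*
                 * The scheduler sends bpt bytes every cpt core-clock ticks
                 * and cclk is in kHz, so bytes/sec = bpt * (cclk * 1000) /
                 * cpt; dividing bytes/sec by 125 (= 1000/8) gives Kbit/s.
                 */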
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);

        return ret;
}

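/*
 * Bind each of a port's queue sets to that port for the HW packet
 * scheduler; lo/hi of -1 presumably leave the min/max rate parameters
 * at their defaults.
 */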
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
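
/*
 * With FW_VERSION_* of, say, 7.4.0, FW_FNAME expands to
 * "cxgb3/t3fw-7.4.0.bin" (version numbers illustrative), which
 * request_firmware() below resolves via the usual firmware search path,
 * typically under /lib/firmware.
 */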

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

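        /*
         * With MSI-X, vector 0 carries the slow-path/asynchronous events and
         * vectors 1..n carry the data queue sets.  If requesting the data
         * IRQs fails, vector 0 is freed again below so it isn't leaked.
         */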
        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                if (err) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
                (HZ * adap->params.linkpoll_period) / 10 :
                adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_scheduled_work();

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        dev->real_num_tx_queues = pi->nqsets;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
                tot += adapter->sge.qs[i].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
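        /* Zero placeholders keep the values aligned with the LroAggregated,
         * LroFlushed and LroNoDesc strings above; these per-driver LRO
         * counters are no longer maintained. */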
        *data++ = 0;
        *data++ = 0;
        *data++ = 0;
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;

        *data++ = s->link_faults;
}

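/*
 * Copy a range of hardware registers into the dump.  start and end are
 * register addresses, i.e. byte offsets into the dump buffer, so the
 * void * arithmetic below places each 32-bit register at its own offset
 * within the snapshot.
 */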
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i;

        if (data == 0)
                data = 2;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg)) {
                /*
                 * PHY offers a single speed/duplex.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE) {
                        int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
                        if (lc->supported & cap)
                                return 0;
                }
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

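                /*
                 * Forcing 1Gb/s with autoneg off is rejected below,
                 * presumably because 1000BASE-T requires autonegotiation
                 * for master/slave resolution.
                 */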
                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        if (data) {
                p->rx_offload |= T3_RX_CSUM;
        } else {
                int i;

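                /* LRO depends on RX checksum offload, so turning checksums
                 * off also disables LRO on all of the port's queue sets. */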
47fd23fe 1730 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
04ecb072
DLR
1731 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1732 set_qset_lro(dev, i, 0);
b47385bd 1733 }
4d22de3e
DLR
1734 return 0;
1735}
1736
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = q->fl_size;
        e->rx_mini_pending = q->rspq_size;
        e->rx_jumbo_pending = q->jumbo_size;
        e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q;
        int i;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        q = &adapter->params.sge.qset[pi->first_qset];
        for (i = 0; i < pi->nqsets; ++i, ++q) {
                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i, err = 0;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
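        /* The EEPROM is accessed a 32-bit word at a time, so read the
         * aligned words covering the requested window and then copy out
         * exactly the bytes the caller asked for. */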
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

1832static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1833 u8 *data)
1834{
5fbf816f
DLR
1835 struct port_info *pi = netdev_priv(dev);
1836 struct adapter *adapter = pi->adapter;
05e5c116
AV
1837 u32 aligned_offset, aligned_len;
1838 __le32 *p;
4d22de3e 1839 u8 *buf;
c54f5c24 1840 int err;
4d22de3e
DLR
1841
1842 if (eeprom->magic != EEPROM_MAGIC)
1843 return -EINVAL;
1844
1845 aligned_offset = eeprom->offset & ~3;
1846 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1847
1848 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1849 buf = kmalloc(aligned_len, GFP_KERNEL);
1850 if (!buf)
1851 return -ENOMEM;
05e5c116 1852 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
4d22de3e
DLR
1853 if (!err && aligned_len > 4)
1854 err = t3_seeprom_read(adapter,
1855 aligned_offset + aligned_len - 4,
05e5c116 1856 (__le32 *)&buf[aligned_len - 4]);
4d22de3e
DLR
1857 if (err)
1858 goto out;
1859 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1860 } else
1861 buf = data;
1862
1863 err = t3_seeprom_wp(adapter, 0);
1864 if (err)
1865 goto out;
1866
05e5c116 1867 for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
4d22de3e
DLR
1868 err = t3_seeprom_write(adapter, aligned_offset, *p);
1869 aligned_offset += 4;
1870 }
1871
1872 if (!err)
1873 err = t3_seeprom_wp(adapter, 1);
1874out:
1875 if (buf != data)
1876 kfree(buf);
1877 return err;
1878}
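
/*
 * Worked example for the read-modify-write above: a write of len = 6 at
 * offset = 5 yields aligned_offset = 4 and aligned_len = (6 + 1 + 3) & ~3
 * = 8.  The two 4-byte words covering bytes 4..11 are read back, bytes
 * 5..10 are patched from the caller's data, and both words are rewritten.
 */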
1879
1880static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1881{
1882 wol->supported = 0;
1883 wol->wolopts = 0;
1884 memset(&wol->sopass, 0, sizeof(wol->sopass));
1885}
1886
1887static const struct ethtool_ops cxgb_ethtool_ops = {
1888 .get_settings = get_settings,
1889 .set_settings = set_settings,
1890 .get_drvinfo = get_drvinfo,
1891 .get_msglevel = get_msglevel,
1892 .set_msglevel = set_msglevel,
1893 .get_ringparam = get_sge_param,
1894 .set_ringparam = set_sge_param,
1895 .get_coalesce = get_coalesce,
1896 .set_coalesce = set_coalesce,
1897 .get_eeprom_len = get_eeprom_len,
1898 .get_eeprom = get_eeprom,
1899 .set_eeprom = set_eeprom,
1900 .get_pauseparam = get_pauseparam,
1901 .set_pauseparam = set_pauseparam,
1902 .get_rx_csum = get_rx_csum,
1903 .set_rx_csum = set_rx_csum,
4d22de3e 1904 .set_tx_csum = ethtool_op_set_tx_csum,
4d22de3e
DLR
1905 .set_sg = ethtool_op_set_sg,
1906 .get_link = ethtool_op_get_link,
1907 .get_strings = get_strings,
1908 .phys_id = cxgb3_phys_id,
1909 .nway_reset = restart_autoneg,
b9f2c044 1910 .get_sset_count = get_sset_count,
4d22de3e
DLR
1911 .get_ethtool_stats = get_stats,
1912 .get_regs_len = get_regs_len,
1913 .get_regs = get_regs,
1914 .get_wol = get_wol,
4d22de3e 1915 .set_tso = ethtool_op_set_tso,
4d22de3e
DLR
1916};
1917
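/*
 * A negative value is used by the extension ioctls below to mean
 * "parameter not supplied", so in_range() deliberately accepts it.
 */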
1918static int in_range(int val, int lo, int hi)
1919{
1920 return val < 0 || (val <= hi && val >= lo);
1921}
1922
1923static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1924{
5fbf816f
DLR
1925 struct port_info *pi = netdev_priv(dev);
1926 struct adapter *adapter = pi->adapter;
4d22de3e 1927 u32 cmd;
5fbf816f 1928 int ret;
4d22de3e
DLR
1929
1930 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1931 return -EFAULT;
1932
1933 switch (cmd) {
4d22de3e
DLR
1934 case CHELSIO_SET_QSET_PARAMS:{
1935 int i;
1936 struct qset_params *q;
1937 struct ch_qset_params t;
8c263761
DLR
1938 int q1 = pi->first_qset;
1939 int nqsets = pi->nqsets;
4d22de3e
DLR
1940
1941 if (!capable(CAP_NET_ADMIN))
1942 return -EPERM;
1943 if (copy_from_user(&t, useraddr, sizeof(t)))
1944 return -EFAULT;
1945 if (t.qset_idx >= SGE_QSETS)
1946 return -EINVAL;
1947 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1948 !in_range(t.cong_thres, 0, 255) ||
1949 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1950 MAX_TXQ_ENTRIES) ||
1951 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1952 MAX_TXQ_ENTRIES) ||
1953 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1954 MAX_CTRL_TXQ_ENTRIES) ||
1955 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1956 MAX_RX_BUFFERS) ||
1957 !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1958 MAX_RX_JUMBO_BUFFERS) ||
1959 !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1960 MAX_RSPQ_ENTRIES))
1961 return -EINVAL;
8c263761
DLR
1962
1963 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1964 for_each_port(adapter, i) {
1965 pi = adap2pinfo(adapter, i);
1966 if (t.qset_idx >= pi->first_qset &&
1967 t.qset_idx < pi->first_qset + pi->nqsets &&
47fd23fe 1968 !(pi->rx_offload & T3_RX_CSUM))
8c263761
DLR
1969 return -EINVAL;
1970 }
1971
4d22de3e
DLR
1972 if ((adapter->flags & FULL_INIT_DONE) &&
1973 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1974 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1975 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1976 t.polling >= 0 || t.cong_thres >= 0))
1977 return -EBUSY;
1978
8c263761
DLR
1979 /* Allow setting of any available qset when offload enabled */
1980 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1981 q1 = 0;
1982 for_each_port(adapter, i) {
1983 pi = adap2pinfo(adapter, i);
1984 nqsets += pi->first_qset + pi->nqsets;
1985 }
1986 }
1987
1988 if (t.qset_idx < q1)
1989 return -EINVAL;
1990 if (t.qset_idx > q1 + nqsets - 1)
1991 return -EINVAL;
1992
4d22de3e
DLR
1993 q = &adapter->params.sge.qset[t.qset_idx];
1994
1995 if (t.rspq_size >= 0)
1996 q->rspq_size = t.rspq_size;
1997 if (t.fl_size[0] >= 0)
1998 q->fl_size = t.fl_size[0];
1999 if (t.fl_size[1] >= 0)
2000 q->jumbo_size = t.fl_size[1];
2001 if (t.txq_size[0] >= 0)
2002 q->txq_size[0] = t.txq_size[0];
2003 if (t.txq_size[1] >= 0)
2004 q->txq_size[1] = t.txq_size[1];
2005 if (t.txq_size[2] >= 0)
2006 q->txq_size[2] = t.txq_size[2];
2007 if (t.cong_thres >= 0)
2008 q->cong_thres = t.cong_thres;
2009 if (t.intr_lat >= 0) {
2010 struct sge_qset *qs =
2011 &adapter->sge.qs[t.qset_idx];
2012
2013 q->coalesce_usecs = t.intr_lat;
2014 t3_update_qset_coalesce(qs, q);
2015 }
2016 if (t.polling >= 0) {
2017 if (adapter->flags & USING_MSIX)
2018 q->polling = t.polling;
2019 else {
2020 /* No polling with INTx for T3A */
2021 if (adapter->params.rev == 0 &&
2022 !(adapter->flags & USING_MSI))
2023 t.polling = 0;
2024
2025 for (i = 0; i < SGE_QSETS; i++) {
2026 q = &adapter->params.sge.qset[i];
2028 q->polling = t.polling;
2029 }
2030 }
2031 }
04ecb072
DLR
2032 if (t.lro >= 0)
2033 set_qset_lro(dev, t.qset_idx, t.lro);
2034
4d22de3e
DLR
2035 break;
2036 }
2037 case CHELSIO_GET_QSET_PARAMS:{
2038 struct qset_params *q;
2039 struct ch_qset_params t;
8c263761
DLR
2040 int q1 = pi->first_qset;
2041 int nqsets = pi->nqsets;
2042 int i;
4d22de3e
DLR
2043
2044 if (copy_from_user(&t, useraddr, sizeof(t)))
2045 return -EFAULT;
8c263761
DLR
2046
2047 /* Display qsets for all ports when offload enabled */
2048 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2049 q1 = 0;
2050 for_each_port(adapter, i) {
2051 pi = adap2pinfo(adapter, i);
2052 nqsets = pi->first_qset + pi->nqsets;
2053 }
2054 }
2055
2056 if (t.qset_idx >= nqsets)
4d22de3e
DLR
2057 return -EINVAL;
2058
8c263761 2059 q = &adapter->params.sge.qset[q1 + t.qset_idx];
4d22de3e
DLR
2060 t.rspq_size = q->rspq_size;
2061 t.txq_size[0] = q->txq_size[0];
2062 t.txq_size[1] = q->txq_size[1];
2063 t.txq_size[2] = q->txq_size[2];
2064 t.fl_size[0] = q->fl_size;
2065 t.fl_size[1] = q->jumbo_size;
2066 t.polling = q->polling;
b47385bd 2067 t.lro = q->lro;
4d22de3e
DLR
2068 t.intr_lat = q->coalesce_usecs;
2069 t.cong_thres = q->cong_thres;
8c263761
DLR
2070 t.qnum = q1;
2071
2072 if (adapter->flags & USING_MSIX)
2073 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2074 else
2075 t.vector = adapter->pdev->irq;
4d22de3e
DLR
2076
2077 if (copy_to_user(useraddr, &t, sizeof(t)))
2078 return -EFAULT;
2079 break;
2080 }
2081 case CHELSIO_SET_QSET_NUM:{
2082 struct ch_reg edata;
4d22de3e
DLR
2083 unsigned int i, first_qset = 0, other_qsets = 0;
2084
2085 if (!capable(CAP_NET_ADMIN))
2086 return -EPERM;
2087 if (adapter->flags & FULL_INIT_DONE)
2088 return -EBUSY;
2089 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2090 return -EFAULT;
2091 if (edata.val < 1 ||
2092 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2093 return -EINVAL;
2094
2095 for_each_port(adapter, i)
2096 if (adapter->port[i] && adapter->port[i] != dev)
2097 other_qsets += adap2pinfo(adapter, i)->nqsets;
2098
2099 if (edata.val + other_qsets > SGE_QSETS)
2100 return -EINVAL;
2101
2102 pi->nqsets = edata.val;
2103
2104 for_each_port(adapter, i)
2105 if (adapter->port[i]) {
2106 pi = adap2pinfo(adapter, i);
2107 pi->first_qset = first_qset;
2108 first_qset += pi->nqsets;
2109 }
2110 break;
2111 }
2112 case CHELSIO_GET_QSET_NUM:{
2113 struct ch_reg edata;
4d22de3e
DLR
2114
2115 edata.cmd = CHELSIO_GET_QSET_NUM;
2116 edata.val = pi->nqsets;
2117 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2118 return -EFAULT;
2119 break;
2120 }
2121 case CHELSIO_LOAD_FW:{
2122 u8 *fw_data;
2123 struct ch_mem_range t;
2124
1b3aa7af 2125 if (!capable(CAP_SYS_RAWIO))
4d22de3e
DLR
2126 return -EPERM;
2127 if (copy_from_user(&t, useraddr, sizeof(t)))
2128 return -EFAULT;
1b3aa7af 2129 /* XXX: t.len comes from user space and is passed to kmalloc() without an upper bound check */
4d22de3e
DLR
2130 fw_data = kmalloc(t.len, GFP_KERNEL);
2131 if (!fw_data)
2132 return -ENOMEM;
2133
2134 if (copy_from_user(fw_data,
2135 useraddr + sizeof(t), t.len)) {
2136 kfree(fw_data);
2137 return -EFAULT;
2138 }
2139
2140 ret = t3_load_fw(adapter, fw_data, t.len);
2141 kfree(fw_data);
2142 if (ret)
2143 return ret;
2144 break;
2145 }
2146 case CHELSIO_SETMTUTAB:{
2147 struct ch_mtus m;
2148 int i;
2149
2150 if (!is_offload(adapter))
2151 return -EOPNOTSUPP;
2152 if (!capable(CAP_NET_ADMIN))
2153 return -EPERM;
2154 if (offload_running(adapter))
2155 return -EBUSY;
2156 if (copy_from_user(&m, useraddr, sizeof(m)))
2157 return -EFAULT;
2158 if (m.nmtus != NMTUS)
2159 return -EINVAL;
2160 if (m.mtus[0] < 81) /* accommodate SACK */
2161 return -EINVAL;
2162
2163 /* MTUs must be in ascending order */
2164 for (i = 1; i < NMTUS; ++i)
2165 if (m.mtus[i] < m.mtus[i - 1])
2166 return -EINVAL;
2167
2168 memcpy(adapter->params.mtus, m.mtus,
2169 sizeof(adapter->params.mtus));
2170 break;
2171 }
2172 case CHELSIO_GET_PM:{
2173 struct tp_params *p = &adapter->params.tp;
2174 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2175
2176 if (!is_offload(adapter))
2177 return -EOPNOTSUPP;
2178 m.tx_pg_sz = p->tx_pg_size;
2179 m.tx_num_pg = p->tx_num_pgs;
2180 m.rx_pg_sz = p->rx_pg_size;
2181 m.rx_num_pg = p->rx_num_pgs;
2182 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2183 if (copy_to_user(useraddr, &m, sizeof(m)))
2184 return -EFAULT;
2185 break;
2186 }
2187 case CHELSIO_SET_PM:{
2188 struct ch_pm m;
2189 struct tp_params *p = &adapter->params.tp;
2190
2191 if (!is_offload(adapter))
2192 return -EOPNOTSUPP;
2193 if (!capable(CAP_NET_ADMIN))
2194 return -EPERM;
2195 if (adapter->flags & FULL_INIT_DONE)
2196 return -EBUSY;
2197 if (copy_from_user(&m, useraddr, sizeof(m)))
2198 return -EFAULT;
d9da466a 2199 if (!is_power_of_2(m.rx_pg_sz) ||
2200 !is_power_of_2(m.tx_pg_sz))
4d22de3e
DLR
2201 return -EINVAL; /* not power of 2 */
2202 if (!(m.rx_pg_sz & 0x14000))
2203 return -EINVAL; /* not 16KB or 64KB */
2204 if (!(m.tx_pg_sz & 0x1554000))
2205 return -EINVAL; /* not a power-of-4 multiple of 16KB, up to 16MB */
2206 if (m.tx_num_pg == -1)
2207 m.tx_num_pg = p->tx_num_pgs;
2208 if (m.rx_num_pg == -1)
2209 m.rx_num_pg = p->rx_num_pgs;
2210 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2211 return -EINVAL;
2212 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2213 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2214 return -EINVAL;
2215 p->rx_pg_size = m.rx_pg_sz;
2216 p->tx_pg_size = m.tx_pg_sz;
2217 p->rx_num_pgs = m.rx_num_pg;
2218 p->tx_num_pgs = m.tx_num_pg;
2219 break;
2220 }
2221 case CHELSIO_GET_MEM:{
2222 struct ch_mem_range t;
2223 struct mc7 *mem;
2224 u64 buf[32];
2225
2226 if (!is_offload(adapter))
2227 return -EOPNOTSUPP;
2228 if (!(adapter->flags & FULL_INIT_DONE))
2229 return -EIO; /* need the memory controllers */
2230 if (copy_from_user(&t, useraddr, sizeof(t)))
2231 return -EFAULT;
2232 if ((t.addr & 7) || (t.len & 7))
2233 return -EINVAL;
2234 if (t.mem_id == MEM_CM)
2235 mem = &adapter->cm;
2236 else if (t.mem_id == MEM_PMRX)
2237 mem = &adapter->pmrx;
2238 else if (t.mem_id == MEM_PMTX)
2239 mem = &adapter->pmtx;
2240 else
2241 return -EINVAL;
2242
2243 /*
1825494a
DLR
2244 * Version scheme:
2245 * bits 0..9: chip version
2246 * bits 10..15: chip revision
2247 */
4d22de3e
DLR
2248 t.version = 3 | (adapter->params.rev << 10);
2249 if (copy_to_user(useraddr, &t, sizeof(t)))
2250 return -EFAULT;
2251
2252 /*
2253 * Read 256 bytes at a time as len can be large and we don't
2254 * want to use huge intermediate buffers.
2255 */
2256 useraddr += sizeof(t); /* advance to start of buffer */
2257 while (t.len) {
2258 unsigned int chunk =
2259 min_t(unsigned int, t.len, sizeof(buf));
2260
2261 ret = t3_mc7_bd_read(mem,
2262 t.addr / 8,
2263 chunk / 8, buf);
2264 if (ret)
2265 return ret;
2266 if (copy_to_user(useraddr, buf, chunk))
2267 return -EFAULT;
2268 useraddr += chunk;
2269 t.addr += chunk;
2270 t.len -= chunk;
2271 }
2272 break;
2273 }
2274 case CHELSIO_SET_TRACE_FILTER:{
2275 struct ch_trace t;
2276 const struct trace_params *tp;
2277
2278 if (!capable(CAP_NET_ADMIN))
2279 return -EPERM;
2280 if (!offload_running(adapter))
2281 return -EAGAIN;
2282 if (copy_from_user(&t, useraddr, sizeof(t)))
2283 return -EFAULT;
2284
2285 tp = (const struct trace_params *)&t.sip;
2286 if (t.config_tx)
2287 t3_config_trace_filter(adapter, tp, 0,
2288 t.invert_match,
2289 t.trace_tx);
2290 if (t.config_rx)
2291 t3_config_trace_filter(adapter, tp, 1,
2292 t.invert_match,
2293 t.trace_rx);
2294 break;
2295 }
4d22de3e
DLR
2296 default:
2297 return -EOPNOTSUPP;
2298 }
2299 return 0;
2300}
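
/*
 * Illustrative user-space sketch (not part of the driver): querying the
 * number of queue sets through the CHELSIO_GET_QSET_NUM case handled
 * above.  SIOCCHIOCTL and struct ch_reg really come from cxgb3_ioctl.h;
 * the mirrored definitions, the command value and the interface name
 * below are stand-ins.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

#define SIOCCHIOCTL SIOCDEVPRIVATE	/* as in cxgb3_ioctl.h */
#define CHELSIO_GET_QSET_NUM 0		/* placeholder, see cxgb3_ioctl.h */

struct ch_reg {				/* mirrors the layout assumed here */
	uint32_t cmd, addr, val;
};

int main(void)
{
	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&edata;

	if (fd >= 0 && ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
		printf("%s uses %u queue sets\n", ifr.ifr_name, edata.val);
	return 0;
}
#endif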
2301
2302static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2303{
4d22de3e 2304 struct mii_ioctl_data *data = if_mii(req);
5fbf816f
DLR
2305 struct port_info *pi = netdev_priv(dev);
2306 struct adapter *adapter = pi->adapter;
2307 int ret, mmd;
4d22de3e
DLR
2308
2309 switch (cmd) {
2310 case SIOCGMIIPHY:
2311 data->phy_id = pi->phy.addr;
2312 /* FALLTHRU */
2313 case SIOCGMIIREG:{
2314 u32 val;
2315 struct cphy *phy = &pi->phy;
2316
2317 if (!phy->mdio_read)
2318 return -EOPNOTSUPP;
2319 if (is_10G(adapter)) {
2320 mmd = data->phy_id >> 8;
2321 if (!mmd)
2322 mmd = MDIO_DEV_PCS;
9b1e3656 2323 else if (mmd > MDIO_DEV_VEND2)
4d22de3e
DLR
2324 return -EINVAL;
2325
2326 ret = phy->mdio_read(adapter,
2327 data->phy_id & 0x1f,
2328 mmd, data->reg_num, &val);
2329 } else
2330 ret = phy->mdio_read(adapter,
2331 data->phy_id & 0x1f, 0,
2332 data->reg_num & 0x1f,
2333 &val);
2334 if (!ret)
2335 data->val_out = val;
2336 break;
2337 }
2338 case SIOCSMIIREG:{
2339 struct cphy *phy = &pi->phy;
2340
2341 if (!capable(CAP_NET_ADMIN))
2342 return -EPERM;
2343 if (!phy->mdio_write)
2344 return -EOPNOTSUPP;
2345 if (is_10G(adapter)) {
2346 mmd = data->phy_id >> 8;
2347 if (!mmd)
2348 mmd = MDIO_DEV_PCS;
9b1e3656 2349 else if (mmd > MDIO_DEV_VEND2)
4d22de3e
DLR
2350 return -EINVAL;
2351
2352 ret = phy->mdio_write(adapter,
2353 data->phy_id & 0x1f,
2354 mmd,
2355 data->reg_num,
2356 data->val_in);
2357 } else
2358 ret = phy->mdio_write(adapter,
2359 data->phy_id & 0x1f,
2360 0,
2361 data->reg_num & 0x1f,
2362 data->val_in);
2363 break;
2364 }
2365 case SIOCCHIOCTL:
2366 return cxgb_extension_ioctl(dev, req->ifr_data);
2367 default:
2368 return -EOPNOTSUPP;
2369 }
2370 return ret;
2371}
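
/*
 * Illustrative user-space sketch (not part of the driver): reading a PHY
 * register through the SIOCGMIIPHY/SIOCGMIIREG path above.  On 10G ports
 * the handler decodes bits 8 and up of phy_id as the MDIO MMD (device
 * address), defaulting to the PCS device when zero, so a clause-45 read
 * encodes "mmd << 8 | phy address".  The MMD and register numbers passed
 * in are up to the caller.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int mdio_read(int fd, const char *ifname, int mmd, int reg)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills in the PHY address */
		return -1;
	mii->phy_id |= mmd << 8;		/* select the MMD on 10G ports */
	mii->reg_num = reg;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;
	return mii->val_out;
}
#endif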
2372
2373static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2374{
4d22de3e 2375 struct port_info *pi = netdev_priv(dev);
5fbf816f
DLR
2376 struct adapter *adapter = pi->adapter;
2377 int ret;
4d22de3e
DLR
2378
2379 if (new_mtu < 81) /* accommodate SACK */
2380 return -EINVAL;
2381 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2382 return ret;
2383 dev->mtu = new_mtu;
2384 init_port_mtus(adapter);
2385 if (adapter->params.rev == 0 && offload_running(adapter))
2386 t3_load_mtus(adapter, adapter->params.mtus,
2387 adapter->params.a_wnd, adapter->params.b_wnd,
2388 adapter->port[0]->mtu);
2389 return 0;
2390}
2391
2392static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2393{
4d22de3e 2394 struct port_info *pi = netdev_priv(dev);
5fbf816f 2395 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
2396 struct sockaddr *addr = p;
2397
2398 if (!is_valid_ether_addr(addr->sa_data))
2399 return -EINVAL;
2400
2401 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2402 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2403 if (offload_running(adapter))
2404 write_smt_entry(adapter, pi->port_id);
2405 return 0;
2406}
2407
2408/**
2409 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2410 * @adap: the adapter
2411 * @p: the port
2412 *
2413 * Ensures that current Rx processing on any of the queues associated with
2414 * the given port completes before returning. We do this by acquiring and
2415 * releasing the locks of the response queues associated with the port.
2416 */
2417static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2418{
2419 int i;
2420
8c263761
DLR
2421 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2422 struct sge_rspq *q = &adap->sge.qs[i].rspq;
4d22de3e
DLR
2423
2424 spin_lock_irq(&q->lock);
2425 spin_unlock_irq(&q->lock);
2426 }
2427}
2428
2429static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2430{
4d22de3e 2431 struct port_info *pi = netdev_priv(dev);
5fbf816f 2432 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
2433
2434 pi->vlan_grp = grp;
2435 if (adapter->params.rev > 0)
2436 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2437 else {
2438 /* single control for all ports */
2439 unsigned int i, have_vlans = 0;
2440 for_each_port(adapter, i)
2441 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2442
2443 t3_set_vlan_accel(adapter, 1, have_vlans);
2444 }
2445 t3_synchronize_rx(adapter, pi);
2446}
2447
4d22de3e
DLR
2448#ifdef CONFIG_NET_POLL_CONTROLLER
2449static void cxgb_netpoll(struct net_device *dev)
2450{
890de332 2451 struct port_info *pi = netdev_priv(dev);
5fbf816f 2452 struct adapter *adapter = pi->adapter;
890de332 2453 int qidx;
4d22de3e 2454
890de332
DLR
2455 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2456 struct sge_qset *qs = &adapter->sge.qs[qidx];
2457 void *source;
2eab17ab 2458
890de332
DLR
2459 if (adapter->flags & USING_MSIX)
2460 source = qs;
2461 else
2462 source = adapter;
2463
2464 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2465 }
4d22de3e
DLR
2466}
2467#endif
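
/*
 * Note: t3_intr_handler() above returns a pointer to whichever interrupt
 * handler matches the current mode (MSI-X per-queue-set vs. shared
 * INTx/MSI, polling vs. interrupt-driven), and cxgb_netpoll() invokes it
 * directly with the corresponding cookie.
 */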
2468
2469/*
2470 * Periodic accumulation of MAC statistics.
2471 */
2472static void mac_stats_update(struct adapter *adapter)
2473{
2474 int i;
2475
2476 for_each_port(adapter, i) {
2477 struct net_device *dev = adapter->port[i];
2478 struct port_info *p = netdev_priv(dev);
2479
2480 if (netif_running(dev)) {
2481 spin_lock(&adapter->stats_lock);
2482 t3_mac_update_stats(&p->mac);
2483 spin_unlock(&adapter->stats_lock);
2484 }
2485 }
2486}
2487
2488static void check_link_status(struct adapter *adapter)
2489{
2490 int i;
2491
2492 for_each_port(adapter, i) {
2493 struct net_device *dev = adapter->port[i];
2494 struct port_info *p = netdev_priv(dev);
2495
bf792094
DLR
2496 spin_lock_irq(&adapter->work_lock);
2497 if (p->link_fault) {
3851c66c 2498 t3_link_fault(adapter, i);
bf792094
DLR
2499 spin_unlock_irq(&adapter->work_lock);
2500 continue;
2501 }
2502 spin_unlock_irq(&adapter->work_lock);
2503
2504 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2505 t3_xgm_intr_disable(adapter, i);
2506 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2507
4d22de3e 2508 t3_link_changed(adapter, i);
bf792094
DLR
2509 t3_xgm_intr_enable(adapter, i);
2510 }
4d22de3e
DLR
2511 }
2512}
2513
fc90664e
DLR
2514static void check_t3b2_mac(struct adapter *adapter)
2515{
2516 int i;
2517
f2d961c9
DLR
2518 if (!rtnl_trylock()) /* synchronize with ifdown */
2519 return;
2520
fc90664e
DLR
2521 for_each_port(adapter, i) {
2522 struct net_device *dev = adapter->port[i];
2523 struct port_info *p = netdev_priv(dev);
2524 int status;
2525
2526 if (!netif_running(dev))
2527 continue;
2528
2529 status = 0;
6d6dabac 2530 if (netif_running(dev) && netif_carrier_ok(dev))
fc90664e
DLR
2531 status = t3b2_mac_watchdog_task(&p->mac);
2532 if (status == 1)
2533 p->mac.stats.num_toggled++;
2534 else if (status == 2) {
2535 struct cmac *mac = &p->mac;
2536
2537 t3_mac_set_mtu(mac, dev->mtu);
2538 t3_mac_set_address(mac, 0, dev->dev_addr);
2539 cxgb_set_rxmode(dev);
2540 t3_link_start(&p->phy, mac, &p->link_config);
2541 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2542 t3_port_intr_enable(adapter, p->port_id);
2543 p->mac.stats.num_resets++;
2544 }
2545 }
2546 rtnl_unlock();
2547}
2548
2549
4d22de3e
DLR
2550static void t3_adap_check_task(struct work_struct *work)
2551{
2552 struct adapter *adapter = container_of(work, struct adapter,
2553 adap_check_task.work);
2554 const struct adapter_params *p = &adapter->params;
fc882196
DLR
2555 int port;
2556 unsigned int v, status, reset;
4d22de3e
DLR
2557
2558 adapter->check_task_cnt++;
2559
3851c66c 2560 check_link_status(adapter);
4d22de3e
DLR
2561
2562 /* Accumulate MAC stats if needed */
2563 if (!p->linkpoll_period ||
2564 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2565 p->stats_update_period) {
2566 mac_stats_update(adapter);
2567 adapter->check_task_cnt = 0;
2568 }
2569
fc90664e
DLR
2570 if (p->rev == T3_REV_B2)
2571 check_t3b2_mac(adapter);
2572
fc882196
DLR
2573 /*
2574 * Scan the XGMACs for conditions that we want to monitor by periodic
2575 * polling rather than via an interrupt: such conditions would otherwise
2576 * flood the system with interrupts, and we only really need to know
2577 * that they are "happening" ... For each condition we count its
2578 * occurrences and clear the corresponding cause bit so it can be
2579 * detected again on the next polling pass.
2580 */
2581 for_each_port(adapter, port) {
2582 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2583 u32 cause;
2584
2585 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2586 reset = 0;
2587 if (cause & F_RXFIFO_OVERFLOW) {
2588 mac->stats.rx_fifo_ovfl++;
2589 reset |= F_RXFIFO_OVERFLOW;
2590 }
2591
2592 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2593 }
2594
2595 /*
2596 * We do the same as above for FL_EMPTY interrupts.
2597 */
2598 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2599 reset = 0;
2600
2601 if (status & F_FLEMPTY) {
2602 struct sge_qset *qs = &adapter->sge.qs[0];
2603 int i = 0;
2604
2605 reset |= F_FLEMPTY;
2606
2607 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2608 0xffff;
2609
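/* Bits of v alternate between the two free lists of consecutive
 * queue sets: bit 0 is qs[0].fl[0], bit 1 is qs[0].fl[1],
 * bit 2 is qs[1].fl[0], and so on. */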
2610 while (v) {
2611 qs->fl[i].empty += (v & 1);
2612 if (i)
2613 qs++;
2614 i ^= 1;
2615 v >>= 1;
2616 }
2617 }
2618
2619 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2620
4d22de3e 2621 /* Schedule the next check update if any port is active. */
20d3fc11 2622 spin_lock_irq(&adapter->work_lock);
4d22de3e
DLR
2623 if (adapter->open_device_map & PORT_MASK)
2624 schedule_chk_task(adapter);
20d3fc11 2625 spin_unlock_irq(&adapter->work_lock);
4d22de3e
DLR
2626}
2627
2628/*
2629 * Processes external (PHY) interrupts in process context.
2630 */
2631static void ext_intr_task(struct work_struct *work)
2632{
2633 struct adapter *adapter = container_of(work, struct adapter,
2634 ext_intr_handler_task);
bf792094
DLR
2635 int i;
2636
2637 /* Disable link fault interrupts */
2638 for_each_port(adapter, i) {
2639 struct net_device *dev = adapter->port[i];
2640 struct port_info *p = netdev_priv(dev);
2641
2642 t3_xgm_intr_disable(adapter, i);
2643 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2644 }
4d22de3e 2645
bf792094 2646 /* Handle the PHY interrupt, then re-enable link fault interrupts */
4d22de3e
DLR
2647 t3_phy_intr_handler(adapter);
2648
bf792094
DLR
2649 for_each_port(adapter, i)
2650 t3_xgm_intr_enable(adapter, i);
2651
4d22de3e
DLR
2652 /* Now reenable external interrupts */
2653 spin_lock_irq(&adapter->work_lock);
2654 if (adapter->slow_intr_mask) {
2655 adapter->slow_intr_mask |= F_T3DBG;
2656 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2657 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2658 adapter->slow_intr_mask);
2659 }
2660 spin_unlock_irq(&adapter->work_lock);
2661}
2662
2663/*
2664 * Interrupt-context handler for external (PHY) interrupts.
2665 */
2666void t3_os_ext_intr_handler(struct adapter *adapter)
2667{
2668 /*
2669 * Schedule a task to handle external interrupts as they may be slow
2670 * and we use a mutex to protect MDIO registers. We disable PHY
2671 * interrupts in the meantime and let the task reenable them when
2672 * it's done.
2673 */
2674 spin_lock(&adapter->work_lock);
2675 if (adapter->slow_intr_mask) {
2676 adapter->slow_intr_mask &= ~F_T3DBG;
2677 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2678 adapter->slow_intr_mask);
2679 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2680 }
2681 spin_unlock(&adapter->work_lock);
2682}
2683
bf792094
DLR
2684void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2685{
2686 struct net_device *netdev = adapter->port[port_id];
2687 struct port_info *pi = netdev_priv(netdev);
2688
2689 spin_lock(&adapter->work_lock);
2690 pi->link_fault = 1;
bf792094
DLR
2691 spin_unlock(&adapter->work_lock);
2692}
2693
20d3fc11
DLR
2694static int t3_adapter_error(struct adapter *adapter, int reset)
2695{
2696 int i, ret = 0;
2697
cb0bc205
DLR
2698 if (is_offload(adapter) &&
2699 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2700 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2701 offload_close(&adapter->tdev);
2702 }
2703
20d3fc11
DLR
2704 /* Stop all ports */
2705 for_each_port(adapter, i) {
2706 struct net_device *netdev = adapter->port[i];
2707
2708 if (netif_running(netdev))
2709 cxgb_close(netdev);
2710 }
2711
20d3fc11
DLR
2712 /* Stop SGE timers */
2713 t3_stop_sge_timers(adapter);
2714
2715 adapter->flags &= ~FULL_INIT_DONE;
2716
2717 if (reset)
2718 ret = t3_reset_adapter(adapter);
2719
2720 pci_disable_device(adapter->pdev);
2721
2722 return ret;
2723}
2724
2725static int t3_reenable_adapter(struct adapter *adapter)
2726{
2727 if (pci_enable_device(adapter->pdev)) {
2728 dev_err(&adapter->pdev->dev,
2729 "Cannot re-enable PCI device after reset.\n");
2730 goto err;
2731 }
2732 pci_set_master(adapter->pdev);
2733 pci_restore_state(adapter->pdev);
2734
2735 /* Free sge resources */
2736 t3_free_sge_resources(adapter);
2737
2738 if (t3_replay_prep_adapter(adapter))
2739 goto err;
2740
2741 return 0;
2742err:
2743 return -1;
2744}
2745
2746static void t3_resume_ports(struct adapter *adapter)
2747{
2748 int i;
2749
2750 /* Restart the ports */
2751 for_each_port(adapter, i) {
2752 struct net_device *netdev = adapter->port[i];
2753
2754 if (netif_running(netdev)) {
2755 if (cxgb_open(netdev)) {
2756 dev_err(&adapter->pdev->dev,
2757 "can't bring device back up"
2758 " after reset\n");
2759 continue;
2760 }
2761 }
2762 }
cb0bc205
DLR
2763
2764 if (is_offload(adapter) && !ofld_disable)
2765 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
20d3fc11
DLR
2766}
2767
2768/*
2769 * processes a fatal error.
2770 * Bring the ports down, reset the chip, bring the ports back up.
2771 */
2772static void fatal_error_task(struct work_struct *work)
2773{
2774 struct adapter *adapter = container_of(work, struct adapter,
2775 fatal_error_handler_task);
2776 int err = 0;
2777
2778 rtnl_lock();
2779 err = t3_adapter_error(adapter, 1);
2780 if (!err)
2781 err = t3_reenable_adapter(adapter);
2782 if (!err)
2783 t3_resume_ports(adapter);
2784
2785 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2786 rtnl_unlock();
2787}
2788
4d22de3e
DLR
2789void t3_fatal_err(struct adapter *adapter)
2790{
2791 unsigned int fw_status[4];
2792
2793 if (adapter->flags & FULL_INIT_DONE) {
2794 t3_sge_stop(adapter);
c64c2eae
DLR
2795 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2796 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2797 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2798 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
20d3fc11
DLR
2799
2800 spin_lock(&adapter->work_lock);
4d22de3e 2801 t3_intr_disable(adapter);
20d3fc11
DLR
2802 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2803 spin_unlock(&adapter->work_lock);
4d22de3e
DLR
2804 }
2805 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2806 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2807 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2808 fw_status[0], fw_status[1],
2809 fw_status[2], fw_status[3]);
4d22de3e
DLR
2810}
2811
91a6b50c
DLR
2812/**
2813 * t3_io_error_detected - called when PCI error is detected
2814 * @pdev: Pointer to PCI device
2815 * @state: The current pci connection state
2816 *
2817 * This function is called after a PCI bus error affecting
2818 * this device has been detected.
2819 */
2820static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2821 pci_channel_state_t state)
2822{
bc4b6b52 2823 struct adapter *adapter = pci_get_drvdata(pdev);
20d3fc11 2824 int ret;
91a6b50c 2825
20d3fc11 2826 ret = t3_adapter_error(adapter, 0);
91a6b50c 2827
48c4b6db 2828 /* Request a slot reset. */
91a6b50c
DLR
2829 return PCI_ERS_RESULT_NEED_RESET;
2830}
2831
2832/**
2833 * t3_io_slot_reset - called after the pci bus has been reset.
2834 * @pdev: Pointer to PCI device
2835 *
2836 * Restart the card from scratch, as if from a cold-boot.
2837 */
2838static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2839{
bc4b6b52 2840 struct adapter *adapter = pci_get_drvdata(pdev);
91a6b50c 2841
20d3fc11
DLR
2842 if (!t3_reenable_adapter(adapter))
2843 return PCI_ERS_RESULT_RECOVERED;
91a6b50c 2844
48c4b6db 2845 return PCI_ERS_RESULT_DISCONNECT;
91a6b50c
DLR
2846}
2847
2848/**
2849 * t3_io_resume - called when traffic can start flowing again.
2850 * @pdev: Pointer to PCI device
2851 *
2852 * This callback is called when the error recovery driver tells us that
2853 * it's OK to resume normal operation.
2854 */
2855static void t3_io_resume(struct pci_dev *pdev)
2856{
bc4b6b52 2857 struct adapter *adapter = pci_get_drvdata(pdev);
91a6b50c 2858
68f40c10
DLR
2859 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2860 t3_read_reg(adapter, A_PCIE_PEX_ERR));
2861
20d3fc11 2862 t3_resume_ports(adapter);
91a6b50c
DLR
2863}
2864
2865static struct pci_error_handlers t3_err_handler = {
2866 .error_detected = t3_io_error_detected,
2867 .slot_reset = t3_io_slot_reset,
2868 .resume = t3_io_resume,
2869};
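
/*
 * Recovery sequence driven by the PCI error-recovery core: error_detected()
 * quiesces the adapter and requests a slot reset, slot_reset() re-enables
 * and re-initializes the device after the bus reset, and resume() restarts
 * the ports once traffic is allowed to flow again.
 */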
2870
8c263761
DLR
2871/*
2872 * Set the number of qsets based on the number of CPUs and the number of ports,
2873 * not to exceed the number of available qsets, assuming there are enough qsets
2874 * per port in HW.
2875 */
2876static void set_nqsets(struct adapter *adap)
2877{
2878 int i, j = 0;
2879 int num_cpus = num_online_cpus();
2880 int hwports = adap->params.nports;
5cda9364 2881 int nqsets = adap->msix_nvectors - 1;
8c263761 2882
f9ee3882 2883 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
8c263761
DLR
2884 if (hwports == 2 &&
2885 (hwports * nqsets > SGE_QSETS ||
2886 num_cpus >= nqsets / hwports))
2887 nqsets /= hwports;
2888 if (nqsets > num_cpus)
2889 nqsets = num_cpus;
2890 if (nqsets < 1 || hwports == 4)
2891 nqsets = 1;
2892 } else
2893 nqsets = 1;
2894
2895 for_each_port(adap, i) {
2896 struct port_info *pi = adap2pinfo(adap, i);
2897
2898 pi->first_qset = j;
2899 pi->nqsets = nqsets;
2900 j = pi->first_qset + nqsets;
2901
2902 dev_info(&adap->pdev->dev,
2903 "Port %d using %d queue sets.\n", i, nqsets);
2904 }
2905}
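
/*
 * Example: a two-port adapter with 9 MSI-X vectors (8 queue-set vectors
 * plus one for slow-path interrupts) on an 8-CPU machine starts with
 * nqsets = 8; on a part with SGE_QSETS = 8, 2 * 8 exceeds SGE_QSETS, so
 * nqsets is halved to 4, leaving port 0 with qsets 0-3 and port 1 with
 * qsets 4-7.
 */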
2906
4d22de3e
DLR
2907static int __devinit cxgb_enable_msix(struct adapter *adap)
2908{
2909 struct msix_entry entries[SGE_QSETS + 1];
5cda9364 2910 int vectors;
4d22de3e
DLR
2911 int i, err;
2912
5cda9364
DLR
2913 vectors = ARRAY_SIZE(entries);
2914 for (i = 0; i < vectors; ++i)
4d22de3e
DLR
2915 entries[i].entry = i;
2916
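/*
 * A positive return from pci_enable_msix() is the number of vectors
 * that could have been allocated, so retry with that smaller request.
 * If the final attempt fails outright, or succeeds with fewer vectors
 * than one per port plus one for the slow path, release the vectors
 * with pci_disable_msix() so they are not leaked.
 */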
5cda9364
DLR
2917 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2918 vectors = err;
2919
2c2f409f
DLR
2920 if (err < 0)
2921 pci_disable_msix(adap->pdev);
2922
2923 if (!err && vectors < (adap->params.nports + 1)) {
2924 pci_disable_msix(adap->pdev);
5cda9364 2925 err = -1;
2c2f409f 2926 }
5cda9364 2927
4d22de3e 2928 if (!err) {
5cda9364 2929 for (i = 0; i < vectors; ++i)
4d22de3e 2930 adap->msix_info[i].vec = entries[i].vector;
5cda9364
DLR
2931 adap->msix_nvectors = vectors;
2932 }
2933
4d22de3e
DLR
2934 return err;
2935}
2936
2937static void __devinit print_port_info(struct adapter *adap,
2938 const struct adapter_info *ai)
2939{
2940 static const char *pci_variant[] = {
2941 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2942 };
2943
2944 int i;
2945 char buf[80];
2946
2947 if (is_pcie(adap))
2948 snprintf(buf, sizeof(buf), "%s x%d",
2949 pci_variant[adap->params.pci.variant],
2950 adap->params.pci.width);
2951 else
2952 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2953 pci_variant[adap->params.pci.variant],
2954 adap->params.pci.speed, adap->params.pci.width);
2955
2956 for_each_port(adap, i) {
2957 struct net_device *dev = adap->port[i];
2958 const struct port_info *pi = netdev_priv(dev);
2959
2960 if (!test_bit(i, &adap->registered_device_map))
2961 continue;
8ac3ba68 2962 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
04497982 2963 dev->name, ai->desc, pi->phy.desc,
8ac3ba68 2964 is_offload(adap) ? "R" : "", adap->params.rev, buf,
4d22de3e
DLR
2965 (adap->flags & USING_MSIX) ? " MSI-X" :
2966 (adap->flags & USING_MSI) ? " MSI" : "");
2967 if (adap->name == dev->name && adap->params.vpd.mclk)
167cdf5f
DLR
2968 printk(KERN_INFO
2969 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
4d22de3e
DLR
2970 adap->name, t3_mc7_size(&adap->cm) >> 20,
2971 t3_mc7_size(&adap->pmtx) >> 20,
167cdf5f
DLR
2972 t3_mc7_size(&adap->pmrx) >> 20,
2973 adap->params.vpd.sn);
4d22de3e
DLR
2974 }
2975}
2976
dd752696
SH
2977static const struct net_device_ops cxgb_netdev_ops = {
2978 .ndo_open = cxgb_open,
2979 .ndo_stop = cxgb_close,
43a944f3 2980 .ndo_start_xmit = t3_eth_xmit,
dd752696
SH
2981 .ndo_get_stats = cxgb_get_stats,
2982 .ndo_validate_addr = eth_validate_addr,
2983 .ndo_set_multicast_list = cxgb_set_rxmode,
2984 .ndo_do_ioctl = cxgb_ioctl,
2985 .ndo_change_mtu = cxgb_change_mtu,
2986 .ndo_set_mac_address = cxgb_set_mac_addr,
2987 .ndo_vlan_rx_register = vlan_rx_register,
2988#ifdef CONFIG_NET_POLL_CONTROLLER
2989 .ndo_poll_controller = cxgb_netpoll,
2990#endif
2991};
2992
4d22de3e
DLR
2993static int __devinit init_one(struct pci_dev *pdev,
2994 const struct pci_device_id *ent)
2995{
2996 static int version_printed;
2997
2998 int i, err, pci_using_dac = 0;
68f40c10 2999 resource_size_t mmio_start, mmio_len;
4d22de3e
DLR
3000 const struct adapter_info *ai;
3001 struct adapter *adapter = NULL;
3002 struct port_info *pi;
3003
3004 if (!version_printed) {
3005 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3006 ++version_printed;
3007 }
3008
3009 if (!cxgb3_wq) {
3010 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3011 if (!cxgb3_wq) {
3012 printk(KERN_ERR DRV_NAME
3013 ": cannot initialize work queue\n");
3014 return -ENOMEM;
3015 }
3016 }
3017
3018 err = pci_request_regions(pdev, DRV_NAME);
3019 if (err) {
3020 /* Just info, some other driver may have claimed the device. */
3021 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3022 return err;
3023 }
3024
3025 err = pci_enable_device(pdev);
3026 if (err) {
3027 dev_err(&pdev->dev, "cannot enable PCI device\n");
3028 goto out_release_regions;
3029 }
3030
6a35528a 3031 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4d22de3e 3032 pci_using_dac = 1;
6a35528a 3033 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4d22de3e
DLR
3034 if (err) {
3035 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3036 "coherent allocations\n");
3037 goto out_disable_device;
3038 }
284901a9 3039 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
4d22de3e
DLR
3040 dev_err(&pdev->dev, "no usable DMA configuration\n");
3041 goto out_disable_device;
3042 }
3043
3044 pci_set_master(pdev);
204e2f98 3045 pci_save_state(pdev);
4d22de3e
DLR
3046
3047 mmio_start = pci_resource_start(pdev, 0);
3048 mmio_len = pci_resource_len(pdev, 0);
3049 ai = t3_get_adapter_info(ent->driver_data);
3050
3051 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3052 if (!adapter) {
3053 err = -ENOMEM;
3054 goto out_disable_device;
3055 }
3056
3057 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3058 if (!adapter->regs) {
3059 dev_err(&pdev->dev, "cannot map device registers\n");
3060 err = -ENOMEM;
3061 goto out_free_adapter;
3062 }
3063
3064 adapter->pdev = pdev;
3065 adapter->name = pci_name(pdev);
3066 adapter->msg_enable = dflt_msg_enable;
3067 adapter->mmio_len = mmio_len;
3068
3069 mutex_init(&adapter->mdio_lock);
3070 spin_lock_init(&adapter->work_lock);
3071 spin_lock_init(&adapter->stats_lock);
3072
3073 INIT_LIST_HEAD(&adapter->adapter_list);
3074 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
20d3fc11 3075 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
4d22de3e
DLR
3076 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3077
952cdf33 3078 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
4d22de3e
DLR
3079 struct net_device *netdev;
3080
82ad3329 3081 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
4d22de3e
DLR
3082 if (!netdev) {
3083 err = -ENOMEM;
3084 goto out_free_dev;
3085 }
3086
4d22de3e
DLR
3087 SET_NETDEV_DEV(netdev, &pdev->dev);
3088
3089 adapter->port[i] = netdev;
3090 pi = netdev_priv(netdev);
5fbf816f 3091 pi->adapter = adapter;
47fd23fe 3092 pi->rx_offload = T3_RX_CSUM | T3_LRO;
4d22de3e
DLR
3093 pi->port_id = i;
3094 netif_carrier_off(netdev);
82ad3329 3095 netif_tx_stop_all_queues(netdev);
4d22de3e
DLR
3096 netdev->irq = pdev->irq;
3097 netdev->mem_start = mmio_start;
3098 netdev->mem_end = mmio_start + mmio_len - 1;
4d22de3e
DLR
3099 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3100 netdev->features |= NETIF_F_LLTX;
7be2df45 3101 netdev->features |= NETIF_F_GRO;
4d22de3e
DLR
3102 if (pci_using_dac)
3103 netdev->features |= NETIF_F_HIGHDMA;
3104
3105 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dd752696 3106 netdev->netdev_ops = &cxgb_netdev_ops;
4d22de3e
DLR
3107 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3108 }
3109
5fbf816f 3110 pci_set_drvdata(pdev, adapter);
4d22de3e
DLR
3111 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3112 err = -ENODEV;
3113 goto out_free_dev;
3114 }
2eab17ab 3115
4d22de3e
DLR
3116 /*
3117 * The card is now ready to go. If any errors occur during device
3118 * registration we do not fail the whole card but rather proceed only
3119 * with the ports we manage to register successfully. However we must
3120 * register at least one net device.
3121 */
3122 for_each_port(adapter, i) {
3123 err = register_netdev(adapter->port[i]);
3124 if (err)
3125 dev_warn(&pdev->dev,
3126 "cannot register net device %s, skipping\n",
3127 adapter->port[i]->name);
3128 else {
3129 /*
3130 * Change the name we use for messages to the name of
3131 * the first successfully registered interface.
3132 */
3133 if (!adapter->registered_device_map)
3134 adapter->name = adapter->port[i]->name;
3135
3136 __set_bit(i, &adapter->registered_device_map);
3137 }
3138 }
3139 if (!adapter->registered_device_map) {
3140 dev_err(&pdev->dev, "could not register any net devices\n");
3141 goto out_free_dev;
3142 }
3143
3144 /* Driver's ready. Reflect it on LEDs */
3145 t3_led_ready(adapter);
3146
3147 if (is_offload(adapter)) {
3148 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3149 cxgb3_adapter_ofld(adapter);
3150 }
3151
3152 /* See what interrupts we'll be using */
3153 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3154 adapter->flags |= USING_MSIX;
3155 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3156 adapter->flags |= USING_MSI;
3157
8c263761
DLR
3158 set_nqsets(adapter);
3159
0ee8d33c 3160 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
4d22de3e
DLR
3161 &cxgb3_attr_group);
3162
3163 print_port_info(adapter, ai);
3164 return 0;
3165
3166out_free_dev:
3167 iounmap(adapter->regs);
952cdf33 3168 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
4d22de3e
DLR
3169 if (adapter->port[i])
3170 free_netdev(adapter->port[i]);
3171
3172out_free_adapter:
3173 kfree(adapter);
3174
3175out_disable_device:
3176 pci_disable_device(pdev);
3177out_release_regions:
3178 pci_release_regions(pdev);
3179 pci_set_drvdata(pdev, NULL);
3180 return err;
3181}
3182
3183static void __devexit remove_one(struct pci_dev *pdev)
3184{
5fbf816f 3185 struct adapter *adapter = pci_get_drvdata(pdev);
4d22de3e 3186
5fbf816f 3187 if (adapter) {
4d22de3e 3188 int i;
4d22de3e
DLR
3189
3190 t3_sge_stop(adapter);
0ee8d33c 3191 sysfs_remove_group(&adapter->port[0]->dev.kobj,
4d22de3e
DLR
3192 &cxgb3_attr_group);
3193
4d22de3e
DLR
3194 if (is_offload(adapter)) {
3195 cxgb3_adapter_unofld(adapter);
3196 if (test_bit(OFFLOAD_DEVMAP_BIT,
3197 &adapter->open_device_map))
3198 offload_close(&adapter->tdev);
3199 }
3200
67d92ab7
DLR
3201 for_each_port(adapter, i)
3202 if (test_bit(i, &adapter->registered_device_map))
3203 unregister_netdev(adapter->port[i]);
3204
0ca41c04 3205 t3_stop_sge_timers(adapter);
4d22de3e
DLR
3206 t3_free_sge_resources(adapter);
3207 cxgb_disable_msi(adapter);
3208
4d22de3e
DLR
3209 for_each_port(adapter, i)
3210 if (adapter->port[i])
3211 free_netdev(adapter->port[i]);
3212
3213 iounmap(adapter->regs);
3214 kfree(adapter);
3215 pci_release_regions(pdev);
3216 pci_disable_device(pdev);
3217 pci_set_drvdata(pdev, NULL);
3218 }
3219}
3220
3221static struct pci_driver driver = {
3222 .name = DRV_NAME,
3223 .id_table = cxgb3_pci_tbl,
3224 .probe = init_one,
3225 .remove = __devexit_p(remove_one),
91a6b50c 3226 .err_handler = &t3_err_handler,
4d22de3e
DLR
3227};
3228
3229static int __init cxgb3_init_module(void)
3230{
3231 int ret;
3232
3233 cxgb3_offload_init();
3234
3235 ret = pci_register_driver(&driver);
3236 return ret;
3237}
3238
3239static void __exit cxgb3_cleanup_module(void)
3240{
3241 pci_unregister_driver(&driver);
3242 if (cxgb3_wq)
3243 destroy_workqueue(cxgb3_wq);
3244}
3245
3246module_init(cxgb3_init_module);
3247module_exit(cxgb3_cleanup_module);