dma-mapping: replace all DMA_39BIT_MASK macro with DMA_BIT_MASK(39)
[linux-2.6-block.git] / drivers / net / cxgb3 / cxgb3_main.c
CommitLineData
4d22de3e 1/*
a02d44a0 2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
1d68e93d
DLR
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
1d68e93d
DLR
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
4d22de3e
DLR
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
2e283962 45#include <linux/firmware.h>
d9da466a 46#include <linux/log2.h>
4d22de3e
DLR
47#include <asm/uaccess.h>
48
49#include "common.h"
50#include "cxgb3_ioctl.h"
51#include "regs.h"
52#include "cxgb3_offload.h"
53#include "version.h"
54
55#include "cxgb3_ctl_defs.h"
56#include "t3_cpl.h"
57#include "firmware_exports.h"
58
59enum {
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
65 MIN_TXQ_ENTRIES = 4,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
68 MIN_FL_ENTRIES = 32
69};
70
71#define PORT_MASK ((1 << MAX_NPORTS) - 1)
72
73#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
76
77#define EEPROM_MAGIC 0x38E2F10C
78
678771d6
DLR
79#define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
4d22de3e
DLR
81
82static const struct pci_device_id cxgb3_pci_tbl[] = {
678771d6
DLR
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
ce03aadd 93 CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
4d22de3e
DLR
94 {0,}
95};
96
97MODULE_DESCRIPTION(DRV_DESC);
98MODULE_AUTHOR("Chelsio Communications");
1d68e93d 99MODULE_LICENSE("Dual BSD/GPL");
4d22de3e
DLR
100MODULE_VERSION(DRV_VERSION);
101MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102
103static int dflt_msg_enable = DFLT_MSG_ENABLE;
104
105module_param(dflt_msg_enable, int, 0644);
106MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
107
108/*
109 * The driver uses the best interrupt scheme available on a platform in the
110 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
111 * of these schemes the driver may consider as follows:
112 *
113 * msi = 2: choose from among all three options
114 * msi = 1: only consider MSI and pin interrupts
115 * msi = 0: force pin interrupts
116 */
117static int msi = 2;
118
119module_param(msi, int, 0644);
120MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
121
122/*
123 * The driver enables offload as a default.
124 * To disable it, use ofld_disable = 1.
125 */
126
127static int ofld_disable = 0;
128
129module_param(ofld_disable, int, 0644);
130MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
131
132/*
133 * We have work elements that we need to cancel when an interface is taken
134 * down. Normally the work elements would be executed by keventd but that
135 * can deadlock because of linkwatch. If our close method takes the rtnl
136 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
137 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
138 * for our work to complete. Get our own work queue to solve this.
139 */
140static struct workqueue_struct *cxgb3_wq;
141
142/**
143 * link_report - show link status and link speed/duplex
144 * @p: the port whose settings are to be reported
145 *
146 * Shows the link status, speed, and duplex of a port.
147 */
148static void link_report(struct net_device *dev)
149{
150 if (!netif_carrier_ok(dev))
151 printk(KERN_INFO "%s: link down\n", dev->name);
152 else {
153 const char *s = "10Mbps";
154 const struct port_info *p = netdev_priv(dev);
155
156 switch (p->link_config.speed) {
157 case SPEED_10000:
158 s = "10Gbps";
159 break;
160 case SPEED_1000:
161 s = "1000Mbps";
162 break;
163 case SPEED_100:
164 s = "100Mbps";
165 break;
166 }
167
168 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
169 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
170 }
171}
172
bf792094
DLR
173void t3_os_link_fault(struct adapter *adap, int port_id, int state)
174{
175 struct net_device *dev = adap->port[port_id];
176 struct port_info *pi = netdev_priv(dev);
177
178 if (state == netif_carrier_ok(dev))
179 return;
180
181 if (state) {
182 struct cmac *mac = &pi->mac;
183
184 netif_carrier_on(dev);
185
186 /* Clear local faults */
187 t3_xgm_intr_disable(adap, pi->port_id);
188 t3_read_reg(adap, A_XGM_INT_STATUS +
189 pi->mac.offset);
190 t3_write_reg(adap,
191 A_XGM_INT_CAUSE + pi->mac.offset,
192 F_XGM_INT);
193
194 t3_set_reg_field(adap,
195 A_XGM_INT_ENABLE +
196 pi->mac.offset,
197 F_XGM_INT, F_XGM_INT);
198 t3_xgm_intr_enable(adap, pi->port_id);
199
200 t3_mac_enable(mac, MAC_DIRECTION_TX);
201 } else
202 netif_carrier_off(dev);
203
204 link_report(dev);
205}
206
4d22de3e
DLR
207/**
208 * t3_os_link_changed - handle link status changes
209 * @adapter: the adapter associated with the link change
210 * @port_id: the port index whose limk status has changed
211 * @link_stat: the new status of the link
212 * @speed: the new speed setting
213 * @duplex: the new duplex setting
214 * @pause: the new flow-control setting
215 *
216 * This is the OS-dependent handler for link status changes. The OS
217 * neutral handler takes care of most of the processing for these events,
218 * then calls this handler for any OS-specific processing.
219 */
220void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
221 int speed, int duplex, int pause)
222{
223 struct net_device *dev = adapter->port[port_id];
6d6dabac
DLR
224 struct port_info *pi = netdev_priv(dev);
225 struct cmac *mac = &pi->mac;
4d22de3e
DLR
226
227 /* Skip changes from disabled ports. */
228 if (!netif_running(dev))
229 return;
230
231 if (link_stat != netif_carrier_ok(dev)) {
6d6dabac 232 if (link_stat) {
59cf8107 233 t3_mac_enable(mac, MAC_DIRECTION_RX);
bf792094
DLR
234
235 /* Clear local faults */
236 t3_xgm_intr_disable(adapter, pi->port_id);
237 t3_read_reg(adapter, A_XGM_INT_STATUS +
238 pi->mac.offset);
239 t3_write_reg(adapter,
240 A_XGM_INT_CAUSE + pi->mac.offset,
241 F_XGM_INT);
242
243 t3_set_reg_field(adapter,
244 A_XGM_INT_ENABLE + pi->mac.offset,
245 F_XGM_INT, F_XGM_INT);
246 t3_xgm_intr_enable(adapter, pi->port_id);
247
4d22de3e 248 netif_carrier_on(dev);
6d6dabac 249 } else {
4d22de3e 250 netif_carrier_off(dev);
bf792094
DLR
251
252 t3_xgm_intr_disable(adapter, pi->port_id);
253 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
254 t3_set_reg_field(adapter,
255 A_XGM_INT_ENABLE + pi->mac.offset,
256 F_XGM_INT, 0);
257
258 if (is_10G(adapter))
259 pi->phy.ops->power_down(&pi->phy, 1);
260
261 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
59cf8107
DLR
262 t3_mac_disable(mac, MAC_DIRECTION_RX);
263 t3_link_start(&pi->phy, mac, &pi->link_config);
6d6dabac
DLR
264 }
265
4d22de3e
DLR
266 link_report(dev);
267 }
268}
269
1e882025
DLR
270/**
271 * t3_os_phymod_changed - handle PHY module changes
272 * @phy: the PHY reporting the module change
273 * @mod_type: new module type
274 *
275 * This is the OS-dependent handler for PHY module changes. It is
276 * invoked when a PHY module is removed or inserted for any OS-specific
277 * processing.
278 */
279void t3_os_phymod_changed(struct adapter *adap, int port_id)
280{
281 static const char *mod_str[] = {
282 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
283 };
284
285 const struct net_device *dev = adap->port[port_id];
286 const struct port_info *pi = netdev_priv(dev);
287
288 if (pi->phy.modtype == phy_modtype_none)
289 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
290 else
291 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
292 mod_str[pi->phy.modtype]);
293}
294
4d22de3e
DLR
295static void cxgb_set_rxmode(struct net_device *dev)
296{
297 struct t3_rx_mode rm;
298 struct port_info *pi = netdev_priv(dev);
299
300 init_rx_mode(&rm, dev, dev->mc_list);
301 t3_mac_set_rx_mode(&pi->mac, &rm);
302}
303
304/**
305 * link_start - enable a port
306 * @dev: the device to enable
307 *
308 * Performs the MAC and PHY actions needed to enable a port.
309 */
310static void link_start(struct net_device *dev)
311{
312 struct t3_rx_mode rm;
313 struct port_info *pi = netdev_priv(dev);
314 struct cmac *mac = &pi->mac;
315
316 init_rx_mode(&rm, dev, dev->mc_list);
317 t3_mac_reset(mac);
318 t3_mac_set_mtu(mac, dev->mtu);
319 t3_mac_set_address(mac, 0, dev->dev_addr);
320 t3_mac_set_rx_mode(mac, &rm);
321 t3_link_start(&pi->phy, mac, &pi->link_config);
322 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
323}
324
325static inline void cxgb_disable_msi(struct adapter *adapter)
326{
327 if (adapter->flags & USING_MSIX) {
328 pci_disable_msix(adapter->pdev);
329 adapter->flags &= ~USING_MSIX;
330 } else if (adapter->flags & USING_MSI) {
331 pci_disable_msi(adapter->pdev);
332 adapter->flags &= ~USING_MSI;
333 }
334}
335
336/*
337 * Interrupt handler for asynchronous events used with MSI-X.
338 */
339static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
340{
341 t3_slow_intr_handler(cookie);
342 return IRQ_HANDLED;
343}
344
345/*
346 * Name the MSI-X interrupts.
347 */
348static void name_msix_vecs(struct adapter *adap)
349{
350 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
351
352 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
353 adap->msix_info[0].desc[n] = 0;
354
355 for_each_port(adap, j) {
356 struct net_device *d = adap->port[j];
357 const struct port_info *pi = netdev_priv(d);
358
359 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
360 snprintf(adap->msix_info[msi_idx].desc, n,
8c263761 361 "%s-%d", d->name, pi->first_qset + i);
4d22de3e
DLR
362 adap->msix_info[msi_idx].desc[n] = 0;
363 }
8c263761 364 }
4d22de3e
DLR
365}
366
367static int request_msix_data_irqs(struct adapter *adap)
368{
369 int i, j, err, qidx = 0;
370
371 for_each_port(adap, i) {
372 int nqsets = adap2pinfo(adap, i)->nqsets;
373
374 for (j = 0; j < nqsets; ++j) {
375 err = request_irq(adap->msix_info[qidx + 1].vec,
376 t3_intr_handler(adap,
377 adap->sge.qs[qidx].
378 rspq.polling), 0,
379 adap->msix_info[qidx + 1].desc,
380 &adap->sge.qs[qidx]);
381 if (err) {
382 while (--qidx >= 0)
383 free_irq(adap->msix_info[qidx + 1].vec,
384 &adap->sge.qs[qidx]);
385 return err;
386 }
387 qidx++;
388 }
389 }
390 return 0;
391}
392
8c263761
DLR
393static void free_irq_resources(struct adapter *adapter)
394{
395 if (adapter->flags & USING_MSIX) {
396 int i, n = 0;
397
398 free_irq(adapter->msix_info[0].vec, adapter);
399 for_each_port(adapter, i)
5cda9364 400 n += adap2pinfo(adapter, i)->nqsets;
8c263761
DLR
401
402 for (i = 0; i < n; ++i)
403 free_irq(adapter->msix_info[i + 1].vec,
404 &adapter->sge.qs[i]);
405 } else
406 free_irq(adapter->pdev->irq, adapter);
407}
408
b881955b
DLR
409static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
410 unsigned long n)
411{
412 int attempts = 5;
413
414 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
415 if (!--attempts)
416 return -ETIMEDOUT;
417 msleep(10);
418 }
419 return 0;
420}
421
422static int init_tp_parity(struct adapter *adap)
423{
424 int i;
425 struct sk_buff *skb;
426 struct cpl_set_tcb_field *greq;
427 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
428
429 t3_tp_set_offload_mode(adap, 1);
430
431 for (i = 0; i < 16; i++) {
432 struct cpl_smt_write_req *req;
433
434 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
435 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
436 memset(req, 0, sizeof(*req));
437 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
438 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
439 req->iff = i;
440 t3_mgmt_tx(adap, skb);
441 }
442
443 for (i = 0; i < 2048; i++) {
444 struct cpl_l2t_write_req *req;
445
446 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
447 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
448 memset(req, 0, sizeof(*req));
449 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
450 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
451 req->params = htonl(V_L2T_W_IDX(i));
452 t3_mgmt_tx(adap, skb);
453 }
454
455 for (i = 0; i < 2048; i++) {
456 struct cpl_rte_write_req *req;
457
458 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
459 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
460 memset(req, 0, sizeof(*req));
461 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
462 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
463 req->l2t_idx = htonl(V_L2T_W_IDX(i));
464 t3_mgmt_tx(adap, skb);
465 }
466
467 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
468 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
469 memset(greq, 0, sizeof(*greq));
470 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
471 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
472 greq->mask = cpu_to_be64(1);
473 t3_mgmt_tx(adap, skb);
474
475 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
476 t3_tp_set_offload_mode(adap, 0);
477 return i;
478}
479
4d22de3e
DLR
480/**
481 * setup_rss - configure RSS
482 * @adap: the adapter
483 *
484 * Sets up RSS to distribute packets to multiple receive queues. We
485 * configure the RSS CPU lookup table to distribute to the number of HW
486 * receive queues, and the response queue lookup table to narrow that
487 * down to the response queues actually configured for each port.
488 * We always configure the RSS mapping for two ports since the mapping
489 * table has plenty of entries.
490 */
491static void setup_rss(struct adapter *adap)
492{
493 int i;
494 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
495 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
496 u8 cpus[SGE_QSETS + 1];
497 u16 rspq_map[RSS_TABLE_SIZE];
498
499 for (i = 0; i < SGE_QSETS; ++i)
500 cpus[i] = i;
501 cpus[SGE_QSETS] = 0xff; /* terminator */
502
503 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
504 rspq_map[i] = i % nq0;
505 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
506 }
507
508 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
509 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
a2604be5 510 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
4d22de3e
DLR
511}
512
bea3348e 513static void init_napi(struct adapter *adap)
4d22de3e 514{
bea3348e 515 int i;
4d22de3e 516
bea3348e
SH
517 for (i = 0; i < SGE_QSETS; i++) {
518 struct sge_qset *qs = &adap->sge.qs[i];
4d22de3e 519
bea3348e
SH
520 if (qs->adap)
521 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
522 64);
4d22de3e 523 }
48c4b6db
DLR
524
525 /*
526 * netif_napi_add() can be called only once per napi_struct because it
527 * adds each new napi_struct to a list. Be careful not to call it a
528 * second time, e.g., during EEH recovery, by making a note of it.
529 */
530 adap->flags |= NAPI_INIT;
4d22de3e
DLR
531}
532
533/*
534 * Wait until all NAPI handlers are descheduled. This includes the handlers of
535 * both netdevices representing interfaces and the dummy ones for the extra
536 * queues.
537 */
538static void quiesce_rx(struct adapter *adap)
539{
540 int i;
4d22de3e 541
bea3348e
SH
542 for (i = 0; i < SGE_QSETS; i++)
543 if (adap->sge.qs[i].adap)
544 napi_disable(&adap->sge.qs[i].napi);
545}
4d22de3e 546
bea3348e
SH
547static void enable_all_napi(struct adapter *adap)
548{
549 int i;
550 for (i = 0; i < SGE_QSETS; i++)
551 if (adap->sge.qs[i].adap)
552 napi_enable(&adap->sge.qs[i].napi);
4d22de3e
DLR
553}
554
04ecb072
DLR
555/**
556 * set_qset_lro - Turn a queue set's LRO capability on and off
557 * @dev: the device the qset is attached to
558 * @qset_idx: the queue set index
559 * @val: the LRO switch
560 *
561 * Sets LRO on or off for a particular queue set.
562 * the device's features flag is updated to reflect the LRO
563 * capability when all queues belonging to the device are
564 * in the same state.
565 */
566static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
567{
568 struct port_info *pi = netdev_priv(dev);
569 struct adapter *adapter = pi->adapter;
04ecb072
DLR
570
571 adapter->params.sge.qset[qset_idx].lro = !!val;
572 adapter->sge.qs[qset_idx].lro_enabled = !!val;
04ecb072
DLR
573}
574
4d22de3e
DLR
575/**
576 * setup_sge_qsets - configure SGE Tx/Rx/response queues
577 * @adap: the adapter
578 *
579 * Determines how many sets of SGE queues to use and initializes them.
580 * We support multiple queue sets per port if we have MSI-X, otherwise
581 * just one queue set per port.
582 */
583static int setup_sge_qsets(struct adapter *adap)
584{
bea3348e 585 int i, j, err, irq_idx = 0, qset_idx = 0;
8ac3ba68 586 unsigned int ntxq = SGE_TXQ_PER_SET;
4d22de3e
DLR
587
588 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
589 irq_idx = -1;
590
591 for_each_port(adap, i) {
592 struct net_device *dev = adap->port[i];
bea3348e 593 struct port_info *pi = netdev_priv(dev);
4d22de3e 594
bea3348e 595 pi->qs = &adap->sge.qs[pi->first_qset];
8c263761
DLR
596 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
597 ++j, ++qset_idx) {
47fd23fe 598 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
4d22de3e
DLR
599 err = t3_sge_alloc_qset(adap, qset_idx, 1,
600 (adap->flags & USING_MSIX) ? qset_idx + 1 :
601 irq_idx,
82ad3329
DLR
602 &adap->params.sge.qset[qset_idx], ntxq, dev,
603 netdev_get_tx_queue(dev, j));
4d22de3e
DLR
604 if (err) {
605 t3_free_sge_resources(adap);
606 return err;
607 }
608 }
609 }
610
611 return 0;
612}
613
3e5192ee 614static ssize_t attr_show(struct device *d, char *buf,
896392ef 615 ssize_t(*format) (struct net_device *, char *))
4d22de3e
DLR
616{
617 ssize_t len;
4d22de3e
DLR
618
619 /* Synchronize with ioctls that may shut down the device */
620 rtnl_lock();
896392ef 621 len = (*format) (to_net_dev(d), buf);
4d22de3e
DLR
622 rtnl_unlock();
623 return len;
624}
625
3e5192ee 626static ssize_t attr_store(struct device *d,
0ee8d33c 627 const char *buf, size_t len,
896392ef 628 ssize_t(*set) (struct net_device *, unsigned int),
4d22de3e
DLR
629 unsigned int min_val, unsigned int max_val)
630{
631 char *endp;
632 ssize_t ret;
633 unsigned int val;
4d22de3e
DLR
634
635 if (!capable(CAP_NET_ADMIN))
636 return -EPERM;
637
638 val = simple_strtoul(buf, &endp, 0);
639 if (endp == buf || val < min_val || val > max_val)
640 return -EINVAL;
641
642 rtnl_lock();
896392ef 643 ret = (*set) (to_net_dev(d), val);
4d22de3e
DLR
644 if (!ret)
645 ret = len;
646 rtnl_unlock();
647 return ret;
648}
649
650#define CXGB3_SHOW(name, val_expr) \
896392ef 651static ssize_t format_##name(struct net_device *dev, char *buf) \
4d22de3e 652{ \
5fbf816f
DLR
653 struct port_info *pi = netdev_priv(dev); \
654 struct adapter *adap = pi->adapter; \
4d22de3e
DLR
655 return sprintf(buf, "%u\n", val_expr); \
656} \
0ee8d33c
DLR
657static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
658 char *buf) \
4d22de3e 659{ \
3e5192ee 660 return attr_show(d, buf, format_##name); \
4d22de3e
DLR
661}
662
896392ef 663static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
4d22de3e 664{
5fbf816f
DLR
665 struct port_info *pi = netdev_priv(dev);
666 struct adapter *adap = pi->adapter;
9f238486 667 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
896392ef 668
4d22de3e
DLR
669 if (adap->flags & FULL_INIT_DONE)
670 return -EBUSY;
671 if (val && adap->params.rev == 0)
672 return -EINVAL;
9f238486
DLR
673 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
674 min_tids)
4d22de3e
DLR
675 return -EINVAL;
676 adap->params.mc5.nfilters = val;
677 return 0;
678}
679
0ee8d33c
DLR
680static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
681 const char *buf, size_t len)
4d22de3e 682{
3e5192ee 683 return attr_store(d, buf, len, set_nfilters, 0, ~0);
4d22de3e
DLR
684}
685
896392ef 686static ssize_t set_nservers(struct net_device *dev, unsigned int val)
4d22de3e 687{
5fbf816f
DLR
688 struct port_info *pi = netdev_priv(dev);
689 struct adapter *adap = pi->adapter;
896392ef 690
4d22de3e
DLR
691 if (adap->flags & FULL_INIT_DONE)
692 return -EBUSY;
9f238486
DLR
693 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
694 MC5_MIN_TIDS)
4d22de3e
DLR
695 return -EINVAL;
696 adap->params.mc5.nservers = val;
697 return 0;
698}
699
0ee8d33c
DLR
700static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
701 const char *buf, size_t len)
4d22de3e 702{
3e5192ee 703 return attr_store(d, buf, len, set_nservers, 0, ~0);
4d22de3e
DLR
704}
705
706#define CXGB3_ATTR_R(name, val_expr) \
707CXGB3_SHOW(name, val_expr) \
0ee8d33c 708static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
4d22de3e
DLR
709
710#define CXGB3_ATTR_RW(name, val_expr, store_method) \
711CXGB3_SHOW(name, val_expr) \
0ee8d33c 712static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
4d22de3e
DLR
713
714CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
715CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
716CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
717
718static struct attribute *cxgb3_attrs[] = {
0ee8d33c
DLR
719 &dev_attr_cam_size.attr,
720 &dev_attr_nfilters.attr,
721 &dev_attr_nservers.attr,
4d22de3e
DLR
722 NULL
723};
724
725static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
726
3e5192ee 727static ssize_t tm_attr_show(struct device *d,
0ee8d33c 728 char *buf, int sched)
4d22de3e 729{
5fbf816f
DLR
730 struct port_info *pi = netdev_priv(to_net_dev(d));
731 struct adapter *adap = pi->adapter;
4d22de3e 732 unsigned int v, addr, bpt, cpt;
5fbf816f 733 ssize_t len;
4d22de3e
DLR
734
735 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
736 rtnl_lock();
737 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
738 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
739 if (sched & 1)
740 v >>= 16;
741 bpt = (v >> 8) & 0xff;
742 cpt = v & 0xff;
743 if (!cpt)
744 len = sprintf(buf, "disabled\n");
745 else {
746 v = (adap->params.vpd.cclk * 1000) / cpt;
747 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
748 }
749 rtnl_unlock();
750 return len;
751}
752
3e5192ee 753static ssize_t tm_attr_store(struct device *d,
0ee8d33c 754 const char *buf, size_t len, int sched)
4d22de3e 755{
5fbf816f
DLR
756 struct port_info *pi = netdev_priv(to_net_dev(d));
757 struct adapter *adap = pi->adapter;
758 unsigned int val;
4d22de3e
DLR
759 char *endp;
760 ssize_t ret;
4d22de3e
DLR
761
762 if (!capable(CAP_NET_ADMIN))
763 return -EPERM;
764
765 val = simple_strtoul(buf, &endp, 0);
766 if (endp == buf || val > 10000000)
767 return -EINVAL;
768
769 rtnl_lock();
770 ret = t3_config_sched(adap, val, sched);
771 if (!ret)
772 ret = len;
773 rtnl_unlock();
774 return ret;
775}
776
777#define TM_ATTR(name, sched) \
0ee8d33c
DLR
778static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
779 char *buf) \
4d22de3e 780{ \
3e5192ee 781 return tm_attr_show(d, buf, sched); \
4d22de3e 782} \
0ee8d33c
DLR
783static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
784 const char *buf, size_t len) \
4d22de3e 785{ \
3e5192ee 786 return tm_attr_store(d, buf, len, sched); \
4d22de3e 787} \
0ee8d33c 788static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
4d22de3e
DLR
789
790TM_ATTR(sched0, 0);
791TM_ATTR(sched1, 1);
792TM_ATTR(sched2, 2);
793TM_ATTR(sched3, 3);
794TM_ATTR(sched4, 4);
795TM_ATTR(sched5, 5);
796TM_ATTR(sched6, 6);
797TM_ATTR(sched7, 7);
798
799static struct attribute *offload_attrs[] = {
0ee8d33c
DLR
800 &dev_attr_sched0.attr,
801 &dev_attr_sched1.attr,
802 &dev_attr_sched2.attr,
803 &dev_attr_sched3.attr,
804 &dev_attr_sched4.attr,
805 &dev_attr_sched5.attr,
806 &dev_attr_sched6.attr,
807 &dev_attr_sched7.attr,
4d22de3e
DLR
808 NULL
809};
810
811static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
812
813/*
814 * Sends an sk_buff to an offload queue driver
815 * after dealing with any active network taps.
816 */
817static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
818{
819 int ret;
820
821 local_bh_disable();
822 ret = t3_offload_tx(tdev, skb);
823 local_bh_enable();
824 return ret;
825}
826
827static int write_smt_entry(struct adapter *adapter, int idx)
828{
829 struct cpl_smt_write_req *req;
830 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
831
832 if (!skb)
833 return -ENOMEM;
834
835 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
836 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
837 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
838 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
839 req->iff = idx;
840 memset(req->src_mac1, 0, sizeof(req->src_mac1));
841 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
842 skb->priority = 1;
843 offload_tx(&adapter->tdev, skb);
844 return 0;
845}
846
847static int init_smt(struct adapter *adapter)
848{
849 int i;
850
851 for_each_port(adapter, i)
852 write_smt_entry(adapter, i);
853 return 0;
854}
855
856static void init_port_mtus(struct adapter *adapter)
857{
858 unsigned int mtus = adapter->port[0]->mtu;
859
860 if (adapter->port[1])
861 mtus |= adapter->port[1]->mtu << 16;
862 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
863}
864
8c263761 865static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
14ab9892
DLR
866 int hi, int port)
867{
868 struct sk_buff *skb;
869 struct mngt_pktsched_wr *req;
8c263761 870 int ret;
14ab9892
DLR
871
872 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
873 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
874 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
875 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
876 req->sched = sched;
877 req->idx = qidx;
878 req->min = lo;
879 req->max = hi;
880 req->binding = port;
8c263761
DLR
881 ret = t3_mgmt_tx(adap, skb);
882
883 return ret;
14ab9892
DLR
884}
885
8c263761 886static int bind_qsets(struct adapter *adap)
14ab9892 887{
8c263761 888 int i, j, err = 0;
14ab9892
DLR
889
890 for_each_port(adap, i) {
891 const struct port_info *pi = adap2pinfo(adap, i);
892
8c263761
DLR
893 for (j = 0; j < pi->nqsets; ++j) {
894 int ret = send_pktsched_cmd(adap, 1,
895 pi->first_qset + j, -1,
896 -1, i);
897 if (ret)
898 err = ret;
899 }
14ab9892 900 }
8c263761
DLR
901
902 return err;
14ab9892
DLR
903}
904
851fd7bd
DLR
905#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
906#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
2e283962
DLR
907
908static int upgrade_fw(struct adapter *adap)
909{
910 int ret;
911 char buf[64];
912 const struct firmware *fw;
913 struct device *dev = &adap->pdev->dev;
914
915 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
7f672cf5 916 FW_VERSION_MINOR, FW_VERSION_MICRO);
2e283962
DLR
917 ret = request_firmware(&fw, buf, dev);
918 if (ret < 0) {
919 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
920 buf);
921 return ret;
922 }
923 ret = t3_load_fw(adap, fw->data, fw->size);
924 release_firmware(fw);
47330077
DLR
925
926 if (ret == 0)
927 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
928 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
929 else
930 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
931 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
2eab17ab 932
47330077
DLR
933 return ret;
934}
935
936static inline char t3rev2char(struct adapter *adapter)
937{
938 char rev = 0;
939
940 switch(adapter->params.rev) {
941 case T3_REV_B:
942 case T3_REV_B2:
943 rev = 'b';
944 break;
1aafee26
DLR
945 case T3_REV_C:
946 rev = 'c';
947 break;
47330077
DLR
948 }
949 return rev;
950}
951
9265fabf 952static int update_tpsram(struct adapter *adap)
47330077
DLR
953{
954 const struct firmware *tpsram;
955 char buf[64];
956 struct device *dev = &adap->pdev->dev;
957 int ret;
958 char rev;
2eab17ab 959
47330077
DLR
960 rev = t3rev2char(adap);
961 if (!rev)
962 return 0;
963
964 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
965 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
966
967 ret = request_firmware(&tpsram, buf, dev);
968 if (ret < 0) {
969 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
970 buf);
971 return ret;
972 }
2eab17ab 973
47330077
DLR
974 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
975 if (ret)
2eab17ab 976 goto release_tpsram;
47330077
DLR
977
978 ret = t3_set_proto_sram(adap, tpsram->data);
979 if (ret == 0)
980 dev_info(dev,
981 "successful update of protocol engine "
982 "to %d.%d.%d\n",
983 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
984 else
985 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
986 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
987 if (ret)
988 dev_err(dev, "loading protocol SRAM failed\n");
989
990release_tpsram:
991 release_firmware(tpsram);
2eab17ab 992
2e283962
DLR
993 return ret;
994}
995
4d22de3e
DLR
996/**
997 * cxgb_up - enable the adapter
998 * @adapter: adapter being enabled
999 *
1000 * Called when the first port is enabled, this function performs the
1001 * actions necessary to make an adapter operational, such as completing
1002 * the initialization of HW modules, and enabling interrupts.
1003 *
1004 * Must be called with the rtnl lock held.
1005 */
1006static int cxgb_up(struct adapter *adap)
1007{
c54f5c24 1008 int err;
4d22de3e
DLR
1009
1010 if (!(adap->flags & FULL_INIT_DONE)) {
8207befa 1011 err = t3_check_fw_version(adap);
a5a3b460 1012 if (err == -EINVAL) {
2e283962 1013 err = upgrade_fw(adap);
8207befa
DLR
1014 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1015 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1016 FW_VERSION_MICRO, err ? "failed" : "succeeded");
a5a3b460 1017 }
4d22de3e 1018
8207befa 1019 err = t3_check_tpsram_version(adap);
47330077
DLR
1020 if (err == -EINVAL) {
1021 err = update_tpsram(adap);
8207befa
DLR
1022 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1023 TP_VERSION_MAJOR, TP_VERSION_MINOR,
1024 TP_VERSION_MICRO, err ? "failed" : "succeeded");
47330077
DLR
1025 }
1026
20d3fc11
DLR
1027 /*
1028 * Clear interrupts now to catch errors if t3_init_hw fails.
1029 * We clear them again later as initialization may trigger
1030 * conditions that can interrupt.
1031 */
1032 t3_intr_clear(adap);
1033
4d22de3e
DLR
1034 err = t3_init_hw(adap, 0);
1035 if (err)
1036 goto out;
1037
b881955b 1038 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
6cdbd77e 1039 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
bea3348e 1040
4d22de3e
DLR
1041 err = setup_sge_qsets(adap);
1042 if (err)
1043 goto out;
1044
1045 setup_rss(adap);
48c4b6db
DLR
1046 if (!(adap->flags & NAPI_INIT))
1047 init_napi(adap);
31563789
DLR
1048
1049 t3_start_sge_timers(adap);
4d22de3e
DLR
1050 adap->flags |= FULL_INIT_DONE;
1051 }
1052
1053 t3_intr_clear(adap);
1054
1055 if (adap->flags & USING_MSIX) {
1056 name_msix_vecs(adap);
1057 err = request_irq(adap->msix_info[0].vec,
1058 t3_async_intr_handler, 0,
1059 adap->msix_info[0].desc, adap);
1060 if (err)
1061 goto irq_err;
1062
42256f57
DLR
1063 err = request_msix_data_irqs(adap);
1064 if (err) {
4d22de3e
DLR
1065 free_irq(adap->msix_info[0].vec, adap);
1066 goto irq_err;
1067 }
1068 } else if ((err = request_irq(adap->pdev->irq,
1069 t3_intr_handler(adap,
1070 adap->sge.qs[0].rspq.
1071 polling),
2db6346f
TG
1072 (adap->flags & USING_MSI) ?
1073 0 : IRQF_SHARED,
4d22de3e
DLR
1074 adap->name, adap)))
1075 goto irq_err;
1076
bea3348e 1077 enable_all_napi(adap);
4d22de3e
DLR
1078 t3_sge_start(adap);
1079 t3_intr_enable(adap);
14ab9892 1080
b881955b
DLR
1081 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1082 is_offload(adap) && init_tp_parity(adap) == 0)
1083 adap->flags |= TP_PARITY_INIT;
1084
1085 if (adap->flags & TP_PARITY_INIT) {
1086 t3_write_reg(adap, A_TP_INT_CAUSE,
1087 F_CMCACHEPERR | F_ARPLUTPERR);
1088 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1089 }
1090
8c263761
DLR
1091 if (!(adap->flags & QUEUES_BOUND)) {
1092 err = bind_qsets(adap);
1093 if (err) {
1094 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1095 t3_intr_disable(adap);
1096 free_irq_resources(adap);
1097 goto out;
1098 }
1099 adap->flags |= QUEUES_BOUND;
1100 }
14ab9892 1101
4d22de3e
DLR
1102out:
1103 return err;
1104irq_err:
1105 CH_ERR(adap, "request_irq failed, err %d\n", err);
1106 goto out;
1107}
1108
1109/*
1110 * Release resources when all the ports and offloading have been stopped.
1111 */
1112static void cxgb_down(struct adapter *adapter)
1113{
1114 t3_sge_stop(adapter);
1115 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1116 t3_intr_disable(adapter);
1117 spin_unlock_irq(&adapter->work_lock);
1118
8c263761 1119 free_irq_resources(adapter);
4d22de3e
DLR
1120 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1121 quiesce_rx(adapter);
1122}
1123
1124static void schedule_chk_task(struct adapter *adap)
1125{
1126 unsigned int timeo;
1127
1128 timeo = adap->params.linkpoll_period ?
1129 (HZ * adap->params.linkpoll_period) / 10 :
1130 adap->params.stats_update_period * HZ;
1131 if (timeo)
1132 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1133}
1134
1135static int offload_open(struct net_device *dev)
1136{
5fbf816f
DLR
1137 struct port_info *pi = netdev_priv(dev);
1138 struct adapter *adapter = pi->adapter;
1139 struct t3cdev *tdev = dev2t3cdev(dev);
4d22de3e 1140 int adap_up = adapter->open_device_map & PORT_MASK;
c54f5c24 1141 int err;
4d22de3e
DLR
1142
1143 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1144 return 0;
1145
1146 if (!adap_up && (err = cxgb_up(adapter)) < 0)
48c4b6db 1147 goto out;
4d22de3e
DLR
1148
1149 t3_tp_set_offload_mode(adapter, 1);
1150 tdev->lldev = adapter->port[0];
1151 err = cxgb3_offload_activate(adapter);
1152 if (err)
1153 goto out;
1154
1155 init_port_mtus(adapter);
1156 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1157 adapter->params.b_wnd,
1158 adapter->params.rev == 0 ?
1159 adapter->port[0]->mtu : 0xffff);
1160 init_smt(adapter);
1161
d96a51f6
DN
1162 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1163 dev_dbg(&dev->dev, "cannot create sysfs group\n");
4d22de3e
DLR
1164
1165 /* Call back all registered clients */
1166 cxgb3_add_clients(tdev);
1167
1168out:
1169 /* restore them in case the offload module has changed them */
1170 if (err) {
1171 t3_tp_set_offload_mode(adapter, 0);
1172 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1173 cxgb3_set_dummy_ops(tdev);
1174 }
1175 return err;
1176}
1177
1178static int offload_close(struct t3cdev *tdev)
1179{
1180 struct adapter *adapter = tdev2adap(tdev);
1181
1182 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1183 return 0;
1184
1185 /* Call back all registered clients */
1186 cxgb3_remove_clients(tdev);
1187
0ee8d33c 1188 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
4d22de3e
DLR
1189
1190 tdev->lldev = NULL;
1191 cxgb3_set_dummy_ops(tdev);
1192 t3_tp_set_offload_mode(adapter, 0);
1193 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1194
1195 if (!adapter->open_device_map)
1196 cxgb_down(adapter);
1197
1198 cxgb3_offload_deactivate(adapter);
1199 return 0;
1200}
1201
1202static int cxgb_open(struct net_device *dev)
1203{
4d22de3e 1204 struct port_info *pi = netdev_priv(dev);
5fbf816f 1205 struct adapter *adapter = pi->adapter;
4d22de3e 1206 int other_ports = adapter->open_device_map & PORT_MASK;
5fbf816f 1207 int err;
4d22de3e 1208
48c4b6db 1209 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
4d22de3e
DLR
1210 return err;
1211
1212 set_bit(pi->port_id, &adapter->open_device_map);
8ac3ba68 1213 if (is_offload(adapter) && !ofld_disable) {
4d22de3e
DLR
1214 err = offload_open(dev);
1215 if (err)
1216 printk(KERN_WARNING
1217 "Could not initialize offload capabilities\n");
1218 }
1219
82ad3329 1220 dev->real_num_tx_queues = pi->nqsets;
4d22de3e
DLR
1221 link_start(dev);
1222 t3_port_intr_enable(adapter, pi->port_id);
82ad3329 1223 netif_tx_start_all_queues(dev);
4d22de3e
DLR
1224 if (!other_ports)
1225 schedule_chk_task(adapter);
1226
1227 return 0;
1228}
1229
1230static int cxgb_close(struct net_device *dev)
1231{
5fbf816f
DLR
1232 struct port_info *pi = netdev_priv(dev);
1233 struct adapter *adapter = pi->adapter;
4d22de3e 1234
bf792094
DLR
1235 /* Stop link fault interrupts */
1236 t3_xgm_intr_disable(adapter, pi->port_id);
1237 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1238
5fbf816f 1239 t3_port_intr_disable(adapter, pi->port_id);
82ad3329 1240 netif_tx_stop_all_queues(dev);
5fbf816f 1241 pi->phy.ops->power_down(&pi->phy, 1);
4d22de3e 1242 netif_carrier_off(dev);
5fbf816f 1243 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
4d22de3e 1244
20d3fc11 1245 spin_lock_irq(&adapter->work_lock); /* sync with update task */
5fbf816f 1246 clear_bit(pi->port_id, &adapter->open_device_map);
20d3fc11 1247 spin_unlock_irq(&adapter->work_lock);
4d22de3e
DLR
1248
1249 if (!(adapter->open_device_map & PORT_MASK))
1250 cancel_rearming_delayed_workqueue(cxgb3_wq,
1251 &adapter->adap_check_task);
1252
1253 if (!adapter->open_device_map)
1254 cxgb_down(adapter);
1255
1256 return 0;
1257}
1258
1259static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1260{
5fbf816f
DLR
1261 struct port_info *pi = netdev_priv(dev);
1262 struct adapter *adapter = pi->adapter;
1263 struct net_device_stats *ns = &pi->netstats;
4d22de3e
DLR
1264 const struct mac_stats *pstats;
1265
1266 spin_lock(&adapter->stats_lock);
5fbf816f 1267 pstats = t3_mac_update_stats(&pi->mac);
4d22de3e
DLR
1268 spin_unlock(&adapter->stats_lock);
1269
1270 ns->tx_bytes = pstats->tx_octets;
1271 ns->tx_packets = pstats->tx_frames;
1272 ns->rx_bytes = pstats->rx_octets;
1273 ns->rx_packets = pstats->rx_frames;
1274 ns->multicast = pstats->rx_mcast_frames;
1275
1276 ns->tx_errors = pstats->tx_underrun;
1277 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1278 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1279 pstats->rx_fifo_ovfl;
1280
1281 /* detailed rx_errors */
1282 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1283 ns->rx_over_errors = 0;
1284 ns->rx_crc_errors = pstats->rx_fcs_errs;
1285 ns->rx_frame_errors = pstats->rx_symbol_errs;
1286 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1287 ns->rx_missed_errors = pstats->rx_cong_drops;
1288
1289 /* detailed tx_errors */
1290 ns->tx_aborted_errors = 0;
1291 ns->tx_carrier_errors = 0;
1292 ns->tx_fifo_errors = pstats->tx_underrun;
1293 ns->tx_heartbeat_errors = 0;
1294 ns->tx_window_errors = 0;
1295 return ns;
1296}
1297
1298static u32 get_msglevel(struct net_device *dev)
1299{
5fbf816f
DLR
1300 struct port_info *pi = netdev_priv(dev);
1301 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
1302
1303 return adapter->msg_enable;
1304}
1305
1306static void set_msglevel(struct net_device *dev, u32 val)
1307{
5fbf816f
DLR
1308 struct port_info *pi = netdev_priv(dev);
1309 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
1310
1311 adapter->msg_enable = val;
1312}
1313
1314static char stats_strings[][ETH_GSTRING_LEN] = {
1315 "TxOctetsOK ",
1316 "TxFramesOK ",
1317 "TxMulticastFramesOK",
1318 "TxBroadcastFramesOK",
1319 "TxPauseFrames ",
1320 "TxUnderrun ",
1321 "TxExtUnderrun ",
1322
1323 "TxFrames64 ",
1324 "TxFrames65To127 ",
1325 "TxFrames128To255 ",
1326 "TxFrames256To511 ",
1327 "TxFrames512To1023 ",
1328 "TxFrames1024To1518 ",
1329 "TxFrames1519ToMax ",
1330
1331 "RxOctetsOK ",
1332 "RxFramesOK ",
1333 "RxMulticastFramesOK",
1334 "RxBroadcastFramesOK",
1335 "RxPauseFrames ",
1336 "RxFCSErrors ",
1337 "RxSymbolErrors ",
1338 "RxShortErrors ",
1339 "RxJabberErrors ",
1340 "RxLengthErrors ",
1341 "RxFIFOoverflow ",
1342
1343 "RxFrames64 ",
1344 "RxFrames65To127 ",
1345 "RxFrames128To255 ",
1346 "RxFrames256To511 ",
1347 "RxFrames512To1023 ",
1348 "RxFrames1024To1518 ",
1349 "RxFrames1519ToMax ",
1350
1351 "PhyFIFOErrors ",
1352 "TSO ",
1353 "VLANextractions ",
1354 "VLANinsertions ",
1355 "TxCsumOffload ",
1356 "RxCsumGood ",
b47385bd
DLR
1357 "LroAggregated ",
1358 "LroFlushed ",
1359 "LroNoDesc ",
fc90664e
DLR
1360 "RxDrops ",
1361
1362 "CheckTXEnToggled ",
1363 "CheckResets ",
1364
bf792094 1365 "LinkFaults ",
4d22de3e
DLR
1366};
1367
b9f2c044 1368static int get_sset_count(struct net_device *dev, int sset)
4d22de3e 1369{
b9f2c044
JG
1370 switch (sset) {
1371 case ETH_SS_STATS:
1372 return ARRAY_SIZE(stats_strings);
1373 default:
1374 return -EOPNOTSUPP;
1375 }
4d22de3e
DLR
1376}
1377
1378#define T3_REGMAP_SIZE (3 * 1024)
1379
1380static int get_regs_len(struct net_device *dev)
1381{
1382 return T3_REGMAP_SIZE;
1383}
1384
1385static int get_eeprom_len(struct net_device *dev)
1386{
1387 return EEPROMSIZE;
1388}
1389
1390static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1391{
5fbf816f
DLR
1392 struct port_info *pi = netdev_priv(dev);
1393 struct adapter *adapter = pi->adapter;
4d22de3e 1394 u32 fw_vers = 0;
47330077 1395 u32 tp_vers = 0;
4d22de3e 1396
cf3760da 1397 spin_lock(&adapter->stats_lock);
4d22de3e 1398 t3_get_fw_version(adapter, &fw_vers);
47330077 1399 t3_get_tp_version(adapter, &tp_vers);
cf3760da 1400 spin_unlock(&adapter->stats_lock);
4d22de3e
DLR
1401
1402 strcpy(info->driver, DRV_NAME);
1403 strcpy(info->version, DRV_VERSION);
1404 strcpy(info->bus_info, pci_name(adapter->pdev));
1405 if (!fw_vers)
1406 strcpy(info->fw_version, "N/A");
4aac3899 1407 else {
4d22de3e 1408 snprintf(info->fw_version, sizeof(info->fw_version),
47330077 1409 "%s %u.%u.%u TP %u.%u.%u",
4aac3899
DLR
1410 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1411 G_FW_VERSION_MAJOR(fw_vers),
1412 G_FW_VERSION_MINOR(fw_vers),
47330077
DLR
1413 G_FW_VERSION_MICRO(fw_vers),
1414 G_TP_VERSION_MAJOR(tp_vers),
1415 G_TP_VERSION_MINOR(tp_vers),
1416 G_TP_VERSION_MICRO(tp_vers));
4aac3899 1417 }
4d22de3e
DLR
1418}
1419
1420static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1421{
1422 if (stringset == ETH_SS_STATS)
1423 memcpy(data, stats_strings, sizeof(stats_strings));
1424}
1425
1426static unsigned long collect_sge_port_stats(struct adapter *adapter,
1427 struct port_info *p, int idx)
1428{
1429 int i;
1430 unsigned long tot = 0;
1431
8c263761
DLR
1432 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1433 tot += adapter->sge.qs[i].port_stats[idx];
4d22de3e
DLR
1434 return tot;
1435}
1436
1437static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1438 u64 *data)
1439{
4d22de3e 1440 struct port_info *pi = netdev_priv(dev);
5fbf816f 1441 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
1442 const struct mac_stats *s;
1443
1444 spin_lock(&adapter->stats_lock);
1445 s = t3_mac_update_stats(&pi->mac);
1446 spin_unlock(&adapter->stats_lock);
1447
1448 *data++ = s->tx_octets;
1449 *data++ = s->tx_frames;
1450 *data++ = s->tx_mcast_frames;
1451 *data++ = s->tx_bcast_frames;
1452 *data++ = s->tx_pause;
1453 *data++ = s->tx_underrun;
1454 *data++ = s->tx_fifo_urun;
1455
1456 *data++ = s->tx_frames_64;
1457 *data++ = s->tx_frames_65_127;
1458 *data++ = s->tx_frames_128_255;
1459 *data++ = s->tx_frames_256_511;
1460 *data++ = s->tx_frames_512_1023;
1461 *data++ = s->tx_frames_1024_1518;
1462 *data++ = s->tx_frames_1519_max;
1463
1464 *data++ = s->rx_octets;
1465 *data++ = s->rx_frames;
1466 *data++ = s->rx_mcast_frames;
1467 *data++ = s->rx_bcast_frames;
1468 *data++ = s->rx_pause;
1469 *data++ = s->rx_fcs_errs;
1470 *data++ = s->rx_symbol_errs;
1471 *data++ = s->rx_short;
1472 *data++ = s->rx_jabber;
1473 *data++ = s->rx_too_long;
1474 *data++ = s->rx_fifo_ovfl;
1475
1476 *data++ = s->rx_frames_64;
1477 *data++ = s->rx_frames_65_127;
1478 *data++ = s->rx_frames_128_255;
1479 *data++ = s->rx_frames_256_511;
1480 *data++ = s->rx_frames_512_1023;
1481 *data++ = s->rx_frames_1024_1518;
1482 *data++ = s->rx_frames_1519_max;
1483
1484 *data++ = pi->phy.fifo_errors;
1485
1486 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1487 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1488 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1489 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1490 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
7be2df45
HX
1491 *data++ = 0;
1492 *data++ = 0;
1493 *data++ = 0;
4d22de3e 1494 *data++ = s->rx_cong_drops;
fc90664e
DLR
1495
1496 *data++ = s->num_toggled;
1497 *data++ = s->num_resets;
bf792094
DLR
1498
1499 *data++ = s->link_faults;
4d22de3e
DLR
1500}
1501
1502static inline void reg_block_dump(struct adapter *ap, void *buf,
1503 unsigned int start, unsigned int end)
1504{
1505 u32 *p = buf + start;
1506
1507 for (; start <= end; start += sizeof(u32))
1508 *p++ = t3_read_reg(ap, start);
1509}
1510
1511static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1512 void *buf)
1513{
5fbf816f
DLR
1514 struct port_info *pi = netdev_priv(dev);
1515 struct adapter *ap = pi->adapter;
4d22de3e
DLR
1516
1517 /*
1518 * Version scheme:
1519 * bits 0..9: chip version
1520 * bits 10..15: chip revision
1521 * bit 31: set for PCIe cards
1522 */
1523 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1524
1525 /*
1526 * We skip the MAC statistics registers because they are clear-on-read.
1527 * Also reading multi-register stats would need to synchronize with the
1528 * periodic mac stats accumulation. Hard to justify the complexity.
1529 */
1530 memset(buf, 0, T3_REGMAP_SIZE);
1531 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1532 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1533 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1534 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1535 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1536 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1537 XGM_REG(A_XGM_SERDES_STAT3, 1));
1538 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1539 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1540}
1541
1542static int restart_autoneg(struct net_device *dev)
1543{
1544 struct port_info *p = netdev_priv(dev);
1545
1546 if (!netif_running(dev))
1547 return -EAGAIN;
1548 if (p->link_config.autoneg != AUTONEG_ENABLE)
1549 return -EINVAL;
1550 p->phy.ops->autoneg_restart(&p->phy);
1551 return 0;
1552}
1553
1554static int cxgb3_phys_id(struct net_device *dev, u32 data)
1555{
5fbf816f
DLR
1556 struct port_info *pi = netdev_priv(dev);
1557 struct adapter *adapter = pi->adapter;
4d22de3e 1558 int i;
4d22de3e
DLR
1559
1560 if (data == 0)
1561 data = 2;
1562
1563 for (i = 0; i < data * 2; i++) {
1564 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1565 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1566 if (msleep_interruptible(500))
1567 break;
1568 }
1569 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1570 F_GPIO0_OUT_VAL);
1571 return 0;
1572}
1573
1574static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1575{
1576 struct port_info *p = netdev_priv(dev);
1577
1578 cmd->supported = p->link_config.supported;
1579 cmd->advertising = p->link_config.advertising;
1580
1581 if (netif_carrier_ok(dev)) {
1582 cmd->speed = p->link_config.speed;
1583 cmd->duplex = p->link_config.duplex;
1584 } else {
1585 cmd->speed = -1;
1586 cmd->duplex = -1;
1587 }
1588
1589 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1590 cmd->phy_address = p->phy.addr;
1591 cmd->transceiver = XCVR_EXTERNAL;
1592 cmd->autoneg = p->link_config.autoneg;
1593 cmd->maxtxpkt = 0;
1594 cmd->maxrxpkt = 0;
1595 return 0;
1596}
1597
1598static int speed_duplex_to_caps(int speed, int duplex)
1599{
1600 int cap = 0;
1601
1602 switch (speed) {
1603 case SPEED_10:
1604 if (duplex == DUPLEX_FULL)
1605 cap = SUPPORTED_10baseT_Full;
1606 else
1607 cap = SUPPORTED_10baseT_Half;
1608 break;
1609 case SPEED_100:
1610 if (duplex == DUPLEX_FULL)
1611 cap = SUPPORTED_100baseT_Full;
1612 else
1613 cap = SUPPORTED_100baseT_Half;
1614 break;
1615 case SPEED_1000:
1616 if (duplex == DUPLEX_FULL)
1617 cap = SUPPORTED_1000baseT_Full;
1618 else
1619 cap = SUPPORTED_1000baseT_Half;
1620 break;
1621 case SPEED_10000:
1622 if (duplex == DUPLEX_FULL)
1623 cap = SUPPORTED_10000baseT_Full;
1624 }
1625 return cap;
1626}
1627
1628#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1629 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1630 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1631 ADVERTISED_10000baseT_Full)
1632
1633static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1634{
1635 struct port_info *p = netdev_priv(dev);
1636 struct link_config *lc = &p->link_config;
1637
9b1e3656
DLR
1638 if (!(lc->supported & SUPPORTED_Autoneg)) {
1639 /*
1640 * PHY offers a single speed/duplex. See if that's what's
1641 * being requested.
1642 */
1643 if (cmd->autoneg == AUTONEG_DISABLE) {
97915b5b 1644 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
9b1e3656
DLR
1645 if (lc->supported & cap)
1646 return 0;
1647 }
1648 return -EINVAL;
1649 }
4d22de3e
DLR
1650
1651 if (cmd->autoneg == AUTONEG_DISABLE) {
1652 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1653
1654 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1655 return -EINVAL;
1656 lc->requested_speed = cmd->speed;
1657 lc->requested_duplex = cmd->duplex;
1658 lc->advertising = 0;
1659 } else {
1660 cmd->advertising &= ADVERTISED_MASK;
1661 cmd->advertising &= lc->supported;
1662 if (!cmd->advertising)
1663 return -EINVAL;
1664 lc->requested_speed = SPEED_INVALID;
1665 lc->requested_duplex = DUPLEX_INVALID;
1666 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1667 }
1668 lc->autoneg = cmd->autoneg;
1669 if (netif_running(dev))
1670 t3_link_start(&p->phy, &p->mac, lc);
1671 return 0;
1672}
1673
1674static void get_pauseparam(struct net_device *dev,
1675 struct ethtool_pauseparam *epause)
1676{
1677 struct port_info *p = netdev_priv(dev);
1678
1679 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1680 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1681 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1682}
1683
1684static int set_pauseparam(struct net_device *dev,
1685 struct ethtool_pauseparam *epause)
1686{
1687 struct port_info *p = netdev_priv(dev);
1688 struct link_config *lc = &p->link_config;
1689
1690 if (epause->autoneg == AUTONEG_DISABLE)
1691 lc->requested_fc = 0;
1692 else if (lc->supported & SUPPORTED_Autoneg)
1693 lc->requested_fc = PAUSE_AUTONEG;
1694 else
1695 return -EINVAL;
1696
1697 if (epause->rx_pause)
1698 lc->requested_fc |= PAUSE_RX;
1699 if (epause->tx_pause)
1700 lc->requested_fc |= PAUSE_TX;
1701 if (lc->autoneg == AUTONEG_ENABLE) {
1702 if (netif_running(dev))
1703 t3_link_start(&p->phy, &p->mac, lc);
1704 } else {
1705 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1706 if (netif_running(dev))
1707 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1708 }
1709 return 0;
1710}
1711
1712static u32 get_rx_csum(struct net_device *dev)
1713{
1714 struct port_info *p = netdev_priv(dev);
1715
47fd23fe 1716 return p->rx_offload & T3_RX_CSUM;
4d22de3e
DLR
1717}
1718
1719static int set_rx_csum(struct net_device *dev, u32 data)
1720{
1721 struct port_info *p = netdev_priv(dev);
1722
47fd23fe
RD
1723 if (data) {
1724 p->rx_offload |= T3_RX_CSUM;
1725 } else {
b47385bd
DLR
1726 int i;
1727
47fd23fe 1728 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
04ecb072
DLR
1729 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1730 set_qset_lro(dev, i, 0);
b47385bd 1731 }
4d22de3e
DLR
1732 return 0;
1733}
1734
1735static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1736{
5fbf816f
DLR
1737 struct port_info *pi = netdev_priv(dev);
1738 struct adapter *adapter = pi->adapter;
05b97b30 1739 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
4d22de3e
DLR
1740
1741 e->rx_max_pending = MAX_RX_BUFFERS;
1742 e->rx_mini_max_pending = 0;
1743 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1744 e->tx_max_pending = MAX_TXQ_ENTRIES;
1745
05b97b30
DLR
1746 e->rx_pending = q->fl_size;
1747 e->rx_mini_pending = q->rspq_size;
1748 e->rx_jumbo_pending = q->jumbo_size;
1749 e->tx_pending = q->txq_size[0];
4d22de3e
DLR
1750}
1751
1752static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1753{
5fbf816f
DLR
1754 struct port_info *pi = netdev_priv(dev);
1755 struct adapter *adapter = pi->adapter;
05b97b30 1756 struct qset_params *q;
5fbf816f 1757 int i;
4d22de3e
DLR
1758
1759 if (e->rx_pending > MAX_RX_BUFFERS ||
1760 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1761 e->tx_pending > MAX_TXQ_ENTRIES ||
1762 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1763 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1764 e->rx_pending < MIN_FL_ENTRIES ||
1765 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1766 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1767 return -EINVAL;
1768
1769 if (adapter->flags & FULL_INIT_DONE)
1770 return -EBUSY;
1771
05b97b30
DLR
1772 q = &adapter->params.sge.qset[pi->first_qset];
1773 for (i = 0; i < pi->nqsets; ++i, ++q) {
4d22de3e
DLR
1774 q->rspq_size = e->rx_mini_pending;
1775 q->fl_size = e->rx_pending;
1776 q->jumbo_size = e->rx_jumbo_pending;
1777 q->txq_size[0] = e->tx_pending;
1778 q->txq_size[1] = e->tx_pending;
1779 q->txq_size[2] = e->tx_pending;
1780 }
1781 return 0;
1782}
1783
1784static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1785{
5fbf816f
DLR
1786 struct port_info *pi = netdev_priv(dev);
1787 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
1788 struct qset_params *qsp = &adapter->params.sge.qset[0];
1789 struct sge_qset *qs = &adapter->sge.qs[0];
1790
1791 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1792 return -EINVAL;
1793
1794 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1795 t3_update_qset_coalesce(qs, qsp);
1796 return 0;
1797}
1798
1799static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1800{
5fbf816f
DLR
1801 struct port_info *pi = netdev_priv(dev);
1802 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
1803 struct qset_params *q = adapter->params.sge.qset;
1804
1805 c->rx_coalesce_usecs = q->coalesce_usecs;
1806 return 0;
1807}
1808
1809static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1810 u8 * data)
1811{
5fbf816f
DLR
1812 struct port_info *pi = netdev_priv(dev);
1813 struct adapter *adapter = pi->adapter;
4d22de3e 1814 int i, err = 0;
4d22de3e
DLR
1815
1816 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1817 if (!buf)
1818 return -ENOMEM;
1819
1820 e->magic = EEPROM_MAGIC;
1821 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
05e5c116 1822 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
4d22de3e
DLR
1823
1824 if (!err)
1825 memcpy(data, buf + e->offset, e->len);
1826 kfree(buf);
1827 return err;
1828}
1829
1830static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1831 u8 * data)
1832{
5fbf816f
DLR
1833 struct port_info *pi = netdev_priv(dev);
1834 struct adapter *adapter = pi->adapter;
05e5c116
AV
1835 u32 aligned_offset, aligned_len;
1836 __le32 *p;
4d22de3e 1837 u8 *buf;
c54f5c24 1838 int err;
4d22de3e
DLR
1839
1840 if (eeprom->magic != EEPROM_MAGIC)
1841 return -EINVAL;
1842
1843 aligned_offset = eeprom->offset & ~3;
1844 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1845
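/* An unaligned request is handled read-modify-write: fetch the boundary words, merge in the user data, then write back whole words. */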
1846 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1847 buf = kmalloc(aligned_len, GFP_KERNEL);
1848 if (!buf)
1849 return -ENOMEM;
05e5c116 1850 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
4d22de3e
DLR
1851 if (!err && aligned_len > 4)
1852 err = t3_seeprom_read(adapter,
1853 aligned_offset + aligned_len - 4,
05e5c116 1854 (__le32 *) & buf[aligned_len - 4]);
4d22de3e
DLR
1855 if (err)
1856 goto out;
1857 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1858 } else
1859 buf = data;
1860
1861 err = t3_seeprom_wp(adapter, 0);
1862 if (err)
1863 goto out;
1864
05e5c116 1865 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
4d22de3e
DLR
1866 err = t3_seeprom_write(adapter, aligned_offset, *p);
1867 aligned_offset += 4;
1868 }
1869
1870 if (!err)
1871 err = t3_seeprom_wp(adapter, 1);
1872out:
1873 if (buf != data)
1874 kfree(buf);
1875 return err;
1876}
1877
1878static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1879{
1880 wol->supported = 0;
1881 wol->wolopts = 0;
1882 memset(&wol->sopass, 0, sizeof(wol->sopass));
1883}
1884
1885static const struct ethtool_ops cxgb_ethtool_ops = {
1886 .get_settings = get_settings,
1887 .set_settings = set_settings,
1888 .get_drvinfo = get_drvinfo,
1889 .get_msglevel = get_msglevel,
1890 .set_msglevel = set_msglevel,
1891 .get_ringparam = get_sge_param,
1892 .set_ringparam = set_sge_param,
1893 .get_coalesce = get_coalesce,
1894 .set_coalesce = set_coalesce,
1895 .get_eeprom_len = get_eeprom_len,
1896 .get_eeprom = get_eeprom,
1897 .set_eeprom = set_eeprom,
1898 .get_pauseparam = get_pauseparam,
1899 .set_pauseparam = set_pauseparam,
1900 .get_rx_csum = get_rx_csum,
1901 .set_rx_csum = set_rx_csum,
4d22de3e 1902 .set_tx_csum = ethtool_op_set_tx_csum,
4d22de3e
DLR
1903 .set_sg = ethtool_op_set_sg,
1904 .get_link = ethtool_op_get_link,
1905 .get_strings = get_strings,
1906 .phys_id = cxgb3_phys_id,
1907 .nway_reset = restart_autoneg,
b9f2c044 1908 .get_sset_count = get_sset_count,
4d22de3e
DLR
1909 .get_ethtool_stats = get_stats,
1910 .get_regs_len = get_regs_len,
1911 .get_regs = get_regs,
1912 .get_wol = get_wol,
4d22de3e 1913 .set_tso = ethtool_op_set_tso,
4d22de3e
DLR
1914};
1915
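/* A negative value means the parameter was not supplied and always passes the range check. */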
1916static int in_range(int val, int lo, int hi)
1917{
1918 return val < 0 || (val <= hi && val >= lo);
1919}
1920
1921static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1922{
5fbf816f
DLR
1923 struct port_info *pi = netdev_priv(dev);
1924 struct adapter *adapter = pi->adapter;
4d22de3e 1925 u32 cmd;
5fbf816f 1926 int ret;
4d22de3e
DLR
1927
1928 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1929 return -EFAULT;
1930
1931 switch (cmd) {
4d22de3e
DLR
1932 case CHELSIO_SET_QSET_PARAMS:{
1933 int i;
1934 struct qset_params *q;
1935 struct ch_qset_params t;
8c263761
DLR
1936 int q1 = pi->first_qset;
1937 int nqsets = pi->nqsets;
4d22de3e
DLR
1938
1939 if (!capable(CAP_NET_ADMIN))
1940 return -EPERM;
1941 if (copy_from_user(&t, useraddr, sizeof(t)))
1942 return -EFAULT;
1943 if (t.qset_idx >= SGE_QSETS)
1944 return -EINVAL;
1945 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1946 !in_range(t.cong_thres, 0, 255) ||
1947 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1948 MAX_TXQ_ENTRIES) ||
1949 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1950 MAX_TXQ_ENTRIES) ||
1951 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1952 MAX_CTRL_TXQ_ENTRIES) ||
1953 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1954 MAX_RX_BUFFERS)
1955 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1956 MAX_RX_JUMBO_BUFFERS)
1957 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1958 MAX_RSPQ_ENTRIES))
1959 return -EINVAL;
8c263761
DLR
1960
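/* On a running adapter, LRO may only be enabled on a qset whose port still has RX checksum offload turned on. */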
1961 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1962 for_each_port(adapter, i) {
1963 pi = adap2pinfo(adapter, i);
1964 if (t.qset_idx >= pi->first_qset &&
1965 t.qset_idx < pi->first_qset + pi->nqsets &&
47fd23fe 1966 !(pi->rx_offload & T3_RX_CSUM))
8c263761
DLR
1967 return -EINVAL;
1968 }
1969
4d22de3e
DLR
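/* Fields left at -1 mean "no change"; once the adapter is fully initialised only the interrupt latency and LRO settings below may still be changed. */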
1970 if ((adapter->flags & FULL_INIT_DONE) &&
1971 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1972 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1973 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1974 t.polling >= 0 || t.cong_thres >= 0))
1975 return -EBUSY;
1976
8c263761
DLR
1977 /* Allow setting of any available qset when offload enabled */
1978 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1979 q1 = 0;
1980 for_each_port(adapter, i) {
1981 pi = adap2pinfo(adapter, i);
1982 nqsets += pi->first_qset + pi->nqsets;
1983 }
1984 }
1985
1986 if (t.qset_idx < q1)
1987 return -EINVAL;
1988 if (t.qset_idx > q1 + nqsets - 1)
1989 return -EINVAL;
1990
4d22de3e
DLR
1991 q = &adapter->params.sge.qset[t.qset_idx];
1992
1993 if (t.rspq_size >= 0)
1994 q->rspq_size = t.rspq_size;
1995 if (t.fl_size[0] >= 0)
1996 q->fl_size = t.fl_size[0];
1997 if (t.fl_size[1] >= 0)
1998 q->jumbo_size = t.fl_size[1];
1999 if (t.txq_size[0] >= 0)
2000 q->txq_size[0] = t.txq_size[0];
2001 if (t.txq_size[1] >= 0)
2002 q->txq_size[1] = t.txq_size[1];
2003 if (t.txq_size[2] >= 0)
2004 q->txq_size[2] = t.txq_size[2];
2005 if (t.cong_thres >= 0)
2006 q->cong_thres = t.cong_thres;
2007 if (t.intr_lat >= 0) {
2008 struct sge_qset *qs =
2009 &adapter->sge.qs[t.qset_idx];
2010
2011 q->coalesce_usecs = t.intr_lat;
2012 t3_update_qset_coalesce(qs, q);
2013 }
2014 if (t.polling >= 0) {
2015 if (adapter->flags & USING_MSIX)
2016 q->polling = t.polling;
2017 else {
2018 /* No polling with INTx for T3A */
2019 if (adapter->params.rev == 0 &&
2020 !(adapter->flags & USING_MSI))
2021 t.polling = 0;
2022
2023 for (i = 0; i < SGE_QSETS; i++) {
2024 q = &adapter->params.sge.
2025 qset[i];
2026 q->polling = t.polling;
2027 }
2028 }
2029 }
04ecb072
DLR
2030 if (t.lro >= 0)
2031 set_qset_lro(dev, t.qset_idx, t.lro);
2032
4d22de3e
DLR
2033 break;
2034 }
2035 case CHELSIO_GET_QSET_PARAMS:{
2036 struct qset_params *q;
2037 struct ch_qset_params t;
8c263761
DLR
2038 int q1 = pi->first_qset;
2039 int nqsets = pi->nqsets;
2040 int i;
4d22de3e
DLR
2041
2042 if (copy_from_user(&t, useraddr, sizeof(t)))
2043 return -EFAULT;
8c263761
DLR
2044
2045 /* Display qsets for all ports when offload enabled */
2046 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2047 q1 = 0;
2048 for_each_port(adapter, i) {
2049 pi = adap2pinfo(adapter, i);
2050 nqsets = pi->first_qset + pi->nqsets;
2051 }
2052 }
2053
2054 if (t.qset_idx >= nqsets)
4d22de3e
DLR
2055 return -EINVAL;
2056
8c263761 2057 q = &adapter->params.sge.qset[q1 + t.qset_idx];
4d22de3e
DLR
2058 t.rspq_size = q->rspq_size;
2059 t.txq_size[0] = q->txq_size[0];
2060 t.txq_size[1] = q->txq_size[1];
2061 t.txq_size[2] = q->txq_size[2];
2062 t.fl_size[0] = q->fl_size;
2063 t.fl_size[1] = q->jumbo_size;
2064 t.polling = q->polling;
b47385bd 2065 t.lro = q->lro;
4d22de3e
DLR
2066 t.intr_lat = q->coalesce_usecs;
2067 t.cong_thres = q->cong_thres;
8c263761
DLR
2068 t.qnum = q1;
2069
2070 if (adapter->flags & USING_MSIX)
2071 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2072 else
2073 t.vector = adapter->pdev->irq;
4d22de3e
DLR
2074
2075 if (copy_to_user(useraddr, &t, sizeof(t)))
2076 return -EFAULT;
2077 break;
2078 }
2079 case CHELSIO_SET_QSET_NUM:{
2080 struct ch_reg edata;
4d22de3e
DLR
2081 unsigned int i, first_qset = 0, other_qsets = 0;
2082
2083 if (!capable(CAP_NET_ADMIN))
2084 return -EPERM;
2085 if (adapter->flags & FULL_INIT_DONE)
2086 return -EBUSY;
2087 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2088 return -EFAULT;
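/* At least one queue set per port; more than one requires MSI-X. */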
2089 if (edata.val < 1 ||
2090 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2091 return -EINVAL;
2092
2093 for_each_port(adapter, i)
2094 if (adapter->port[i] && adapter->port[i] != dev)
2095 other_qsets += adap2pinfo(adapter, i)->nqsets;
2096
2097 if (edata.val + other_qsets > SGE_QSETS)
2098 return -EINVAL;
2099
2100 pi->nqsets = edata.val;
2101
2102 for_each_port(adapter, i)
2103 if (adapter->port[i]) {
2104 pi = adap2pinfo(adapter, i);
2105 pi->first_qset = first_qset;
2106 first_qset += pi->nqsets;
2107 }
2108 break;
2109 }
2110 case CHELSIO_GET_QSET_NUM:{
2111 struct ch_reg edata;
4d22de3e
DLR
2112
2113 edata.cmd = CHELSIO_GET_QSET_NUM;
2114 edata.val = pi->nqsets;
2115 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2116 return -EFAULT;
2117 break;
2118 }
2119 case CHELSIO_LOAD_FW:{
2120 u8 *fw_data;
2121 struct ch_mem_range t;
2122
1b3aa7af 2123 if (!capable(CAP_SYS_RAWIO))
4d22de3e
DLR
2124 return -EPERM;
2125 if (copy_from_user(&t, useraddr, sizeof(t)))
2126 return -EFAULT;
1b3aa7af 2123			/* Check t.len sanity? */
4d22de3e
DLR
2128 fw_data = kmalloc(t.len, GFP_KERNEL);
2129 if (!fw_data)
2130 return -ENOMEM;
2131
2132 if (copy_from_user
2133 (fw_data, useraddr + sizeof(t), t.len)) {
2134 kfree(fw_data);
2135 return -EFAULT;
2136 }
2137
2138 ret = t3_load_fw(adapter, fw_data, t.len);
2139 kfree(fw_data);
2140 if (ret)
2141 return ret;
2142 break;
2143 }
2144 case CHELSIO_SETMTUTAB:{
2145 struct ch_mtus m;
2146 int i;
2147
2148 if (!is_offload(adapter))
2149 return -EOPNOTSUPP;
2150 if (!capable(CAP_NET_ADMIN))
2151 return -EPERM;
2152 if (offload_running(adapter))
2153 return -EBUSY;
2154 if (copy_from_user(&m, useraddr, sizeof(m)))
2155 return -EFAULT;
2156 if (m.nmtus != NMTUS)
2157 return -EINVAL;
2158 if (m.mtus[0] < 81) /* accommodate SACK */
2159 return -EINVAL;
2160
2161 /* MTUs must be in ascending order */
2162 for (i = 1; i < NMTUS; ++i)
2163 if (m.mtus[i] < m.mtus[i - 1])
2164 return -EINVAL;
2165
2166 memcpy(adapter->params.mtus, m.mtus,
2167 sizeof(adapter->params.mtus));
2168 break;
2169 }
2170 case CHELSIO_GET_PM:{
2171 struct tp_params *p = &adapter->params.tp;
2172 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2173
2174 if (!is_offload(adapter))
2175 return -EOPNOTSUPP;
2176 m.tx_pg_sz = p->tx_pg_size;
2177 m.tx_num_pg = p->tx_num_pgs;
2178 m.rx_pg_sz = p->rx_pg_size;
2179 m.rx_num_pg = p->rx_num_pgs;
2180 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2181 if (copy_to_user(useraddr, &m, sizeof(m)))
2182 return -EFAULT;
2183 break;
2184 }
2185 case CHELSIO_SET_PM:{
2186 struct ch_pm m;
2187 struct tp_params *p = &adapter->params.tp;
2188
2189 if (!is_offload(adapter))
2190 return -EOPNOTSUPP;
2191 if (!capable(CAP_NET_ADMIN))
2192 return -EPERM;
2193 if (adapter->flags & FULL_INIT_DONE)
2194 return -EBUSY;
2195 if (copy_from_user(&m, useraddr, sizeof(m)))
2196 return -EFAULT;
d9da466a 2197 if (!is_power_of_2(m.rx_pg_sz) ||
2198 !is_power_of_2(m.tx_pg_sz))
4d22de3e
DLR
2199 return -EINVAL; /* not power of 2 */
2200 if (!(m.rx_pg_sz & 0x14000))
2201 return -EINVAL; /* not 16KB or 64KB */
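/* i.e. tx_pg_sz must be one of 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */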
2202 if (!(m.tx_pg_sz & 0x1554000))
2203 return -EINVAL;
2204 if (m.tx_num_pg == -1)
2205 m.tx_num_pg = p->tx_num_pgs;
2206 if (m.rx_num_pg == -1)
2207 m.rx_num_pg = p->rx_num_pgs;
2208 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2209 return -EINVAL;
2210 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2211 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2212 return -EINVAL;
2213 p->rx_pg_size = m.rx_pg_sz;
2214 p->tx_pg_size = m.tx_pg_sz;
2215 p->rx_num_pgs = m.rx_num_pg;
2216 p->tx_num_pgs = m.tx_num_pg;
2217 break;
2218 }
2219 case CHELSIO_GET_MEM:{
2220 struct ch_mem_range t;
2221 struct mc7 *mem;
2222 u64 buf[32];
2223
2224 if (!is_offload(adapter))
2225 return -EOPNOTSUPP;
2226 if (!(adapter->flags & FULL_INIT_DONE))
2227 return -EIO; /* need the memory controllers */
2228 if (copy_from_user(&t, useraddr, sizeof(t)))
2229 return -EFAULT;
2230 if ((t.addr & 7) || (t.len & 7))
2231 return -EINVAL;
2232 if (t.mem_id == MEM_CM)
2233 mem = &adapter->cm;
2234 else if (t.mem_id == MEM_PMRX)
2235 mem = &adapter->pmrx;
2236 else if (t.mem_id == MEM_PMTX)
2237 mem = &adapter->pmtx;
2238 else
2239 return -EINVAL;
2240
2241 /*
1825494a
DLR
2242 * Version scheme:
2243 * bits 0..9: chip version
2244 * bits 10..15: chip revision
2245 */
4d22de3e
DLR
2246 t.version = 3 | (adapter->params.rev << 10);
2247 if (copy_to_user(useraddr, &t, sizeof(t)))
2248 return -EFAULT;
2249
2250 /*
2251 * Read 256 bytes at a time as len can be large and we don't
2252 * want to use huge intermediate buffers.
2253 */
2254 useraddr += sizeof(t); /* advance to start of buffer */
2255 while (t.len) {
2256 unsigned int chunk =
2257 min_t(unsigned int, t.len, sizeof(buf));
2258
2259 ret =
2260 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2261 buf);
2262 if (ret)
2263 return ret;
2264 if (copy_to_user(useraddr, buf, chunk))
2265 return -EFAULT;
2266 useraddr += chunk;
2267 t.addr += chunk;
2268 t.len -= chunk;
2269 }
2270 break;
2271 }
2272 case CHELSIO_SET_TRACE_FILTER:{
2273 struct ch_trace t;
2274 const struct trace_params *tp;
2275
2276 if (!capable(CAP_NET_ADMIN))
2277 return -EPERM;
2278 if (!offload_running(adapter))
2279 return -EAGAIN;
2280 if (copy_from_user(&t, useraddr, sizeof(t)))
2281 return -EFAULT;
2282
2283 tp = (const struct trace_params *)&t.sip;
2284 if (t.config_tx)
2285 t3_config_trace_filter(adapter, tp, 0,
2286 t.invert_match,
2287 t.trace_tx);
2288 if (t.config_rx)
2289 t3_config_trace_filter(adapter, tp, 1,
2290 t.invert_match,
2291 t.trace_rx);
2292 break;
2293 }
4d22de3e
DLR
2294 default:
2295 return -EOPNOTSUPP;
2296 }
2297 return 0;
2298}
2299
2300static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2301{
4d22de3e 2302 struct mii_ioctl_data *data = if_mii(req);
5fbf816f
DLR
2303 struct port_info *pi = netdev_priv(dev);
2304 struct adapter *adapter = pi->adapter;
2305 int ret, mmd;
4d22de3e
DLR
2306
2307 switch (cmd) {
2308 case SIOCGMIIPHY:
2309 data->phy_id = pi->phy.addr;
2310 /* FALLTHRU */
2311 case SIOCGMIIREG:{
2312 u32 val;
2313 struct cphy *phy = &pi->phy;
2314
2315 if (!phy->mdio_read)
2316 return -EOPNOTSUPP;
2317 if (is_10G(adapter)) {
2318 mmd = data->phy_id >> 8;
2319 if (!mmd)
2320 mmd = MDIO_DEV_PCS;
9b1e3656 2321 else if (mmd > MDIO_DEV_VEND2)
4d22de3e
DLR
2322 return -EINVAL;
2323
2324 ret =
2325 phy->mdio_read(adapter, data->phy_id & 0x1f,
2326 mmd, data->reg_num, &val);
2327 } else
2328 ret =
2329 phy->mdio_read(adapter, data->phy_id & 0x1f,
2330 0, data->reg_num & 0x1f,
2331 &val);
2332 if (!ret)
2333 data->val_out = val;
2334 break;
2335 }
2336 case SIOCSMIIREG:{
2337 struct cphy *phy = &pi->phy;
2338
2339 if (!capable(CAP_NET_ADMIN))
2340 return -EPERM;
2341 if (!phy->mdio_write)
2342 return -EOPNOTSUPP;
2343 if (is_10G(adapter)) {
2344 mmd = data->phy_id >> 8;
2345 if (!mmd)
2346 mmd = MDIO_DEV_PCS;
9b1e3656 2347 else if (mmd > MDIO_DEV_VEND2)
4d22de3e
DLR
2348 return -EINVAL;
2349
2350 ret =
2351 phy->mdio_write(adapter,
2352 data->phy_id & 0x1f, mmd,
2353 data->reg_num,
2354 data->val_in);
2355 } else
2356 ret =
2357 phy->mdio_write(adapter,
2358 data->phy_id & 0x1f, 0,
2359 data->reg_num & 0x1f,
2360 data->val_in);
2361 break;
2362 }
2363 case SIOCCHIOCTL:
2364 return cxgb_extension_ioctl(dev, req->ifr_data);
2365 default:
2366 return -EOPNOTSUPP;
2367 }
2368 return ret;
2369}
2370
2371static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2372{
4d22de3e 2373 struct port_info *pi = netdev_priv(dev);
5fbf816f
DLR
2374 struct adapter *adapter = pi->adapter;
2375 int ret;
4d22de3e
DLR
2376
2377 if (new_mtu < 81) /* accommodate SACK */
2378 return -EINVAL;
2379 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2380 return ret;
2381 dev->mtu = new_mtu;
2382 init_port_mtus(adapter);
2383 if (adapter->params.rev == 0 && offload_running(adapter))
2384 t3_load_mtus(adapter, adapter->params.mtus,
2385 adapter->params.a_wnd, adapter->params.b_wnd,
2386 adapter->port[0]->mtu);
2387 return 0;
2388}
2389
2390static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2391{
4d22de3e 2392 struct port_info *pi = netdev_priv(dev);
5fbf816f 2393 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
2394 struct sockaddr *addr = p;
2395
2396 if (!is_valid_ether_addr(addr->sa_data))
2397 return -EINVAL;
2398
2399 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2400 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2401 if (offload_running(adapter))
2402 write_smt_entry(adapter, pi->port_id);
2403 return 0;
2404}
2405
2406/**
2407 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2408 * @adap: the adapter
2409 * @p: the port
2410 *
2411 * Ensures that current Rx processing on any of the queues associated with
2412 * the given port completes before returning. We do this by acquiring and
2413 * releasing the locks of the response queues associated with the port.
2414 */
2415static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2416{
2417 int i;
2418
8c263761
DLR
2419 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2420 struct sge_rspq *q = &adap->sge.qs[i].rspq;
4d22de3e
DLR
2421
2422 spin_lock_irq(&q->lock);
2423 spin_unlock_irq(&q->lock);
2424 }
2425}
2426
2427static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2428{
4d22de3e 2429 struct port_info *pi = netdev_priv(dev);
5fbf816f 2430 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
2431
2432 pi->vlan_grp = grp;
2433 if (adapter->params.rev > 0)
2434 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2435 else {
2436 /* single control for all ports */
2437 unsigned int i, have_vlans = 0;
2438 for_each_port(adapter, i)
2439 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2440
2441 t3_set_vlan_accel(adapter, 1, have_vlans);
2442 }
2443 t3_synchronize_rx(adapter, pi);
2444}
2445
4d22de3e
DLR
2446#ifdef CONFIG_NET_POLL_CONTROLLER
2447static void cxgb_netpoll(struct net_device *dev)
2448{
890de332 2449 struct port_info *pi = netdev_priv(dev);
5fbf816f 2450 struct adapter *adapter = pi->adapter;
890de332 2451 int qidx;
4d22de3e 2452
890de332
DLR
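/* Service every queue set owned by this port by calling the interrupt handler directly; with MSI-X each queue set is its own interrupt source, otherwise the handler takes the adapter. */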
2453 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2454 struct sge_qset *qs = &adapter->sge.qs[qidx];
2455 void *source;
2eab17ab 2456
890de332
DLR
2457 if (adapter->flags & USING_MSIX)
2458 source = qs;
2459 else
2460 source = adapter;
2461
2462 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2463 }
4d22de3e
DLR
2464}
2465#endif
2466
2467/*
2468 * Periodic accumulation of MAC statistics.
2469 */
2470static void mac_stats_update(struct adapter *adapter)
2471{
2472 int i;
2473
2474 for_each_port(adapter, i) {
2475 struct net_device *dev = adapter->port[i];
2476 struct port_info *p = netdev_priv(dev);
2477
2478 if (netif_running(dev)) {
2479 spin_lock(&adapter->stats_lock);
2480 t3_mac_update_stats(&p->mac);
2481 spin_unlock(&adapter->stats_lock);
2482 }
2483 }
2484}
2485
2486static void check_link_status(struct adapter *adapter)
2487{
2488 int i;
2489
2490 for_each_port(adapter, i) {
2491 struct net_device *dev = adapter->port[i];
2492 struct port_info *p = netdev_priv(dev);
2493
bf792094
DLR
2494 spin_lock_irq(&adapter->work_lock);
2495 if (p->link_fault) {
2496 spin_unlock_irq(&adapter->work_lock);
2497 continue;
2498 }
2499 spin_unlock_irq(&adapter->work_lock);
2500
2501 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2502 t3_xgm_intr_disable(adapter, i);
2503 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2504
4d22de3e 2505 t3_link_changed(adapter, i);
bf792094
DLR
2506 t3_xgm_intr_enable(adapter, i);
2507 }
4d22de3e
DLR
2508 }
2509}
2510
fc90664e
DLR
2511static void check_t3b2_mac(struct adapter *adapter)
2512{
2513 int i;
2514
f2d961c9
DLR
2515 if (!rtnl_trylock()) /* synchronize with ifdown */
2516 return;
2517
fc90664e
DLR
2518 for_each_port(adapter, i) {
2519 struct net_device *dev = adapter->port[i];
2520 struct port_info *p = netdev_priv(dev);
2521 int status;
2522
2523 if (!netif_running(dev))
2524 continue;
2525
2526 status = 0;
6d6dabac 2527 if (netif_running(dev) && netif_carrier_ok(dev))
fc90664e
DLR
2528 status = t3b2_mac_watchdog_task(&p->mac);
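/* A return of 1 means the watchdog toggled the MAC; 2 means the MAC must be reprogrammed from scratch (MTU, address, RX mode, link), as done below. */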
2529 if (status == 1)
2530 p->mac.stats.num_toggled++;
2531 else if (status == 2) {
2532 struct cmac *mac = &p->mac;
2533
2534 t3_mac_set_mtu(mac, dev->mtu);
2535 t3_mac_set_address(mac, 0, dev->dev_addr);
2536 cxgb_set_rxmode(dev);
2537 t3_link_start(&p->phy, mac, &p->link_config);
2538 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2539 t3_port_intr_enable(adapter, p->port_id);
2540 p->mac.stats.num_resets++;
2541 }
2542 }
2543 rtnl_unlock();
2544}
2545
2546
4d22de3e
DLR
2547static void t3_adap_check_task(struct work_struct *work)
2548{
2549 struct adapter *adapter = container_of(work, struct adapter,
2550 adap_check_task.work);
2551 const struct adapter_params *p = &adapter->params;
fc882196
DLR
2552 int port;
2553 unsigned int v, status, reset;
4d22de3e
DLR
2554
2555 adapter->check_task_cnt++;
2556
2557 /* Check link status for PHYs without interrupts */
2558 if (p->linkpoll_period)
2559 check_link_status(adapter);
2560
2561 /* Accumulate MAC stats if needed */
2562 if (!p->linkpoll_period ||
2563 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2564 p->stats_update_period) {
2565 mac_stats_update(adapter);
2566 adapter->check_task_cnt = 0;
2567 }
2568
fc90664e
DLR
2569 if (p->rev == T3_REV_B2)
2570 check_t3b2_mac(adapter);
2571
fc882196
DLR
2572 /*
2574	 * Scan the XGMACs to check for various conditions which we want to
2574 * monitor in a periodic polling manner rather than via an interrupt
2575 * condition. This is used for conditions which would otherwise flood
2576 * the system with interrupts and we only really need to know that the
2577 * conditions are "happening" ... For each condition we count the
2578 * detection of the condition and reset it for the next polling loop.
2579 */
2580 for_each_port(adapter, port) {
2581 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2582 u32 cause;
2583
2584 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2585 reset = 0;
2586 if (cause & F_RXFIFO_OVERFLOW) {
2587 mac->stats.rx_fifo_ovfl++;
2588 reset |= F_RXFIFO_OVERFLOW;
2589 }
2590
2591 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2592 }
2593
2594 /*
2595 * We do the same as above for FL_EMPTY interrupts.
2596 */
2597 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2598 reset = 0;
2599
2600 if (status & F_FLEMPTY) {
2601 struct sge_qset *qs = &adapter->sge.qs[0];
2602 int i = 0;
2603
2604 reset |= F_FLEMPTY;
2605
2606 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2607 0xffff;
2608
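/* The status register carries one "empty" bit per free list and each queue set owns two free lists, so walk the bits in pairs, advancing to the next queue set every second bit. */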
2609 while (v) {
2610 qs->fl[i].empty += (v & 1);
2611 if (i)
2612 qs++;
2613 i ^= 1;
2614 v >>= 1;
2615 }
2616 }
2617
2618 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2619
4d22de3e 2620 /* Schedule the next check update if any port is active. */
20d3fc11 2621 spin_lock_irq(&adapter->work_lock);
4d22de3e
DLR
2622 if (adapter->open_device_map & PORT_MASK)
2623 schedule_chk_task(adapter);
20d3fc11 2624 spin_unlock_irq(&adapter->work_lock);
4d22de3e
DLR
2625}
2626
2627/*
2628 * Processes external (PHY) interrupts in process context.
2629 */
2630static void ext_intr_task(struct work_struct *work)
2631{
2632 struct adapter *adapter = container_of(work, struct adapter,
2633 ext_intr_handler_task);
bf792094
DLR
2634 int i;
2635
2636 /* Disable link fault interrupts */
2637 for_each_port(adapter, i) {
2638 struct net_device *dev = adapter->port[i];
2639 struct port_info *p = netdev_priv(dev);
2640
2641 t3_xgm_intr_disable(adapter, i);
2642 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2643 }
4d22de3e 2644
bf792094 2645	/* Handle the PHY interrupt, then re-enable link fault interrupts */
4d22de3e
DLR
2646 t3_phy_intr_handler(adapter);
2647
bf792094
DLR
2648 for_each_port(adapter, i)
2649 t3_xgm_intr_enable(adapter, i);
2650
4d22de3e
DLR
2651 /* Now reenable external interrupts */
2652 spin_lock_irq(&adapter->work_lock);
2653 if (adapter->slow_intr_mask) {
2654 adapter->slow_intr_mask |= F_T3DBG;
2655 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2656 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2657 adapter->slow_intr_mask);
2658 }
2659 spin_unlock_irq(&adapter->work_lock);
2660}
2661
2662/*
2663 * Interrupt-context handler for external (PHY) interrupts.
2664 */
2665void t3_os_ext_intr_handler(struct adapter *adapter)
2666{
2667 /*
2668 * Schedule a task to handle external interrupts as they may be slow
2669 * and we use a mutex to protect MDIO registers. We disable PHY
2670 * interrupts in the meantime and let the task reenable them when
2671 * it's done.
2672 */
2673 spin_lock(&adapter->work_lock);
2674 if (adapter->slow_intr_mask) {
2675 adapter->slow_intr_mask &= ~F_T3DBG;
2676 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2677 adapter->slow_intr_mask);
2678 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2679 }
2680 spin_unlock(&adapter->work_lock);
2681}
2682
bf792094
DLR
2683static void link_fault_task(struct work_struct *work)
2684{
2685 struct adapter *adapter = container_of(work, struct adapter,
2686 link_fault_handler_task);
2687 int i;
2688
2689 for_each_port(adapter, i) {
2690 struct net_device *netdev = adapter->port[i];
2691 struct port_info *pi = netdev_priv(netdev);
2692
2693 if (pi->link_fault)
2694 t3_link_fault(adapter, i);
2695 }
2696}
2697
2698void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2699{
2700 struct net_device *netdev = adapter->port[port_id];
2701 struct port_info *pi = netdev_priv(netdev);
2702
2703 spin_lock(&adapter->work_lock);
2704 pi->link_fault = 1;
2705 queue_work(cxgb3_wq, &adapter->link_fault_handler_task);
2706 spin_unlock(&adapter->work_lock);
2707}
2708
20d3fc11
DLR
2709static int t3_adapter_error(struct adapter *adapter, int reset)
2710{
2711 int i, ret = 0;
2712
cb0bc205
DLR
2713 if (is_offload(adapter) &&
2714 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2715 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2716 offload_close(&adapter->tdev);
2717 }
2718
20d3fc11
DLR
2719 /* Stop all ports */
2720 for_each_port(adapter, i) {
2721 struct net_device *netdev = adapter->port[i];
2722
2723 if (netif_running(netdev))
2724 cxgb_close(netdev);
2725 }
2726
20d3fc11
DLR
2727 /* Stop SGE timers */
2728 t3_stop_sge_timers(adapter);
2729
2730 adapter->flags &= ~FULL_INIT_DONE;
2731
2732 if (reset)
2733 ret = t3_reset_adapter(adapter);
2734
2735 pci_disable_device(adapter->pdev);
2736
2737 return ret;
2738}
2739
2740static int t3_reenable_adapter(struct adapter *adapter)
2741{
2742 if (pci_enable_device(adapter->pdev)) {
2743 dev_err(&adapter->pdev->dev,
2744 "Cannot re-enable PCI device after reset.\n");
2745 goto err;
2746 }
2747 pci_set_master(adapter->pdev);
2748 pci_restore_state(adapter->pdev);
2749
2750 /* Free sge resources */
2751 t3_free_sge_resources(adapter);
2752
2753 if (t3_replay_prep_adapter(adapter))
2754 goto err;
2755
2756 return 0;
2757err:
2758 return -1;
2759}
2760
2761static void t3_resume_ports(struct adapter *adapter)
2762{
2763 int i;
2764
2765 /* Restart the ports */
2766 for_each_port(adapter, i) {
2767 struct net_device *netdev = adapter->port[i];
2768
2769 if (netif_running(netdev)) {
2770 if (cxgb_open(netdev)) {
2771 dev_err(&adapter->pdev->dev,
2772 "can't bring device back up"
2773 " after reset\n");
2774 continue;
2775 }
2776 }
2777 }
cb0bc205
DLR
2778
2779 if (is_offload(adapter) && !ofld_disable)
2780 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
20d3fc11
DLR
2781}
2782
2783/*
2784 * Process a fatal error:
2785 * Bring the ports down, reset the chip, bring the ports back up.
2786 */
2787static void fatal_error_task(struct work_struct *work)
2788{
2789 struct adapter *adapter = container_of(work, struct adapter,
2790 fatal_error_handler_task);
2791 int err = 0;
2792
2793 rtnl_lock();
2794 err = t3_adapter_error(adapter, 1);
2795 if (!err)
2796 err = t3_reenable_adapter(adapter);
2797 if (!err)
2798 t3_resume_ports(adapter);
2799
2800 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2801 rtnl_unlock();
2802}
2803
4d22de3e
DLR
2804void t3_fatal_err(struct adapter *adapter)
2805{
2806 unsigned int fw_status[4];
2807
2808 if (adapter->flags & FULL_INIT_DONE) {
2809 t3_sge_stop(adapter);
c64c2eae
DLR
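/* Quiesce the MACs on both channels by clearing their TX and RX control registers. */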
2810 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2811 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2812 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2813 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
20d3fc11
DLR
2814
2815 spin_lock(&adapter->work_lock);
4d22de3e 2816 t3_intr_disable(adapter);
20d3fc11
DLR
2817 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2818 spin_unlock(&adapter->work_lock);
4d22de3e
DLR
2819 }
2820 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2821 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2822 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2823 fw_status[0], fw_status[1],
2824 fw_status[2], fw_status[3]);
4d22de3e
DLR
2825}
2826
91a6b50c
DLR
2827/**
2828 * t3_io_error_detected - called when PCI error is detected
2829 * @pdev: Pointer to PCI device
2830 * @state: The current pci connection state
2831 *
2832 * This function is called after a PCI bus error affecting
2833 * this device has been detected.
2834 */
2835static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2836 pci_channel_state_t state)
2837{
bc4b6b52 2838 struct adapter *adapter = pci_get_drvdata(pdev);
20d3fc11 2839 int ret;
91a6b50c 2840
20d3fc11 2841 ret = t3_adapter_error(adapter, 0);
91a6b50c 2842
48c4b6db 2843 /* Request a slot reset. */
91a6b50c
DLR
2844 return PCI_ERS_RESULT_NEED_RESET;
2845}
2846
2847/**
2848 * t3_io_slot_reset - called after the pci bus has been reset.
2849 * @pdev: Pointer to PCI device
2850 *
2851 * Restart the card from scratch, as if from a cold-boot.
2852 */
2853static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2854{
bc4b6b52 2855 struct adapter *adapter = pci_get_drvdata(pdev);
91a6b50c 2856
20d3fc11
DLR
2857 if (!t3_reenable_adapter(adapter))
2858 return PCI_ERS_RESULT_RECOVERED;
91a6b50c 2859
48c4b6db 2860 return PCI_ERS_RESULT_DISCONNECT;
91a6b50c
DLR
2861}
2862
2863/**
2864 * t3_io_resume - called when traffic can start flowing again.
2865 * @pdev: Pointer to PCI device
2866 *
2867 * This callback is called when the error recovery driver tells us that
2868 * its OK to resume normal operation.
2869 */
2870static void t3_io_resume(struct pci_dev *pdev)
2871{
bc4b6b52 2872 struct adapter *adapter = pci_get_drvdata(pdev);
91a6b50c 2873
68f40c10
DLR
2874 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2875 t3_read_reg(adapter, A_PCIE_PEX_ERR));
2876
20d3fc11 2877 t3_resume_ports(adapter);
91a6b50c
DLR
2878}
2879
2880static struct pci_error_handlers t3_err_handler = {
2881 .error_detected = t3_io_error_detected,
2882 .slot_reset = t3_io_slot_reset,
2883 .resume = t3_io_resume,
2884};
2885
8c263761
DLR
2886/*
2887 * Set the number of qsets based on the number of CPUs and the number of ports,
2888 * not to exceed the number of available qsets, assuming there are enough qsets
2889 * per port in HW.
2890 */
2891static void set_nqsets(struct adapter *adap)
2892{
2893 int i, j = 0;
2894 int num_cpus = num_online_cpus();
2895 int hwports = adap->params.nports;
5cda9364 2896 int nqsets = adap->msix_nvectors - 1;
8c263761 2897
f9ee3882 2898 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
8c263761
DLR
2899 if (hwports == 2 &&
2900 (hwports * nqsets > SGE_QSETS ||
2901 num_cpus >= nqsets / hwports))
2902 nqsets /= hwports;
2903 if (nqsets > num_cpus)
2904 nqsets = num_cpus;
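/* Four-port adapters are limited to a single queue set per port. */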
2905 if (nqsets < 1 || hwports == 4)
2906 nqsets = 1;
2907 } else
2908 nqsets = 1;
2909
2910 for_each_port(adap, i) {
2911 struct port_info *pi = adap2pinfo(adap, i);
2912
2913 pi->first_qset = j;
2914 pi->nqsets = nqsets;
2915 j = pi->first_qset + nqsets;
2916
2917 dev_info(&adap->pdev->dev,
2918 "Port %d using %d queue sets.\n", i, nqsets);
2919 }
2920}
2921
4d22de3e
DLR
2922static int __devinit cxgb_enable_msix(struct adapter *adap)
2923{
2924 struct msix_entry entries[SGE_QSETS + 1];
5cda9364 2925 int vectors;
4d22de3e
DLR
2926 int i, err;
2927
5cda9364
DLR
2928 vectors = ARRAY_SIZE(entries);
2929 for (i = 0; i < vectors; ++i)
4d22de3e
DLR
2930 entries[i].entry = i;
2931
5cda9364
DLR
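/* A positive return from pci_enable_msix() is the number of vectors actually available; retry with that smaller count until it succeeds or fails outright. */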
2932 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2933 vectors = err;
2934
2935 if (!err && vectors < (adap->params.nports + 1))
2936 err = -1;
2937
4d22de3e 2938 if (!err) {
5cda9364 2939 for (i = 0; i < vectors; ++i)
4d22de3e 2940 adap->msix_info[i].vec = entries[i].vector;
5cda9364
DLR
2941 adap->msix_nvectors = vectors;
2942 }
2943
4d22de3e
DLR
2944 return err;
2945}
2946
2947static void __devinit print_port_info(struct adapter *adap,
2948 const struct adapter_info *ai)
2949{
2950 static const char *pci_variant[] = {
2951 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2952 };
2953
2954 int i;
2955 char buf[80];
2956
2957 if (is_pcie(adap))
2958 snprintf(buf, sizeof(buf), "%s x%d",
2959 pci_variant[adap->params.pci.variant],
2960 adap->params.pci.width);
2961 else
2962 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2963 pci_variant[adap->params.pci.variant],
2964 adap->params.pci.speed, adap->params.pci.width);
2965
2966 for_each_port(adap, i) {
2967 struct net_device *dev = adap->port[i];
2968 const struct port_info *pi = netdev_priv(dev);
2969
2970 if (!test_bit(i, &adap->registered_device_map))
2971 continue;
8ac3ba68 2972 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
04497982 2973 dev->name, ai->desc, pi->phy.desc,
8ac3ba68 2974 is_offload(adap) ? "R" : "", adap->params.rev, buf,
4d22de3e
DLR
2975 (adap->flags & USING_MSIX) ? " MSI-X" :
2976 (adap->flags & USING_MSI) ? " MSI" : "");
2977 if (adap->name == dev->name && adap->params.vpd.mclk)
167cdf5f
DLR
2978 printk(KERN_INFO
2979 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
4d22de3e
DLR
2980 adap->name, t3_mc7_size(&adap->cm) >> 20,
2981 t3_mc7_size(&adap->pmtx) >> 20,
167cdf5f
DLR
2982 t3_mc7_size(&adap->pmrx) >> 20,
2983 adap->params.vpd.sn);
4d22de3e
DLR
2984 }
2985}
2986
dd752696
SH
2987static const struct net_device_ops cxgb_netdev_ops = {
2988 .ndo_open = cxgb_open,
2989 .ndo_stop = cxgb_close,
43a944f3 2990 .ndo_start_xmit = t3_eth_xmit,
dd752696
SH
2991 .ndo_get_stats = cxgb_get_stats,
2992 .ndo_validate_addr = eth_validate_addr,
2993 .ndo_set_multicast_list = cxgb_set_rxmode,
2994 .ndo_do_ioctl = cxgb_ioctl,
2995 .ndo_change_mtu = cxgb_change_mtu,
2996 .ndo_set_mac_address = cxgb_set_mac_addr,
2997 .ndo_vlan_rx_register = vlan_rx_register,
2998#ifdef CONFIG_NET_POLL_CONTROLLER
2999 .ndo_poll_controller = cxgb_netpoll,
3000#endif
3001};
3002
4d22de3e
DLR
3003static int __devinit init_one(struct pci_dev *pdev,
3004 const struct pci_device_id *ent)
3005{
3006 static int version_printed;
3007
3008 int i, err, pci_using_dac = 0;
68f40c10 3009 resource_size_t mmio_start, mmio_len;
4d22de3e
DLR
3010 const struct adapter_info *ai;
3011 struct adapter *adapter = NULL;
3012 struct port_info *pi;
3013
3014 if (!version_printed) {
3015 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3016 ++version_printed;
3017 }
3018
3019 if (!cxgb3_wq) {
3020 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3021 if (!cxgb3_wq) {
3022 printk(KERN_ERR DRV_NAME
3023 ": cannot initialize work queue\n");
3024 return -ENOMEM;
3025 }
3026 }
3027
3028 err = pci_request_regions(pdev, DRV_NAME);
3029 if (err) {
3030 /* Just info, some other driver may have claimed the device. */
3031 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3032 return err;
3033 }
3034
3035 err = pci_enable_device(pdev);
3036 if (err) {
3037 dev_err(&pdev->dev, "cannot enable PCI device\n");
3038 goto out_release_regions;
3039 }
3040
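/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask if the platform cannot provide it. */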
6a35528a 3041 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4d22de3e 3042 pci_using_dac = 1;
6a35528a 3043 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4d22de3e
DLR
3044 if (err) {
3045 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3046 "coherent allocations\n");
3047 goto out_disable_device;
3048 }
3049 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
3050 dev_err(&pdev->dev, "no usable DMA configuration\n");
3051 goto out_disable_device;
3052 }
3053
3054 pci_set_master(pdev);
204e2f98 3055 pci_save_state(pdev);
4d22de3e
DLR
3056
3057 mmio_start = pci_resource_start(pdev, 0);
3058 mmio_len = pci_resource_len(pdev, 0);
3059 ai = t3_get_adapter_info(ent->driver_data);
3060
3061 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3062 if (!adapter) {
3063 err = -ENOMEM;
3064 goto out_disable_device;
3065 }
3066
3067 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3068 if (!adapter->regs) {
3069 dev_err(&pdev->dev, "cannot map device registers\n");
3070 err = -ENOMEM;
3071 goto out_free_adapter;
3072 }
3073
3074 adapter->pdev = pdev;
3075 adapter->name = pci_name(pdev);
3076 adapter->msg_enable = dflt_msg_enable;
3077 adapter->mmio_len = mmio_len;
3078
3079 mutex_init(&adapter->mdio_lock);
3080 spin_lock_init(&adapter->work_lock);
3081 spin_lock_init(&adapter->stats_lock);
3082
3083 INIT_LIST_HEAD(&adapter->adapter_list);
3084 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
bf792094 3085 INIT_WORK(&adapter->link_fault_handler_task, link_fault_task);
20d3fc11 3086 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
4d22de3e
DLR
3087 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3088
952cdf33 3089 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
4d22de3e
DLR
3090 struct net_device *netdev;
3091
82ad3329 3092 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
4d22de3e
DLR
3093 if (!netdev) {
3094 err = -ENOMEM;
3095 goto out_free_dev;
3096 }
3097
4d22de3e
DLR
3098 SET_NETDEV_DEV(netdev, &pdev->dev);
3099
3100 adapter->port[i] = netdev;
3101 pi = netdev_priv(netdev);
5fbf816f 3102 pi->adapter = adapter;
47fd23fe 3103 pi->rx_offload = T3_RX_CSUM | T3_LRO;
4d22de3e
DLR
3104 pi->port_id = i;
3105 netif_carrier_off(netdev);
82ad3329 3106 netif_tx_stop_all_queues(netdev);
4d22de3e
DLR
3107 netdev->irq = pdev->irq;
3108 netdev->mem_start = mmio_start;
3109 netdev->mem_end = mmio_start + mmio_len - 1;
4d22de3e
DLR
3110 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3111 netdev->features |= NETIF_F_LLTX;
7be2df45 3112 netdev->features |= NETIF_F_GRO;
4d22de3e
DLR
3113 if (pci_using_dac)
3114 netdev->features |= NETIF_F_HIGHDMA;
3115
3116 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dd752696 3117 netdev->netdev_ops = &cxgb_netdev_ops;
4d22de3e
DLR
3118 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3119 }
3120
5fbf816f 3121 pci_set_drvdata(pdev, adapter);
4d22de3e
DLR
3122 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3123 err = -ENODEV;
3124 goto out_free_dev;
3125 }
2eab17ab 3126
4d22de3e
DLR
3127 /*
3128 * The card is now ready to go. If any errors occur during device
3129 * registration we do not fail the whole card but rather proceed only
3130 * with the ports we manage to register successfully. However we must
3131 * register at least one net device.
3132 */
3133 for_each_port(adapter, i) {
3134 err = register_netdev(adapter->port[i]);
3135 if (err)
3136 dev_warn(&pdev->dev,
3137 "cannot register net device %s, skipping\n",
3138 adapter->port[i]->name);
3139 else {
3140 /*
3141 * Change the name we use for messages to the name of
3142 * the first successfully registered interface.
3143 */
3144 if (!adapter->registered_device_map)
3145 adapter->name = adapter->port[i]->name;
3146
3147 __set_bit(i, &adapter->registered_device_map);
3148 }
3149 }
3150 if (!adapter->registered_device_map) {
3151 dev_err(&pdev->dev, "could not register any net devices\n");
3152 goto out_free_dev;
3153 }
3154
3155 /* Driver's ready. Reflect it on LEDs */
3156 t3_led_ready(adapter);
3157
3158 if (is_offload(adapter)) {
3159 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3160 cxgb3_adapter_ofld(adapter);
3161 }
3162
3163 /* See what interrupts we'll be using */
3164 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3165 adapter->flags |= USING_MSIX;
3166 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3167 adapter->flags |= USING_MSI;
3168
8c263761
DLR
3169 set_nqsets(adapter);
3170
0ee8d33c 3171 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
4d22de3e
DLR
3172 &cxgb3_attr_group);
3173
3174 print_port_info(adapter, ai);
3175 return 0;
3176
3177out_free_dev:
3178 iounmap(adapter->regs);
952cdf33 3179 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
4d22de3e
DLR
3180 if (adapter->port[i])
3181 free_netdev(adapter->port[i]);
3182
3183out_free_adapter:
3184 kfree(adapter);
3185
3186out_disable_device:
3187 pci_disable_device(pdev);
3188out_release_regions:
3189 pci_release_regions(pdev);
3190 pci_set_drvdata(pdev, NULL);
3191 return err;
3192}
3193
3194static void __devexit remove_one(struct pci_dev *pdev)
3195{
5fbf816f 3196 struct adapter *adapter = pci_get_drvdata(pdev);
4d22de3e 3197
5fbf816f 3198 if (adapter) {
4d22de3e 3199 int i;
4d22de3e
DLR
3200
3201 t3_sge_stop(adapter);
0ee8d33c 3202 sysfs_remove_group(&adapter->port[0]->dev.kobj,
4d22de3e
DLR
3203 &cxgb3_attr_group);
3204
4d22de3e
DLR
3205 if (is_offload(adapter)) {
3206 cxgb3_adapter_unofld(adapter);
3207 if (test_bit(OFFLOAD_DEVMAP_BIT,
3208 &adapter->open_device_map))
3209 offload_close(&adapter->tdev);
3210 }
3211
67d92ab7
DLR
3212 for_each_port(adapter, i)
3213 if (test_bit(i, &adapter->registered_device_map))
3214 unregister_netdev(adapter->port[i]);
3215
0ca41c04 3216 t3_stop_sge_timers(adapter);
4d22de3e
DLR
3217 t3_free_sge_resources(adapter);
3218 cxgb_disable_msi(adapter);
3219
4d22de3e
DLR
3220 for_each_port(adapter, i)
3221 if (adapter->port[i])
3222 free_netdev(adapter->port[i]);
3223
3224 iounmap(adapter->regs);
3225 kfree(adapter);
3226 pci_release_regions(pdev);
3227 pci_disable_device(pdev);
3228 pci_set_drvdata(pdev, NULL);
3229 }
3230}
3231
3232static struct pci_driver driver = {
3233 .name = DRV_NAME,
3234 .id_table = cxgb3_pci_tbl,
3235 .probe = init_one,
3236 .remove = __devexit_p(remove_one),
91a6b50c 3237 .err_handler = &t3_err_handler,
4d22de3e
DLR
3238};
3239
3240static int __init cxgb3_init_module(void)
3241{
3242 int ret;
3243
3244 cxgb3_offload_init();
3245
3246 ret = pci_register_driver(&driver);
3247 return ret;
3248}
3249
3250static void __exit cxgb3_cleanup_module(void)
3251{
3252 pci_unregister_driver(&driver);
3253 if (cxgb3_wq)
3254 destroy_workqueue(cxgb3_wq);
3255}
3256
3257module_init(cxgb3_init_module);
3258module_exit(cxgb3_cleanup_module);