drivers/net/cxgb3/cxgb3_main.c
4d22de3e 1/*
1d68e93d 2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
2e283962 45#include <linux/firmware.h>
d9da466a 46#include <linux/log2.h>
47#include <asm/uaccess.h>
48
49#include "common.h"
50#include "cxgb3_ioctl.h"
51#include "regs.h"
52#include "cxgb3_offload.h"
53#include "version.h"
54
55#include "cxgb3_ctl_defs.h"
56#include "t3_cpl.h"
57#include "firmware_exports.h"
58
59enum {
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
65 MIN_TXQ_ENTRIES = 4,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
68 MIN_FL_ENTRIES = 32
69};
70
71#define PORT_MASK ((1 << MAX_NPORTS) - 1)
72
73#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
76
77#define EEPROM_MAGIC 0x38E2F10C
78
79#define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
81
82static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
93 {0,}
94};
95
96MODULE_DESCRIPTION(DRV_DESC);
97MODULE_AUTHOR("Chelsio Communications");
1d68e93d 98MODULE_LICENSE("Dual BSD/GPL");
99MODULE_VERSION(DRV_VERSION);
100MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
101
102static int dflt_msg_enable = DFLT_MSG_ENABLE;
103
104module_param(dflt_msg_enable, int, 0644);
105MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106
107/*
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
111 *
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
115 */
116static int msi = 2;
117
118module_param(msi, int, 0644);
119MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
120
121/*
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
124 */
125
126static int ofld_disable = 0;
127
128module_param(ofld_disable, int, 0644);
129MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
130
131/*
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
138 */
139static struct workqueue_struct *cxgb3_wq;
140
141/**
142 * link_report - show link status and link speed/duplex
 143 * @dev: the net device whose link settings are to be reported
144 *
145 * Shows the link status, speed, and duplex of a port.
146 */
147static void link_report(struct net_device *dev)
148{
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
151 else {
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
154
155 switch (p->link_config.speed) {
156 case SPEED_10000:
157 s = "10Gbps";
158 break;
159 case SPEED_1000:
160 s = "1000Mbps";
161 break;
162 case SPEED_100:
163 s = "100Mbps";
164 break;
165 }
166
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
169 }
170}
171
172/**
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
 175 * @port_id: the port index whose link status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
180 *
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
184 */
185void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
187{
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
191
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
194 return;
195
196 if (link_stat != netif_carrier_ok(dev)) {
6d6dabac 197 if (link_stat) {
59cf8107 198 t3_mac_enable(mac, MAC_DIRECTION_RX);
4d22de3e 199 netif_carrier_on(dev);
6d6dabac 200 } else {
4d22de3e 201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
205 }
206
207 link_report(dev);
208 }
209}
210
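/*
 * Propagate the net device's receive-mode settings (promiscuity and the
 * multicast list) to the port's MAC.
 */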
211static void cxgb_set_rxmode(struct net_device *dev)
212{
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
215
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
218}
219
220/**
221 * link_start - enable a port
222 * @dev: the device to enable
223 *
224 * Performs the MAC and PHY actions needed to enable a port.
225 */
226static void link_start(struct net_device *dev)
227{
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
231
232 init_rx_mode(&rm, dev, dev->mc_list);
233 t3_mac_reset(mac);
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
239}
240
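/*
 * Disable whichever of MSI-X or MSI the adapter is currently using and
 * clear the corresponding flag.
 */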
241static inline void cxgb_disable_msi(struct adapter *adapter)
242{
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
249 }
250}
251
252/*
253 * Interrupt handler for asynchronous events used with MSI-X.
254 */
255static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
256{
257 t3_slow_intr_handler(cookie);
258 return IRQ_HANDLED;
259}
260
261/*
262 * Name the MSI-X interrupts.
263 */
264static void name_msix_vecs(struct adapter *adap)
265{
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
267
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
270
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
274
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
279 }
280 }
281}
282
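/*
 * Request one MSI-X vector per SGE queue set (vector 0 is reserved for
 * asynchronous events).  On failure, release the vectors acquired so far.
 */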
283static int request_msix_data_irqs(struct adapter *adap)
284{
285 int i, j, err, qidx = 0;
286
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
289
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
293 adap->sge.qs[qidx].
294 rspq.polling), 0,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
297 if (err) {
298 while (--qidx >= 0)
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
301 return err;
302 }
303 qidx++;
304 }
305 }
306 return 0;
307}
308
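/*
 * Wait until 'n' management replies beyond 'init_cnt' offload packets have
 * been seen on response queue 0, giving up after roughly 50 ms.
 */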
309static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
310 unsigned long n)
311{
312 int attempts = 5;
313
314 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
315 if (!--attempts)
316 return -ETIMEDOUT;
317 msleep(10);
318 }
319 return 0;
320}
321
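/*
 * Initialize the parity of the TP's on-chip tables (SMT, L2T, routing and
 * TCB) by writing every entry, then wait for the management replies.
 */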
322static int init_tp_parity(struct adapter *adap)
323{
324 int i;
325 struct sk_buff *skb;
326 struct cpl_set_tcb_field *greq;
327 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
328
329 t3_tp_set_offload_mode(adap, 1);
330
331 for (i = 0; i < 16; i++) {
332 struct cpl_smt_write_req *req;
333
334 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
335 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
336 memset(req, 0, sizeof(*req));
337 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
338 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
339 req->iff = i;
340 t3_mgmt_tx(adap, skb);
341 }
342
343 for (i = 0; i < 2048; i++) {
344 struct cpl_l2t_write_req *req;
345
346 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
347 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
348 memset(req, 0, sizeof(*req));
349 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
350 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
351 req->params = htonl(V_L2T_W_IDX(i));
352 t3_mgmt_tx(adap, skb);
353 }
354
355 for (i = 0; i < 2048; i++) {
356 struct cpl_rte_write_req *req;
357
358 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
359 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
360 memset(req, 0, sizeof(*req));
361 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
362 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
363 req->l2t_idx = htonl(V_L2T_W_IDX(i));
364 t3_mgmt_tx(adap, skb);
365 }
366
367 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
368 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
369 memset(greq, 0, sizeof(*greq));
370 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
371 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
372 greq->mask = cpu_to_be64(1);
373 t3_mgmt_tx(adap, skb);
374
375 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
376 t3_tp_set_offload_mode(adap, 0);
377 return i;
378}
379
380/**
381 * setup_rss - configure RSS
382 * @adap: the adapter
383 *
384 * Sets up RSS to distribute packets to multiple receive queues. We
385 * configure the RSS CPU lookup table to distribute to the number of HW
386 * receive queues, and the response queue lookup table to narrow that
387 * down to the response queues actually configured for each port.
388 * We always configure the RSS mapping for two ports since the mapping
389 * table has plenty of entries.
390 */
391static void setup_rss(struct adapter *adap)
392{
393 int i;
394 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
395 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
396 u8 cpus[SGE_QSETS + 1];
397 u16 rspq_map[RSS_TABLE_SIZE];
398
399 for (i = 0; i < SGE_QSETS; ++i)
400 cpus[i] = i;
401 cpus[SGE_QSETS] = 0xff; /* terminator */
402
403 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
404 rspq_map[i] = i % nq0;
405 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
406 }
407
408 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
409 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
a2604be5 410 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
411}
412
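/*
 * Register a NAPI instance for each queue set that has been initialized.
 */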
bea3348e 413static void init_napi(struct adapter *adap)
4d22de3e 414{
bea3348e 415 int i;
4d22de3e 416
417 for (i = 0; i < SGE_QSETS; i++) {
418 struct sge_qset *qs = &adap->sge.qs[i];
4d22de3e 419
420 if (qs->adap)
421 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
422 64);
4d22de3e 423 }
424
425 /*
426 * netif_napi_add() can be called only once per napi_struct because it
427 * adds each new napi_struct to a list. Be careful not to call it a
428 * second time, e.g., during EEH recovery, by making a note of it.
429 */
430 adap->flags |= NAPI_INIT;
431}
432
433/*
434 * Wait until all NAPI handlers are descheduled. This includes the handlers of
435 * both netdevices representing interfaces and the dummy ones for the extra
436 * queues.
437 */
438static void quiesce_rx(struct adapter *adap)
439{
440 int i;
4d22de3e 441
442 for (i = 0; i < SGE_QSETS; i++)
443 if (adap->sge.qs[i].adap)
444 napi_disable(&adap->sge.qs[i].napi);
445}
4d22de3e 446
447static void enable_all_napi(struct adapter *adap)
448{
449 int i;
450 for (i = 0; i < SGE_QSETS; i++)
451 if (adap->sge.qs[i].adap)
452 napi_enable(&adap->sge.qs[i].napi);
453}
454
455/**
456 * setup_sge_qsets - configure SGE Tx/Rx/response queues
457 * @adap: the adapter
458 *
459 * Determines how many sets of SGE queues to use and initializes them.
460 * We support multiple queue sets per port if we have MSI-X, otherwise
461 * just one queue set per port.
462 */
463static int setup_sge_qsets(struct adapter *adap)
464{
bea3348e 465 int i, j, err, irq_idx = 0, qset_idx = 0;
8ac3ba68 466 unsigned int ntxq = SGE_TXQ_PER_SET;
467
468 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
469 irq_idx = -1;
470
471 for_each_port(adap, i) {
472 struct net_device *dev = adap->port[i];
bea3348e 473 struct port_info *pi = netdev_priv(dev);
4d22de3e 474
bea3348e 475 pi->qs = &adap->sge.qs[pi->first_qset];
476 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
477 err = t3_sge_alloc_qset(adap, qset_idx, 1,
478 (adap->flags & USING_MSIX) ? qset_idx + 1 :
479 irq_idx,
bea3348e 480 &adap->params.sge.qset[qset_idx], ntxq, dev);
481 if (err) {
482 t3_free_sge_resources(adap);
483 return err;
484 }
485 }
486 }
487
488 return 0;
489}
490
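/*
 * sysfs attribute helpers: reads and writes are serialized against ioctls
 * that may reconfigure the device by taking the RTNL lock, and stored
 * values are permission- and range-checked.
 */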
3e5192ee 491static ssize_t attr_show(struct device *d, char *buf,
896392ef 492 ssize_t(*format) (struct net_device *, char *))
493{
494 ssize_t len;
495
496 /* Synchronize with ioctls that may shut down the device */
497 rtnl_lock();
896392ef 498 len = (*format) (to_net_dev(d), buf);
499 rtnl_unlock();
500 return len;
501}
502
3e5192ee 503static ssize_t attr_store(struct device *d,
0ee8d33c 504 const char *buf, size_t len,
896392ef 505 ssize_t(*set) (struct net_device *, unsigned int),
506 unsigned int min_val, unsigned int max_val)
507{
508 char *endp;
509 ssize_t ret;
510 unsigned int val;
511
512 if (!capable(CAP_NET_ADMIN))
513 return -EPERM;
514
515 val = simple_strtoul(buf, &endp, 0);
516 if (endp == buf || val < min_val || val > max_val)
517 return -EINVAL;
518
519 rtnl_lock();
896392ef 520 ret = (*set) (to_net_dev(d), val);
521 if (!ret)
522 ret = len;
523 rtnl_unlock();
524 return ret;
525}
526
527#define CXGB3_SHOW(name, val_expr) \
896392ef 528static ssize_t format_##name(struct net_device *dev, char *buf) \
4d22de3e 529{ \
530 struct port_info *pi = netdev_priv(dev); \
531 struct adapter *adap = pi->adapter; \
532 return sprintf(buf, "%u\n", val_expr); \
533} \
534static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
535 char *buf) \
4d22de3e 536{ \
3e5192ee 537 return attr_show(d, buf, format_##name); \
538}
539
896392ef 540static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
4d22de3e 541{
542 struct port_info *pi = netdev_priv(dev);
543 struct adapter *adap = pi->adapter;
9f238486 544 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
896392ef 545
546 if (adap->flags & FULL_INIT_DONE)
547 return -EBUSY;
548 if (val && adap->params.rev == 0)
549 return -EINVAL;
550 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
551 min_tids)
552 return -EINVAL;
553 adap->params.mc5.nfilters = val;
554 return 0;
555}
556
557static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
558 const char *buf, size_t len)
4d22de3e 559{
3e5192ee 560 return attr_store(d, buf, len, set_nfilters, 0, ~0);
561}
562
896392ef 563static ssize_t set_nservers(struct net_device *dev, unsigned int val)
4d22de3e 564{
565 struct port_info *pi = netdev_priv(dev);
566 struct adapter *adap = pi->adapter;
896392ef 567
568 if (adap->flags & FULL_INIT_DONE)
569 return -EBUSY;
570 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
571 MC5_MIN_TIDS)
572 return -EINVAL;
573 adap->params.mc5.nservers = val;
574 return 0;
575}
576
577static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
578 const char *buf, size_t len)
4d22de3e 579{
3e5192ee 580 return attr_store(d, buf, len, set_nservers, 0, ~0);
581}
582
583#define CXGB3_ATTR_R(name, val_expr) \
584CXGB3_SHOW(name, val_expr) \
0ee8d33c 585static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
586
587#define CXGB3_ATTR_RW(name, val_expr, store_method) \
588CXGB3_SHOW(name, val_expr) \
0ee8d33c 589static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
590
591CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
592CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
593CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
594
595static struct attribute *cxgb3_attrs[] = {
596 &dev_attr_cam_size.attr,
597 &dev_attr_nfilters.attr,
598 &dev_attr_nservers.attr,
599 NULL
600};
601
602static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
603
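/*
 * Show/store helpers for the eight hardware Tx traffic schedulers: the show
 * path reports the configured rate in Kbps, the store path programs it.
 */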
3e5192ee 604static ssize_t tm_attr_show(struct device *d,
0ee8d33c 605 char *buf, int sched)
4d22de3e 606{
607 struct port_info *pi = netdev_priv(to_net_dev(d));
608 struct adapter *adap = pi->adapter;
4d22de3e 609 unsigned int v, addr, bpt, cpt;
5fbf816f 610 ssize_t len;
611
612 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
613 rtnl_lock();
614 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
615 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
616 if (sched & 1)
617 v >>= 16;
618 bpt = (v >> 8) & 0xff;
619 cpt = v & 0xff;
620 if (!cpt)
621 len = sprintf(buf, "disabled\n");
622 else {
623 v = (adap->params.vpd.cclk * 1000) / cpt;
624 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
625 }
626 rtnl_unlock();
627 return len;
628}
629
3e5192ee 630static ssize_t tm_attr_store(struct device *d,
0ee8d33c 631 const char *buf, size_t len, int sched)
4d22de3e 632{
633 struct port_info *pi = netdev_priv(to_net_dev(d));
634 struct adapter *adap = pi->adapter;
635 unsigned int val;
636 char *endp;
637 ssize_t ret;
638
639 if (!capable(CAP_NET_ADMIN))
640 return -EPERM;
641
642 val = simple_strtoul(buf, &endp, 0);
643 if (endp == buf || val > 10000000)
644 return -EINVAL;
645
646 rtnl_lock();
647 ret = t3_config_sched(adap, val, sched);
648 if (!ret)
649 ret = len;
650 rtnl_unlock();
651 return ret;
652}
653
654#define TM_ATTR(name, sched) \
655static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
656 char *buf) \
4d22de3e 657{ \
3e5192ee 658 return tm_attr_show(d, buf, sched); \
4d22de3e 659} \
660static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
661 const char *buf, size_t len) \
4d22de3e 662{ \
3e5192ee 663 return tm_attr_store(d, buf, len, sched); \
4d22de3e 664} \
0ee8d33c 665static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
666
667TM_ATTR(sched0, 0);
668TM_ATTR(sched1, 1);
669TM_ATTR(sched2, 2);
670TM_ATTR(sched3, 3);
671TM_ATTR(sched4, 4);
672TM_ATTR(sched5, 5);
673TM_ATTR(sched6, 6);
674TM_ATTR(sched7, 7);
675
676static struct attribute *offload_attrs[] = {
677 &dev_attr_sched0.attr,
678 &dev_attr_sched1.attr,
679 &dev_attr_sched2.attr,
680 &dev_attr_sched3.attr,
681 &dev_attr_sched4.attr,
682 &dev_attr_sched5.attr,
683 &dev_attr_sched6.attr,
684 &dev_attr_sched7.attr,
685 NULL
686};
687
688static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
689
690/*
691 * Sends an sk_buff to an offload queue driver
692 * after dealing with any active network taps.
693 */
694static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
695{
696 int ret;
697
698 local_bh_disable();
699 ret = t3_offload_tx(tdev, skb);
700 local_bh_enable();
701 return ret;
702}
703
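/*
 * Write a port's MAC address into the corresponding source MAC table (SMT)
 * entry via a firmware control message.
 */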
704static int write_smt_entry(struct adapter *adapter, int idx)
705{
706 struct cpl_smt_write_req *req;
707 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
708
709 if (!skb)
710 return -ENOMEM;
711
712 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
713 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
714 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
715 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
716 req->iff = idx;
717 memset(req->src_mac1, 0, sizeof(req->src_mac1));
718 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
719 skb->priority = 1;
720 offload_tx(&adapter->tdev, skb);
721 return 0;
722}
723
724static int init_smt(struct adapter *adapter)
725{
726 int i;
727
728 for_each_port(adapter, i)
729 write_smt_entry(adapter, i);
730 return 0;
731}
732
733static void init_port_mtus(struct adapter *adapter)
734{
735 unsigned int mtus = adapter->port[0]->mtu;
736
737 if (adapter->port[1])
738 mtus |= adapter->port[1]->mtu << 16;
739 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
740}
741
742static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
743 int hi, int port)
744{
745 struct sk_buff *skb;
746 struct mngt_pktsched_wr *req;
747
748 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
749 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
750 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
751 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
752 req->sched = sched;
753 req->idx = qidx;
754 req->min = lo;
755 req->max = hi;
756 req->binding = port;
757 t3_mgmt_tx(adap, skb);
758}
759
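/*
 * Bind every queue set to its port's Tx packet scheduler so that each port
 * transmits only from its own queue sets.
 */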
760static void bind_qsets(struct adapter *adap)
761{
762 int i, j;
763
764 for_each_port(adap, i) {
765 const struct port_info *pi = adap2pinfo(adap, i);
766
767 for (j = 0; j < pi->nqsets; ++j)
768 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
769 -1, i);
770 }
771}
772
7f672cf5 773#define FW_FNAME "t3fw-%d.%d.%d.bin"
47330077 774#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
775
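/*
 * Fetch the t3fw image matching the driver's expected firmware version via
 * request_firmware() and load it into the adapter.
 */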
776static int upgrade_fw(struct adapter *adap)
777{
778 int ret;
779 char buf[64];
780 const struct firmware *fw;
781 struct device *dev = &adap->pdev->dev;
782
783 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
7f672cf5 784 FW_VERSION_MINOR, FW_VERSION_MICRO);
785 ret = request_firmware(&fw, buf, dev);
786 if (ret < 0) {
787 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
788 buf);
789 return ret;
790 }
791 ret = t3_load_fw(adap, fw->data, fw->size);
792 release_firmware(fw);
793
794 if (ret == 0)
795 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
796 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
797 else
798 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
799 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
2eab17ab 800
801 return ret;
802}
803
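/*
 * Map the chip revision to the character used in protocol SRAM image file
 * names ('b' or 'c'); returns 0 for revisions without a separate image.
 */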
804static inline char t3rev2char(struct adapter *adapter)
805{
806 char rev = 0;
807
808 switch(adapter->params.rev) {
809 case T3_REV_B:
810 case T3_REV_B2:
811 rev = 'b';
812 break;
813 case T3_REV_C:
814 rev = 'c';
815 break;
816 }
817 return rev;
818}
819
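/*
 * Fetch the protocol engine (TP) SRAM image for this chip revision, verify
 * it, and write it to the adapter.
 */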
9265fabf 820static int update_tpsram(struct adapter *adap)
821{
822 const struct firmware *tpsram;
823 char buf[64];
824 struct device *dev = &adap->pdev->dev;
825 int ret;
826 char rev;
2eab17ab 827
828 rev = t3rev2char(adap);
829 if (!rev)
830 return 0;
831
832 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
833 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
834
835 ret = request_firmware(&tpsram, buf, dev);
836 if (ret < 0) {
837 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
838 buf);
839 return ret;
840 }
2eab17ab 841
842 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
843 if (ret)
2eab17ab 844 goto release_tpsram;
845
846 ret = t3_set_proto_sram(adap, tpsram->data);
847 if (ret == 0)
848 dev_info(dev,
849 "successful update of protocol engine "
850 "to %d.%d.%d\n",
851 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
852 else
 853 dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
854 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
855 if (ret)
856 dev_err(dev, "loading protocol SRAM failed\n");
857
858release_tpsram:
859 release_firmware(tpsram);
2eab17ab 860
861 return ret;
862}
863
864/**
865 * cxgb_up - enable the adapter
866 * @adapter: adapter being enabled
867 *
868 * Called when the first port is enabled, this function performs the
869 * actions necessary to make an adapter operational, such as completing
870 * the initialization of HW modules, and enabling interrupts.
871 *
872 * Must be called with the rtnl lock held.
873 */
874static int cxgb_up(struct adapter *adap)
875{
c54f5c24 876 int err;
47330077 877 int must_load;
878
879 if (!(adap->flags & FULL_INIT_DONE)) {
880 err = t3_check_fw_version(adap, &must_load);
881 if (err == -EINVAL) {
2e283962 882 err = upgrade_fw(adap);
883 if (err && must_load)
884 goto out;
885 }
4d22de3e 886
887 err = t3_check_tpsram_version(adap, &must_load);
888 if (err == -EINVAL) {
889 err = update_tpsram(adap);
890 if (err && must_load)
891 goto out;
892 }
893
894 err = t3_init_hw(adap, 0);
895 if (err)
896 goto out;
897
b881955b 898 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
6cdbd77e 899 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
bea3348e 900
901 err = setup_sge_qsets(adap);
902 if (err)
903 goto out;
904
905 setup_rss(adap);
906 if (!(adap->flags & NAPI_INIT))
907 init_napi(adap);
908 adap->flags |= FULL_INIT_DONE;
909 }
910
911 t3_intr_clear(adap);
912
913 if (adap->flags & USING_MSIX) {
914 name_msix_vecs(adap);
915 err = request_irq(adap->msix_info[0].vec,
916 t3_async_intr_handler, 0,
917 adap->msix_info[0].desc, adap);
918 if (err)
919 goto irq_err;
920
921 err = request_msix_data_irqs(adap);
922 if (err) {
923 free_irq(adap->msix_info[0].vec, adap);
924 goto irq_err;
925 }
926 } else if ((err = request_irq(adap->pdev->irq,
927 t3_intr_handler(adap,
928 adap->sge.qs[0].rspq.
929 polling),
930 (adap->flags & USING_MSI) ?
931 0 : IRQF_SHARED,
932 adap->name, adap)))
933 goto irq_err;
934
bea3348e 935 enable_all_napi(adap);
936 t3_sge_start(adap);
937 t3_intr_enable(adap);
14ab9892 938
939 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
940 is_offload(adap) && init_tp_parity(adap) == 0)
941 adap->flags |= TP_PARITY_INIT;
942
943 if (adap->flags & TP_PARITY_INIT) {
944 t3_write_reg(adap, A_TP_INT_CAUSE,
945 F_CMCACHEPERR | F_ARPLUTPERR);
946 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
947 }
948
949 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
950 bind_qsets(adap);
951 adap->flags |= QUEUES_BOUND;
952
953out:
954 return err;
955irq_err:
956 CH_ERR(adap, "request_irq failed, err %d\n", err);
957 goto out;
958}
959
960/*
961 * Release resources when all the ports and offloading have been stopped.
962 */
963static void cxgb_down(struct adapter *adapter)
964{
965 t3_sge_stop(adapter);
966 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
967 t3_intr_disable(adapter);
968 spin_unlock_irq(&adapter->work_lock);
969
970 if (adapter->flags & USING_MSIX) {
971 int i, n = 0;
972
973 free_irq(adapter->msix_info[0].vec, adapter);
974 for_each_port(adapter, i)
975 n += adap2pinfo(adapter, i)->nqsets;
976
977 for (i = 0; i < n; ++i)
978 free_irq(adapter->msix_info[i + 1].vec,
979 &adapter->sge.qs[i]);
980 } else
981 free_irq(adapter->pdev->irq, adapter);
982
983 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
984 quiesce_rx(adapter);
985}
986
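/*
 * Schedule the periodic adapter check, using the link-poll period if one is
 * configured and the statistics update period otherwise.
 */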
987static void schedule_chk_task(struct adapter *adap)
988{
989 unsigned int timeo;
990
991 timeo = adap->params.linkpoll_period ?
992 (HZ * adap->params.linkpoll_period) / 10 :
993 adap->params.stats_update_period * HZ;
994 if (timeo)
995 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
996}
997
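/*
 * Bring up the offload side of the adapter: enable offload mode, activate
 * the offload module, program the MTU and SMT tables, and notify registered
 * offload clients.
 */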
998static int offload_open(struct net_device *dev)
999{
1000 struct port_info *pi = netdev_priv(dev);
1001 struct adapter *adapter = pi->adapter;
1002 struct t3cdev *tdev = dev2t3cdev(dev);
4d22de3e 1003 int adap_up = adapter->open_device_map & PORT_MASK;
c54f5c24 1004 int err;
1005
1006 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1007 return 0;
1008
1009 if (!adap_up && (err = cxgb_up(adapter)) < 0)
48c4b6db 1010 goto out;
1011
1012 t3_tp_set_offload_mode(adapter, 1);
1013 tdev->lldev = adapter->port[0];
1014 err = cxgb3_offload_activate(adapter);
1015 if (err)
1016 goto out;
1017
1018 init_port_mtus(adapter);
1019 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1020 adapter->params.b_wnd,
1021 adapter->params.rev == 0 ?
1022 adapter->port[0]->mtu : 0xffff);
1023 init_smt(adapter);
1024
1025 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1026 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1027
1028 /* Call back all registered clients */
1029 cxgb3_add_clients(tdev);
1030
1031out:
1032 /* restore them in case the offload module has changed them */
1033 if (err) {
1034 t3_tp_set_offload_mode(adapter, 0);
1035 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1036 cxgb3_set_dummy_ops(tdev);
1037 }
1038 return err;
1039}
1040
1041static int offload_close(struct t3cdev *tdev)
1042{
1043 struct adapter *adapter = tdev2adap(tdev);
1044
1045 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1046 return 0;
1047
1048 /* Call back all registered clients */
1049 cxgb3_remove_clients(tdev);
1050
0ee8d33c 1051 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1052
1053 tdev->lldev = NULL;
1054 cxgb3_set_dummy_ops(tdev);
1055 t3_tp_set_offload_mode(adapter, 0);
1056 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1057
1058 if (!adapter->open_device_map)
1059 cxgb_down(adapter);
1060
1061 cxgb3_offload_deactivate(adapter);
1062 return 0;
1063}
1064
1065static int cxgb_open(struct net_device *dev)
1066{
4d22de3e 1067 struct port_info *pi = netdev_priv(dev);
5fbf816f 1068 struct adapter *adapter = pi->adapter;
4d22de3e 1069 int other_ports = adapter->open_device_map & PORT_MASK;
5fbf816f 1070 int err;
4d22de3e 1071
48c4b6db 1072 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1073 return err;
1074
1075 set_bit(pi->port_id, &adapter->open_device_map);
8ac3ba68 1076 if (is_offload(adapter) && !ofld_disable) {
4d22de3e
DLR
1077 err = offload_open(dev);
1078 if (err)
1079 printk(KERN_WARNING
1080 "Could not initialize offload capabilities\n");
1081 }
1082
1083 link_start(dev);
1084 t3_port_intr_enable(adapter, pi->port_id);
1085 netif_start_queue(dev);
1086 if (!other_ports)
1087 schedule_chk_task(adapter);
1088
1089 return 0;
1090}
1091
1092static int cxgb_close(struct net_device *dev)
1093{
1094 struct port_info *pi = netdev_priv(dev);
1095 struct adapter *adapter = pi->adapter;
4d22de3e 1096
5fbf816f 1097 t3_port_intr_disable(adapter, pi->port_id);
4d22de3e 1098 netif_stop_queue(dev);
5fbf816f 1099 pi->phy.ops->power_down(&pi->phy, 1);
4d22de3e 1100 netif_carrier_off(dev);
5fbf816f 1101 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1102
1103 spin_lock(&adapter->work_lock); /* sync with update task */
5fbf816f 1104 clear_bit(pi->port_id, &adapter->open_device_map);
1105 spin_unlock(&adapter->work_lock);
1106
1107 if (!(adapter->open_device_map & PORT_MASK))
1108 cancel_rearming_delayed_workqueue(cxgb3_wq,
1109 &adapter->adap_check_task);
1110
1111 if (!adapter->open_device_map)
1112 cxgb_down(adapter);
1113
1114 return 0;
1115}
1116
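/*
 * Return netdev statistics derived from the port's accumulated MAC counters.
 */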
1117static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1118{
1119 struct port_info *pi = netdev_priv(dev);
1120 struct adapter *adapter = pi->adapter;
1121 struct net_device_stats *ns = &pi->netstats;
1122 const struct mac_stats *pstats;
1123
1124 spin_lock(&adapter->stats_lock);
5fbf816f 1125 pstats = t3_mac_update_stats(&pi->mac);
1126 spin_unlock(&adapter->stats_lock);
1127
1128 ns->tx_bytes = pstats->tx_octets;
1129 ns->tx_packets = pstats->tx_frames;
1130 ns->rx_bytes = pstats->rx_octets;
1131 ns->rx_packets = pstats->rx_frames;
1132 ns->multicast = pstats->rx_mcast_frames;
1133
1134 ns->tx_errors = pstats->tx_underrun;
1135 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1136 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1137 pstats->rx_fifo_ovfl;
1138
1139 /* detailed rx_errors */
1140 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1141 ns->rx_over_errors = 0;
1142 ns->rx_crc_errors = pstats->rx_fcs_errs;
1143 ns->rx_frame_errors = pstats->rx_symbol_errs;
1144 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1145 ns->rx_missed_errors = pstats->rx_cong_drops;
1146
1147 /* detailed tx_errors */
1148 ns->tx_aborted_errors = 0;
1149 ns->tx_carrier_errors = 0;
1150 ns->tx_fifo_errors = pstats->tx_underrun;
1151 ns->tx_heartbeat_errors = 0;
1152 ns->tx_window_errors = 0;
1153 return ns;
1154}
1155
1156static u32 get_msglevel(struct net_device *dev)
1157{
1158 struct port_info *pi = netdev_priv(dev);
1159 struct adapter *adapter = pi->adapter;
1160
1161 return adapter->msg_enable;
1162}
1163
1164static void set_msglevel(struct net_device *dev, u32 val)
1165{
1166 struct port_info *pi = netdev_priv(dev);
1167 struct adapter *adapter = pi->adapter;
1168
1169 adapter->msg_enable = val;
1170}
1171
1172static char stats_strings[][ETH_GSTRING_LEN] = {
1173 "TxOctetsOK ",
1174 "TxFramesOK ",
1175 "TxMulticastFramesOK",
1176 "TxBroadcastFramesOK",
1177 "TxPauseFrames ",
1178 "TxUnderrun ",
1179 "TxExtUnderrun ",
1180
1181 "TxFrames64 ",
1182 "TxFrames65To127 ",
1183 "TxFrames128To255 ",
1184 "TxFrames256To511 ",
1185 "TxFrames512To1023 ",
1186 "TxFrames1024To1518 ",
1187 "TxFrames1519ToMax ",
1188
1189 "RxOctetsOK ",
1190 "RxFramesOK ",
1191 "RxMulticastFramesOK",
1192 "RxBroadcastFramesOK",
1193 "RxPauseFrames ",
1194 "RxFCSErrors ",
1195 "RxSymbolErrors ",
1196 "RxShortErrors ",
1197 "RxJabberErrors ",
1198 "RxLengthErrors ",
1199 "RxFIFOoverflow ",
1200
1201 "RxFrames64 ",
1202 "RxFrames65To127 ",
1203 "RxFrames128To255 ",
1204 "RxFrames256To511 ",
1205 "RxFrames512To1023 ",
1206 "RxFrames1024To1518 ",
1207 "RxFrames1519ToMax ",
1208
1209 "PhyFIFOErrors ",
1210 "TSO ",
1211 "VLANextractions ",
1212 "VLANinsertions ",
1213 "TxCsumOffload ",
1214 "RxCsumGood ",
1215 "LroAggregated ",
1216 "LroFlushed ",
1217 "LroNoDesc ",
1218 "RxDrops ",
1219
1220 "CheckTXEnToggled ",
1221 "CheckResets ",
1222
1223};
1224
b9f2c044 1225static int get_sset_count(struct net_device *dev, int sset)
4d22de3e 1226{
1227 switch (sset) {
1228 case ETH_SS_STATS:
1229 return ARRAY_SIZE(stats_strings);
1230 default:
1231 return -EOPNOTSUPP;
1232 }
1233}
1234
1235#define T3_REGMAP_SIZE (3 * 1024)
1236
1237static int get_regs_len(struct net_device *dev)
1238{
1239 return T3_REGMAP_SIZE;
1240}
1241
1242static int get_eeprom_len(struct net_device *dev)
1243{
1244 return EEPROMSIZE;
1245}
1246
1247static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1248{
1249 struct port_info *pi = netdev_priv(dev);
1250 struct adapter *adapter = pi->adapter;
4d22de3e 1251 u32 fw_vers = 0;
47330077 1252 u32 tp_vers = 0;
1253
1254 t3_get_fw_version(adapter, &fw_vers);
47330077 1255 t3_get_tp_version(adapter, &tp_vers);
1256
1257 strcpy(info->driver, DRV_NAME);
1258 strcpy(info->version, DRV_VERSION);
1259 strcpy(info->bus_info, pci_name(adapter->pdev));
1260 if (!fw_vers)
1261 strcpy(info->fw_version, "N/A");
4aac3899 1262 else {
4d22de3e 1263 snprintf(info->fw_version, sizeof(info->fw_version),
47330077 1264 "%s %u.%u.%u TP %u.%u.%u",
1265 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1266 G_FW_VERSION_MAJOR(fw_vers),
1267 G_FW_VERSION_MINOR(fw_vers),
1268 G_FW_VERSION_MICRO(fw_vers),
1269 G_TP_VERSION_MAJOR(tp_vers),
1270 G_TP_VERSION_MINOR(tp_vers),
1271 G_TP_VERSION_MICRO(tp_vers));
4aac3899 1272 }
1273}
1274
1275static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1276{
1277 if (stringset == ETH_SS_STATS)
1278 memcpy(data, stats_strings, sizeof(stats_strings));
1279}
1280
1281static unsigned long collect_sge_port_stats(struct adapter *adapter,
1282 struct port_info *p, int idx)
1283{
1284 int i;
1285 unsigned long tot = 0;
1286
1287 for (i = 0; i < p->nqsets; ++i)
1288 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1289 return tot;
1290}
1291
1292static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1293 u64 *data)
1294{
4d22de3e 1295 struct port_info *pi = netdev_priv(dev);
5fbf816f 1296 struct adapter *adapter = pi->adapter;
1297 const struct mac_stats *s;
1298
1299 spin_lock(&adapter->stats_lock);
1300 s = t3_mac_update_stats(&pi->mac);
1301 spin_unlock(&adapter->stats_lock);
1302
1303 *data++ = s->tx_octets;
1304 *data++ = s->tx_frames;
1305 *data++ = s->tx_mcast_frames;
1306 *data++ = s->tx_bcast_frames;
1307 *data++ = s->tx_pause;
1308 *data++ = s->tx_underrun;
1309 *data++ = s->tx_fifo_urun;
1310
1311 *data++ = s->tx_frames_64;
1312 *data++ = s->tx_frames_65_127;
1313 *data++ = s->tx_frames_128_255;
1314 *data++ = s->tx_frames_256_511;
1315 *data++ = s->tx_frames_512_1023;
1316 *data++ = s->tx_frames_1024_1518;
1317 *data++ = s->tx_frames_1519_max;
1318
1319 *data++ = s->rx_octets;
1320 *data++ = s->rx_frames;
1321 *data++ = s->rx_mcast_frames;
1322 *data++ = s->rx_bcast_frames;
1323 *data++ = s->rx_pause;
1324 *data++ = s->rx_fcs_errs;
1325 *data++ = s->rx_symbol_errs;
1326 *data++ = s->rx_short;
1327 *data++ = s->rx_jabber;
1328 *data++ = s->rx_too_long;
1329 *data++ = s->rx_fifo_ovfl;
1330
1331 *data++ = s->rx_frames_64;
1332 *data++ = s->rx_frames_65_127;
1333 *data++ = s->rx_frames_128_255;
1334 *data++ = s->rx_frames_256_511;
1335 *data++ = s->rx_frames_512_1023;
1336 *data++ = s->rx_frames_1024_1518;
1337 *data++ = s->rx_frames_1519_max;
1338
1339 *data++ = pi->phy.fifo_errors;
1340
1341 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1342 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1343 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1344 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1345 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1346 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1347 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1348 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
4d22de3e 1349 *data++ = s->rx_cong_drops;
1350
1351 *data++ = s->num_toggled;
1352 *data++ = s->num_resets;
1353}
1354
1355static inline void reg_block_dump(struct adapter *ap, void *buf,
1356 unsigned int start, unsigned int end)
1357{
1358 u32 *p = buf + start;
1359
1360 for (; start <= end; start += sizeof(u32))
1361 *p++ = t3_read_reg(ap, start);
1362}
1363
1364static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1365 void *buf)
1366{
1367 struct port_info *pi = netdev_priv(dev);
1368 struct adapter *ap = pi->adapter;
1369
1370 /*
1371 * Version scheme:
1372 * bits 0..9: chip version
1373 * bits 10..15: chip revision
1374 * bit 31: set for PCIe cards
1375 */
1376 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1377
1378 /*
1379 * We skip the MAC statistics registers because they are clear-on-read.
1380 * Also reading multi-register stats would need to synchronize with the
1381 * periodic mac stats accumulation. Hard to justify the complexity.
1382 */
1383 memset(buf, 0, T3_REGMAP_SIZE);
1384 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1385 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1386 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1387 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1388 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1389 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1390 XGM_REG(A_XGM_SERDES_STAT3, 1));
1391 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1392 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1393}
1394
1395static int restart_autoneg(struct net_device *dev)
1396{
1397 struct port_info *p = netdev_priv(dev);
1398
1399 if (!netif_running(dev))
1400 return -EAGAIN;
1401 if (p->link_config.autoneg != AUTONEG_ENABLE)
1402 return -EINVAL;
1403 p->phy.ops->autoneg_restart(&p->phy);
1404 return 0;
1405}
1406
1407static int cxgb3_phys_id(struct net_device *dev, u32 data)
1408{
1409 struct port_info *pi = netdev_priv(dev);
1410 struct adapter *adapter = pi->adapter;
4d22de3e 1411 int i;
1412
1413 if (data == 0)
1414 data = 2;
1415
1416 for (i = 0; i < data * 2; i++) {
1417 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1418 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1419 if (msleep_interruptible(500))
1420 break;
1421 }
1422 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1423 F_GPIO0_OUT_VAL);
1424 return 0;
1425}
1426
1427static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1428{
1429 struct port_info *p = netdev_priv(dev);
1430
1431 cmd->supported = p->link_config.supported;
1432 cmd->advertising = p->link_config.advertising;
1433
1434 if (netif_carrier_ok(dev)) {
1435 cmd->speed = p->link_config.speed;
1436 cmd->duplex = p->link_config.duplex;
1437 } else {
1438 cmd->speed = -1;
1439 cmd->duplex = -1;
1440 }
1441
1442 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1443 cmd->phy_address = p->phy.addr;
1444 cmd->transceiver = XCVR_EXTERNAL;
1445 cmd->autoneg = p->link_config.autoneg;
1446 cmd->maxtxpkt = 0;
1447 cmd->maxrxpkt = 0;
1448 return 0;
1449}
1450
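/*
 * Translate a speed/duplex pair into the matching ethtool SUPPORTED_* bit.
 */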
1451static int speed_duplex_to_caps(int speed, int duplex)
1452{
1453 int cap = 0;
1454
1455 switch (speed) {
1456 case SPEED_10:
1457 if (duplex == DUPLEX_FULL)
1458 cap = SUPPORTED_10baseT_Full;
1459 else
1460 cap = SUPPORTED_10baseT_Half;
1461 break;
1462 case SPEED_100:
1463 if (duplex == DUPLEX_FULL)
1464 cap = SUPPORTED_100baseT_Full;
1465 else
1466 cap = SUPPORTED_100baseT_Half;
1467 break;
1468 case SPEED_1000:
1469 if (duplex == DUPLEX_FULL)
1470 cap = SUPPORTED_1000baseT_Full;
1471 else
1472 cap = SUPPORTED_1000baseT_Half;
1473 break;
1474 case SPEED_10000:
1475 if (duplex == DUPLEX_FULL)
1476 cap = SUPPORTED_10000baseT_Full;
1477 }
1478 return cap;
1479}
1480
1481#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1482 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1483 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1484 ADVERTISED_10000baseT_Full)
1485
1486static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1487{
1488 struct port_info *p = netdev_priv(dev);
1489 struct link_config *lc = &p->link_config;
1490
1491 if (!(lc->supported & SUPPORTED_Autoneg))
1492 return -EOPNOTSUPP; /* can't change speed/duplex */
1493
1494 if (cmd->autoneg == AUTONEG_DISABLE) {
1495 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1496
1497 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1498 return -EINVAL;
1499 lc->requested_speed = cmd->speed;
1500 lc->requested_duplex = cmd->duplex;
1501 lc->advertising = 0;
1502 } else {
1503 cmd->advertising &= ADVERTISED_MASK;
1504 cmd->advertising &= lc->supported;
1505 if (!cmd->advertising)
1506 return -EINVAL;
1507 lc->requested_speed = SPEED_INVALID;
1508 lc->requested_duplex = DUPLEX_INVALID;
1509 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1510 }
1511 lc->autoneg = cmd->autoneg;
1512 if (netif_running(dev))
1513 t3_link_start(&p->phy, &p->mac, lc);
1514 return 0;
1515}
1516
1517static void get_pauseparam(struct net_device *dev,
1518 struct ethtool_pauseparam *epause)
1519{
1520 struct port_info *p = netdev_priv(dev);
1521
1522 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1523 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1524 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1525}
1526
1527static int set_pauseparam(struct net_device *dev,
1528 struct ethtool_pauseparam *epause)
1529{
1530 struct port_info *p = netdev_priv(dev);
1531 struct link_config *lc = &p->link_config;
1532
1533 if (epause->autoneg == AUTONEG_DISABLE)
1534 lc->requested_fc = 0;
1535 else if (lc->supported & SUPPORTED_Autoneg)
1536 lc->requested_fc = PAUSE_AUTONEG;
1537 else
1538 return -EINVAL;
1539
1540 if (epause->rx_pause)
1541 lc->requested_fc |= PAUSE_RX;
1542 if (epause->tx_pause)
1543 lc->requested_fc |= PAUSE_TX;
1544 if (lc->autoneg == AUTONEG_ENABLE) {
1545 if (netif_running(dev))
1546 t3_link_start(&p->phy, &p->mac, lc);
1547 } else {
1548 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1549 if (netif_running(dev))
1550 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1551 }
1552 return 0;
1553}
1554
1555static u32 get_rx_csum(struct net_device *dev)
1556{
1557 struct port_info *p = netdev_priv(dev);
1558
1559 return p->rx_csum_offload;
1560}
1561
1562static int set_rx_csum(struct net_device *dev, u32 data)
1563{
1564 struct port_info *p = netdev_priv(dev);
1565
1566 p->rx_csum_offload = data;
1567 if (!data) {
1568 struct adapter *adap = p->adapter;
1569 int i;
1570
1571 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1572 adap->sge.qs[i].lro_enabled = 0;
1573 }
1574 return 0;
1575}
1576
1577static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1578{
1579 struct port_info *pi = netdev_priv(dev);
1580 struct adapter *adapter = pi->adapter;
05b97b30 1581 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1582
1583 e->rx_max_pending = MAX_RX_BUFFERS;
1584 e->rx_mini_max_pending = 0;
1585 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1586 e->tx_max_pending = MAX_TXQ_ENTRIES;
1587
1588 e->rx_pending = q->fl_size;
1589 e->rx_mini_pending = q->rspq_size;
1590 e->rx_jumbo_pending = q->jumbo_size;
1591 e->tx_pending = q->txq_size[0];
1592}
1593
1594static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1595{
1596 struct port_info *pi = netdev_priv(dev);
1597 struct adapter *adapter = pi->adapter;
05b97b30 1598 struct qset_params *q;
5fbf816f 1599 int i;
1600
1601 if (e->rx_pending > MAX_RX_BUFFERS ||
1602 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1603 e->tx_pending > MAX_TXQ_ENTRIES ||
1604 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1605 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1606 e->rx_pending < MIN_FL_ENTRIES ||
1607 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1608 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1609 return -EINVAL;
1610
1611 if (adapter->flags & FULL_INIT_DONE)
1612 return -EBUSY;
1613
1614 q = &adapter->params.sge.qset[pi->first_qset];
1615 for (i = 0; i < pi->nqsets; ++i, ++q) {
1616 q->rspq_size = e->rx_mini_pending;
1617 q->fl_size = e->rx_pending;
1618 q->jumbo_size = e->rx_jumbo_pending;
1619 q->txq_size[0] = e->tx_pending;
1620 q->txq_size[1] = e->tx_pending;
1621 q->txq_size[2] = e->tx_pending;
1622 }
1623 return 0;
1624}
1625
1626static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1627{
1628 struct port_info *pi = netdev_priv(dev);
1629 struct adapter *adapter = pi->adapter;
1630 struct qset_params *qsp = &adapter->params.sge.qset[0];
1631 struct sge_qset *qs = &adapter->sge.qs[0];
1632
1633 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1634 return -EINVAL;
1635
1636 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1637 t3_update_qset_coalesce(qs, qsp);
1638 return 0;
1639}
1640
1641static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1642{
1643 struct port_info *pi = netdev_priv(dev);
1644 struct adapter *adapter = pi->adapter;
1645 struct qset_params *q = adapter->params.sge.qset;
1646
1647 c->rx_coalesce_usecs = q->coalesce_usecs;
1648 return 0;
1649}
1650
1651static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1652 u8 * data)
1653{
1654 struct port_info *pi = netdev_priv(dev);
1655 struct adapter *adapter = pi->adapter;
4d22de3e 1656 int i, err = 0;
1657
1658 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1659 if (!buf)
1660 return -ENOMEM;
1661
1662 e->magic = EEPROM_MAGIC;
1663 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
05e5c116 1664 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1665
1666 if (!err)
1667 memcpy(data, buf + e->offset, e->len);
1668 kfree(buf);
1669 return err;
1670}
1671
1672static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1673 u8 * data)
1674{
1675 struct port_info *pi = netdev_priv(dev);
1676 struct adapter *adapter = pi->adapter;
1677 u32 aligned_offset, aligned_len;
1678 __le32 *p;
4d22de3e 1679 u8 *buf;
c54f5c24 1680 int err;
1681
1682 if (eeprom->magic != EEPROM_MAGIC)
1683 return -EINVAL;
1684
1685 aligned_offset = eeprom->offset & ~3;
1686 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1687
1688 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1689 buf = kmalloc(aligned_len, GFP_KERNEL);
1690 if (!buf)
1691 return -ENOMEM;
05e5c116 1692 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1693 if (!err && aligned_len > 4)
1694 err = t3_seeprom_read(adapter,
1695 aligned_offset + aligned_len - 4,
05e5c116 1696 (__le32 *) & buf[aligned_len - 4]);
1697 if (err)
1698 goto out;
1699 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1700 } else
1701 buf = data;
1702
1703 err = t3_seeprom_wp(adapter, 0);
1704 if (err)
1705 goto out;
1706
05e5c116 1707 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1708 err = t3_seeprom_write(adapter, aligned_offset, *p);
1709 aligned_offset += 4;
1710 }
1711
1712 if (!err)
1713 err = t3_seeprom_wp(adapter, 1);
1714out:
1715 if (buf != data)
1716 kfree(buf);
1717 return err;
1718}
1719
1720static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1721{
1722 wol->supported = 0;
1723 wol->wolopts = 0;
1724 memset(&wol->sopass, 0, sizeof(wol->sopass));
1725}
1726
1727static const struct ethtool_ops cxgb_ethtool_ops = {
1728 .get_settings = get_settings,
1729 .set_settings = set_settings,
1730 .get_drvinfo = get_drvinfo,
1731 .get_msglevel = get_msglevel,
1732 .set_msglevel = set_msglevel,
1733 .get_ringparam = get_sge_param,
1734 .set_ringparam = set_sge_param,
1735 .get_coalesce = get_coalesce,
1736 .set_coalesce = set_coalesce,
1737 .get_eeprom_len = get_eeprom_len,
1738 .get_eeprom = get_eeprom,
1739 .set_eeprom = set_eeprom,
1740 .get_pauseparam = get_pauseparam,
1741 .set_pauseparam = set_pauseparam,
1742 .get_rx_csum = get_rx_csum,
1743 .set_rx_csum = set_rx_csum,
4d22de3e 1744 .set_tx_csum = ethtool_op_set_tx_csum,
1745 .set_sg = ethtool_op_set_sg,
1746 .get_link = ethtool_op_get_link,
1747 .get_strings = get_strings,
1748 .phys_id = cxgb3_phys_id,
1749 .nway_reset = restart_autoneg,
b9f2c044 1750 .get_sset_count = get_sset_count,
1751 .get_ethtool_stats = get_stats,
1752 .get_regs_len = get_regs_len,
1753 .get_regs = get_regs,
1754 .get_wol = get_wol,
4d22de3e 1755 .set_tso = ethtool_op_set_tso,
1756};
1757
1758static int in_range(int val, int lo, int hi)
1759{
1760 return val < 0 || (val <= hi && val >= lo);
1761}
1762
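/*
 * Handler for the driver-private extension ioctls: queue set tuning,
 * firmware loading, MTU table updates, and related configuration.
 */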
1763static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1764{
1765 struct port_info *pi = netdev_priv(dev);
1766 struct adapter *adapter = pi->adapter;
4d22de3e 1767 u32 cmd;
5fbf816f 1768 int ret;
1769
1770 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1771 return -EFAULT;
1772
1773 switch (cmd) {
1774 case CHELSIO_SET_QSET_PARAMS:{
1775 int i;
1776 struct qset_params *q;
1777 struct ch_qset_params t;
1778
1779 if (!capable(CAP_NET_ADMIN))
1780 return -EPERM;
1781 if (copy_from_user(&t, useraddr, sizeof(t)))
1782 return -EFAULT;
1783 if (t.qset_idx >= SGE_QSETS)
1784 return -EINVAL;
1785 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1786 !in_range(t.cong_thres, 0, 255) ||
1787 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1788 MAX_TXQ_ENTRIES) ||
1789 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1790 MAX_TXQ_ENTRIES) ||
1791 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1792 MAX_CTRL_TXQ_ENTRIES) ||
1793 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1794 MAX_RX_BUFFERS)
1795 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1796 MAX_RX_JUMBO_BUFFERS)
1797 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1798 MAX_RSPQ_ENTRIES))
1799 return -EINVAL;
1800 if ((adapter->flags & FULL_INIT_DONE) &&
1801 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1802 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1803 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1804 t.polling >= 0 || t.cong_thres >= 0))
1805 return -EBUSY;
1806
1807 q = &adapter->params.sge.qset[t.qset_idx];
1808
1809 if (t.rspq_size >= 0)
1810 q->rspq_size = t.rspq_size;
1811 if (t.fl_size[0] >= 0)
1812 q->fl_size = t.fl_size[0];
1813 if (t.fl_size[1] >= 0)
1814 q->jumbo_size = t.fl_size[1];
1815 if (t.txq_size[0] >= 0)
1816 q->txq_size[0] = t.txq_size[0];
1817 if (t.txq_size[1] >= 0)
1818 q->txq_size[1] = t.txq_size[1];
1819 if (t.txq_size[2] >= 0)
1820 q->txq_size[2] = t.txq_size[2];
1821 if (t.cong_thres >= 0)
1822 q->cong_thres = t.cong_thres;
1823 if (t.intr_lat >= 0) {
1824 struct sge_qset *qs =
1825 &adapter->sge.qs[t.qset_idx];
1826
1827 q->coalesce_usecs = t.intr_lat;
1828 t3_update_qset_coalesce(qs, q);
1829 }
1830 if (t.polling >= 0) {
1831 if (adapter->flags & USING_MSIX)
1832 q->polling = t.polling;
1833 else {
1834 /* No polling with INTx for T3A */
1835 if (adapter->params.rev == 0 &&
1836 !(adapter->flags & USING_MSI))
1837 t.polling = 0;
1838
1839 for (i = 0; i < SGE_QSETS; i++) {
1840 q = &adapter->params.sge.
1841 qset[i];
1842 q->polling = t.polling;
1843 }
1844 }
1845 }
1846 if (t.lro >= 0) {
1847 struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1848 q->lro = t.lro;
1849 qs->lro_enabled = t.lro;
1850 }
1851 break;
1852 }
1853 case CHELSIO_GET_QSET_PARAMS:{
1854 struct qset_params *q;
1855 struct ch_qset_params t;
1856
1857 if (copy_from_user(&t, useraddr, sizeof(t)))
1858 return -EFAULT;
1859 if (t.qset_idx >= SGE_QSETS)
1860 return -EINVAL;
1861
1862 q = &adapter->params.sge.qset[t.qset_idx];
1863 t.rspq_size = q->rspq_size;
1864 t.txq_size[0] = q->txq_size[0];
1865 t.txq_size[1] = q->txq_size[1];
1866 t.txq_size[2] = q->txq_size[2];
1867 t.fl_size[0] = q->fl_size;
1868 t.fl_size[1] = q->jumbo_size;
1869 t.polling = q->polling;
b47385bd 1870 t.lro = q->lro;
1871 t.intr_lat = q->coalesce_usecs;
1872 t.cong_thres = q->cong_thres;
1873
1874 if (copy_to_user(useraddr, &t, sizeof(t)))
1875 return -EFAULT;
1876 break;
1877 }
1878 case CHELSIO_SET_QSET_NUM:{
1879 struct ch_reg edata;
1880 unsigned int i, first_qset = 0, other_qsets = 0;
1881
1882 if (!capable(CAP_NET_ADMIN))
1883 return -EPERM;
1884 if (adapter->flags & FULL_INIT_DONE)
1885 return -EBUSY;
1886 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1887 return -EFAULT;
1888 if (edata.val < 1 ||
1889 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1890 return -EINVAL;
1891
1892 for_each_port(adapter, i)
1893 if (adapter->port[i] && adapter->port[i] != dev)
1894 other_qsets += adap2pinfo(adapter, i)->nqsets;
1895
1896 if (edata.val + other_qsets > SGE_QSETS)
1897 return -EINVAL;
1898
1899 pi->nqsets = edata.val;
1900
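/* Hand out contiguous queue-set ranges to all ports now that this port's count changed. */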
1901 for_each_port(adapter, i)
1902 if (adapter->port[i]) {
1903 pi = adap2pinfo(adapter, i);
1904 pi->first_qset = first_qset;
1905 first_qset += pi->nqsets;
1906 }
1907 break;
1908 }
1909 case CHELSIO_GET_QSET_NUM:{
1910 struct ch_reg edata;
1911
1912 edata.cmd = CHELSIO_GET_QSET_NUM;
1913 edata.val = pi->nqsets;
1914 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1915 return -EFAULT;
1916 break;
1917 }
1918 case CHELSIO_LOAD_FW:{
1919 u8 *fw_data;
1920 struct ch_mem_range t;
1921
1b3aa7af 1922 if (!capable(CAP_SYS_RAWIO))
1923 return -EPERM;
1924 if (copy_from_user(&t, useraddr, sizeof(t)))
1925 return -EFAULT;
1b3aa7af 1926 /* XXX: t.len comes straight from userspace and is not sanity-checked before the allocation below. */
1927 fw_data = kmalloc(t.len, GFP_KERNEL);
1928 if (!fw_data)
1929 return -ENOMEM;
1930
 1931 if (copy_from_user(fw_data,
 1932 useraddr + sizeof(t), t.len)) {
1933 kfree(fw_data);
1934 return -EFAULT;
1935 }
1936
1937 ret = t3_load_fw(adapter, fw_data, t.len);
1938 kfree(fw_data);
1939 if (ret)
1940 return ret;
1941 break;
1942 }
1943 case CHELSIO_SETMTUTAB:{
1944 struct ch_mtus m;
1945 int i;
1946
1947 if (!is_offload(adapter))
1948 return -EOPNOTSUPP;
1949 if (!capable(CAP_NET_ADMIN))
1950 return -EPERM;
1951 if (offload_running(adapter))
1952 return -EBUSY;
1953 if (copy_from_user(&m, useraddr, sizeof(m)))
1954 return -EFAULT;
1955 if (m.nmtus != NMTUS)
1956 return -EINVAL;
1957 if (m.mtus[0] < 81) /* accommodate SACK */
1958 return -EINVAL;
1959
 1960 /* MTUs must be in non-decreasing order */
1961 for (i = 1; i < NMTUS; ++i)
1962 if (m.mtus[i] < m.mtus[i - 1])
1963 return -EINVAL;
1964
1965 memcpy(adapter->params.mtus, m.mtus,
1966 sizeof(adapter->params.mtus));
1967 break;
1968 }
1969 case CHELSIO_GET_PM:{
1970 struct tp_params *p = &adapter->params.tp;
1971 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1972
1973 if (!is_offload(adapter))
1974 return -EOPNOTSUPP;
1975 m.tx_pg_sz = p->tx_pg_size;
1976 m.tx_num_pg = p->tx_num_pgs;
1977 m.rx_pg_sz = p->rx_pg_size;
1978 m.rx_num_pg = p->rx_num_pgs;
1979 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1980 if (copy_to_user(useraddr, &m, sizeof(m)))
1981 return -EFAULT;
1982 break;
1983 }
1984 case CHELSIO_SET_PM:{
1985 struct ch_pm m;
1986 struct tp_params *p = &adapter->params.tp;
1987
1988 if (!is_offload(adapter))
1989 return -EOPNOTSUPP;
1990 if (!capable(CAP_NET_ADMIN))
1991 return -EPERM;
1992 if (adapter->flags & FULL_INIT_DONE)
1993 return -EBUSY;
1994 if (copy_from_user(&m, useraddr, sizeof(m)))
1995 return -EFAULT;
d9da466a 1996 if (!is_power_of_2(m.rx_pg_sz) ||
1997 !is_power_of_2(m.tx_pg_sz))
1998 return -EINVAL; /* not power of 2 */
1999 if (!(m.rx_pg_sz & 0x14000))
2000 return -EINVAL; /* not 16KB or 64KB */
2001 if (!(m.tx_pg_sz & 0x1554000))
 2002 return -EINVAL; /* not 16KB..16MB in powers of 4 */
2003 if (m.tx_num_pg == -1)
2004 m.tx_num_pg = p->tx_num_pgs;
2005 if (m.rx_num_pg == -1)
2006 m.rx_num_pg = p->rx_num_pgs;
2007 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2008 return -EINVAL;
2009 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2010 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2011 return -EINVAL;
2012 p->rx_pg_size = m.rx_pg_sz;
2013 p->tx_pg_size = m.tx_pg_sz;
2014 p->rx_num_pgs = m.rx_num_pg;
2015 p->tx_num_pgs = m.tx_num_pg;
2016 break;
2017 }
2018 case CHELSIO_GET_MEM:{
2019 struct ch_mem_range t;
2020 struct mc7 *mem;
2021 u64 buf[32];
2022
2023 if (!is_offload(adapter))
2024 return -EOPNOTSUPP;
2025 if (!(adapter->flags & FULL_INIT_DONE))
2026 return -EIO; /* need the memory controllers */
2027 if (copy_from_user(&t, useraddr, sizeof(t)))
2028 return -EFAULT;
2029 if ((t.addr & 7) || (t.len & 7))
2030 return -EINVAL;
2031 if (t.mem_id == MEM_CM)
2032 mem = &adapter->cm;
2033 else if (t.mem_id == MEM_PMRX)
2034 mem = &adapter->pmrx;
2035 else if (t.mem_id == MEM_PMTX)
2036 mem = &adapter->pmtx;
2037 else
2038 return -EINVAL;
2039
2040 /*
2041 * Version scheme:
2042 * bits 0..9: chip version
2043 * bits 10..15: chip revision
2044 */
2045 t.version = 3 | (adapter->params.rev << 10);
2046 if (copy_to_user(useraddr, &t, sizeof(t)))
2047 return -EFAULT;
2048
2049 /*
2050 * Read 256 bytes at a time as len can be large and we don't
2051 * want to use huge intermediate buffers.
2052 */
2053 useraddr += sizeof(t); /* advance to start of buffer */
2054 while (t.len) {
2055 unsigned int chunk =
2056 min_t(unsigned int, t.len, sizeof(buf));
2057
2058 ret =
2059 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2060 buf);
2061 if (ret)
2062 return ret;
2063 if (copy_to_user(useraddr, buf, chunk))
2064 return -EFAULT;
2065 useraddr += chunk;
2066 t.addr += chunk;
2067 t.len -= chunk;
2068 }
2069 break;
2070 }
2071 case CHELSIO_SET_TRACE_FILTER:{
2072 struct ch_trace t;
2073 const struct trace_params *tp;
2074
2075 if (!capable(CAP_NET_ADMIN))
2076 return -EPERM;
2077 if (!offload_running(adapter))
2078 return -EAGAIN;
2079 if (copy_from_user(&t, useraddr, sizeof(t)))
2080 return -EFAULT;
2081
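/*
 * The filter fields of ch_trace, starting at sip, mirror the layout
 * of struct trace_params, so the user-supplied values can be passed
 * to the hardware helpers in place.
 */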
2082 tp = (const struct trace_params *)&t.sip;
2083 if (t.config_tx)
2084 t3_config_trace_filter(adapter, tp, 0,
2085 t.invert_match,
2086 t.trace_tx);
2087 if (t.config_rx)
2088 t3_config_trace_filter(adapter, tp, 1,
2089 t.invert_match,
2090 t.trace_rx);
2091 break;
2092 }
2093 default:
2094 return -EOPNOTSUPP;
2095 }
2096 return 0;
2097}
2098
2099static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2100{
4d22de3e 2101 struct mii_ioctl_data *data = if_mii(req);
2102 struct port_info *pi = netdev_priv(dev);
2103 struct adapter *adapter = pi->adapter;
2104 int ret, mmd;
2105
2106 switch (cmd) {
2107 case SIOCGMIIPHY:
2108 data->phy_id = pi->phy.addr;
2109 /* FALLTHRU */
2110 case SIOCGMIIREG:{
2111 u32 val;
2112 struct cphy *phy = &pi->phy;
2113
2114 if (!phy->mdio_read)
2115 return -EOPNOTSUPP;
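/*
 * 10G PHYs are addressed per MMD (clause 45): the MMD is carried in
 * the upper bits of phy_id and defaults to the PCS device.  1G PHYs
 * take plain clause-22 register numbers.
 */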
2116 if (is_10G(adapter)) {
2117 mmd = data->phy_id >> 8;
2118 if (!mmd)
2119 mmd = MDIO_DEV_PCS;
2120 else if (mmd > MDIO_DEV_XGXS)
2121 return -EINVAL;
2122
2123 ret =
2124 phy->mdio_read(adapter, data->phy_id & 0x1f,
2125 mmd, data->reg_num, &val);
2126 } else
2127 ret =
2128 phy->mdio_read(adapter, data->phy_id & 0x1f,
2129 0, data->reg_num & 0x1f,
2130 &val);
2131 if (!ret)
2132 data->val_out = val;
2133 break;
2134 }
2135 case SIOCSMIIREG:{
2136 struct cphy *phy = &pi->phy;
2137
2138 if (!capable(CAP_NET_ADMIN))
2139 return -EPERM;
2140 if (!phy->mdio_write)
2141 return -EOPNOTSUPP;
2142 if (is_10G(adapter)) {
2143 mmd = data->phy_id >> 8;
2144 if (!mmd)
2145 mmd = MDIO_DEV_PCS;
2146 else if (mmd > MDIO_DEV_XGXS)
2147 return -EINVAL;
2148
2149 ret =
2150 phy->mdio_write(adapter,
2151 data->phy_id & 0x1f, mmd,
2152 data->reg_num,
2153 data->val_in);
2154 } else
2155 ret =
2156 phy->mdio_write(adapter,
2157 data->phy_id & 0x1f, 0,
2158 data->reg_num & 0x1f,
2159 data->val_in);
2160 break;
2161 }
2162 case SIOCCHIOCTL:
2163 return cxgb_extension_ioctl(dev, req->ifr_data);
2164 default:
2165 return -EOPNOTSUPP;
2166 }
2167 return ret;
2168}
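/*
 * Userspace sketch (not part of the driver): the extension commands
 * handled above are reached through SIOCCHIOCTL, with ifr_data
 * pointing at a command structure whose first word is the command
 * code.  Assuming a socket fd, an interface named "eth0" and the
 * definitions from cxgb3_ioctl.h, querying the queue-set count
 * could look like:
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&edata;
 *	if (ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
 *		printf("%u queue sets\n", edata.val);
 */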
2169
2170static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2171{
4d22de3e 2172 struct port_info *pi = netdev_priv(dev);
2173 struct adapter *adapter = pi->adapter;
2174 int ret;
2175
2176 if (new_mtu < 81) /* accommodate SACK */
2177 return -EINVAL;
2178 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2179 return ret;
2180 dev->mtu = new_mtu;
2181 init_port_mtus(adapter);
2182 if (adapter->params.rev == 0 && offload_running(adapter))
2183 t3_load_mtus(adapter, adapter->params.mtus,
2184 adapter->params.a_wnd, adapter->params.b_wnd,
2185 adapter->port[0]->mtu);
2186 return 0;
2187}
2188
2189static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2190{
4d22de3e 2191 struct port_info *pi = netdev_priv(dev);
5fbf816f 2192 struct adapter *adapter = pi->adapter;
2193 struct sockaddr *addr = p;
2194
2195 if (!is_valid_ether_addr(addr->sa_data))
2196 return -EINVAL;
2197
2198 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2199 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
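/* Keep the offload source MAC table (SMT) in sync with the new address. */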
2200 if (offload_running(adapter))
2201 write_smt_entry(adapter, pi->port_id);
2202 return 0;
2203}
2204
2205/**
2206 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2207 * @adap: the adapter
2208 * @p: the port
2209 *
2210 * Ensures that current Rx processing on any of the queues associated with
2211 * the given port completes before returning. We do this by acquiring and
2212 * releasing the locks of the response queues associated with the port.
2213 */
2214static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2215{
2216 int i;
2217
2218 for (i = 0; i < p->nqsets; i++) {
2219 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2220
2221 spin_lock_irq(&q->lock);
2222 spin_unlock_irq(&q->lock);
2223 }
2224}
2225
2226static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2227{
4d22de3e 2228 struct port_info *pi = netdev_priv(dev);
5fbf816f 2229 struct adapter *adapter = pi->adapter;
2230
2231 pi->vlan_grp = grp;
2232 if (adapter->params.rev > 0)
2233 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2234 else {
2235 /* single control for all ports */
2236 unsigned int i, have_vlans = 0;
2237 for_each_port(adapter, i)
2238 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2239
2240 t3_set_vlan_accel(adapter, 1, have_vlans);
2241 }
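/* Make sure no Rx path is still using the old VLAN settings before returning. */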
2242 t3_synchronize_rx(adapter, pi);
2243}
2244
2245#ifdef CONFIG_NET_POLL_CONTROLLER
2246static void cxgb_netpoll(struct net_device *dev)
2247{
890de332 2248 struct port_info *pi = netdev_priv(dev);
5fbf816f 2249 struct adapter *adapter = pi->adapter;
890de332 2250 int qidx;
4d22de3e 2251
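/*
 * Poll by calling the interrupt handler of each of this port's queue
 * sets directly; with MSI-X the handler takes the queue set as its
 * cookie, otherwise it takes the adapter.
 */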
2252 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2253 struct sge_qset *qs = &adapter->sge.qs[qidx];
2254 void *source;
2eab17ab 2255
2256 if (adapter->flags & USING_MSIX)
2257 source = qs;
2258 else
2259 source = adapter;
2260
2261 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2262 }
2263}
2264#endif
2265
2266/*
2267 * Periodic accumulation of MAC statistics.
2268 */
2269static void mac_stats_update(struct adapter *adapter)
2270{
2271 int i;
2272
2273 for_each_port(adapter, i) {
2274 struct net_device *dev = adapter->port[i];
2275 struct port_info *p = netdev_priv(dev);
2276
2277 if (netif_running(dev)) {
2278 spin_lock(&adapter->stats_lock);
2279 t3_mac_update_stats(&p->mac);
2280 spin_unlock(&adapter->stats_lock);
2281 }
2282 }
2283}
2284
2285static void check_link_status(struct adapter *adapter)
2286{
2287 int i;
2288
2289 for_each_port(adapter, i) {
2290 struct net_device *dev = adapter->port[i];
2291 struct port_info *p = netdev_priv(dev);
2292
2293 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2294 t3_link_changed(adapter, i);
2295 }
2296}
2297
2298static void check_t3b2_mac(struct adapter *adapter)
2299{
2300 int i;
2301
2302 if (!rtnl_trylock()) /* synchronize with ifdown */
2303 return;
2304
2305 for_each_port(adapter, i) {
2306 struct net_device *dev = adapter->port[i];
2307 struct port_info *p = netdev_priv(dev);
2308 int status;
2309
2310 if (!netif_running(dev))
2311 continue;
2312
2313 status = 0;
6d6dabac 2314 if (netif_running(dev) && netif_carrier_ok(dev))
2315 status = t3b2_mac_watchdog_task(&p->mac);
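/* status 1: the watchdog toggled the MAC; 2: the MAC was reset and must be reprogrammed. */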
2316 if (status == 1)
2317 p->mac.stats.num_toggled++;
2318 else if (status == 2) {
2319 struct cmac *mac = &p->mac;
2320
2321 t3_mac_set_mtu(mac, dev->mtu);
2322 t3_mac_set_address(mac, 0, dev->dev_addr);
2323 cxgb_set_rxmode(dev);
2324 t3_link_start(&p->phy, mac, &p->link_config);
2325 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2326 t3_port_intr_enable(adapter, p->port_id);
2327 p->mac.stats.num_resets++;
2328 }
2329 }
2330 rtnl_unlock();
2331}
2332
2333
2334static void t3_adap_check_task(struct work_struct *work)
2335{
2336 struct adapter *adapter = container_of(work, struct adapter,
2337 adap_check_task.work);
2338 const struct adapter_params *p = &adapter->params;
2339
2340 adapter->check_task_cnt++;
2341
2342 /* Check link status for PHYs without interrupts */
2343 if (p->linkpoll_period)
2344 check_link_status(adapter);
2345
2346 /* Accumulate MAC stats if needed */
2347 if (!p->linkpoll_period ||
2348 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2349 p->stats_update_period) {
2350 mac_stats_update(adapter);
2351 adapter->check_task_cnt = 0;
2352 }
2353
2354 if (p->rev == T3_REV_B2)
2355 check_t3b2_mac(adapter);
2356
 2357 /* Schedule the next run of the check task if any port is active. */
2358 spin_lock(&adapter->work_lock);
2359 if (adapter->open_device_map & PORT_MASK)
2360 schedule_chk_task(adapter);
2361 spin_unlock(&adapter->work_lock);
2362}
2363
2364/*
2365 * Processes external (PHY) interrupts in process context.
2366 */
2367static void ext_intr_task(struct work_struct *work)
2368{
2369 struct adapter *adapter = container_of(work, struct adapter,
2370 ext_intr_handler_task);
2371
2372 t3_phy_intr_handler(adapter);
2373
2374 /* Now reenable external interrupts */
2375 spin_lock_irq(&adapter->work_lock);
2376 if (adapter->slow_intr_mask) {
2377 adapter->slow_intr_mask |= F_T3DBG;
2378 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2379 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2380 adapter->slow_intr_mask);
2381 }
2382 spin_unlock_irq(&adapter->work_lock);
2383}
2384
2385/*
2386 * Interrupt-context handler for external (PHY) interrupts.
2387 */
2388void t3_os_ext_intr_handler(struct adapter *adapter)
2389{
2390 /*
2391 * Schedule a task to handle external interrupts as they may be slow
2392 * and we use a mutex to protect MDIO registers. We disable PHY
2393 * interrupts in the meantime and let the task reenable them when
2394 * it's done.
2395 */
2396 spin_lock(&adapter->work_lock);
2397 if (adapter->slow_intr_mask) {
2398 adapter->slow_intr_mask &= ~F_T3DBG;
2399 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2400 adapter->slow_intr_mask);
2401 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2402 }
2403 spin_unlock(&adapter->work_lock);
2404}
2405
2406void t3_fatal_err(struct adapter *adapter)
2407{
2408 unsigned int fw_status[4];
2409
2410 if (adapter->flags & FULL_INIT_DONE) {
2411 t3_sge_stop(adapter);
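/* Silence MAC Tx and Rx on both ports before masking interrupts. */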
2412 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2413 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2414 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2415 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2416 t3_intr_disable(adapter);
2417 }
2418 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2419 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2420 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2421 fw_status[0], fw_status[1],
2422 fw_status[2], fw_status[3]);
2423
2424}
2425
2426/**
2427 * t3_io_error_detected - called when PCI error is detected
2428 * @pdev: Pointer to PCI device
2429 * @state: The current pci connection state
2430 *
2431 * This function is called after a PCI bus error affecting
2432 * this device has been detected.
2433 */
2434static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2435 pci_channel_state_t state)
2436{
bc4b6b52 2437 struct adapter *adapter = pci_get_drvdata(pdev);
2438 int i;
2439
2440 /* Stop all ports */
2441 for_each_port(adapter, i) {
2442 struct net_device *netdev = adapter->port[i];
2443
2444 if (netif_running(netdev))
2445 cxgb_close(netdev);
2446 }
2447
2eab17ab 2448 if (is_offload(adapter) &&
2449 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2450 offload_close(&adapter->tdev);
2451
2452 adapter->flags &= ~FULL_INIT_DONE;
2453
2454 pci_disable_device(pdev);
2455
48c4b6db 2456 /* Request a slot reset. */
2457 return PCI_ERS_RESULT_NEED_RESET;
2458}
2459
2460/**
2461 * t3_io_slot_reset - called after the pci bus has been reset.
2462 * @pdev: Pointer to PCI device
2463 *
 2464 * Restart the card from scratch, as if from a cold boot.
2465 */
2466static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2467{
bc4b6b52 2468 struct adapter *adapter = pci_get_drvdata(pdev);
2469
2470 if (pci_enable_device(pdev)) {
2471 dev_err(&pdev->dev,
2472 "Cannot re-enable PCI device after reset.\n");
48c4b6db 2473 goto err;
2474 }
2475 pci_set_master(pdev);
2476 pci_restore_state(pdev);
2477
2478 /* Free sge resources */
2479 t3_free_sge_resources(adapter);
91a6b50c 2480
204e2f98 2481 if (t3_replay_prep_adapter(adapter))
48c4b6db 2482 goto err;
2483
2484 return PCI_ERS_RESULT_RECOVERED;
2485err:
2486 return PCI_ERS_RESULT_DISCONNECT;
2487}
2488
2489/**
2490 * t3_io_resume - called when traffic can start flowing again.
2491 * @pdev: Pointer to PCI device
2492 *
2493 * This callback is called when the error recovery driver tells us that
 2494 * it's OK to resume normal operation.
2495 */
2496static void t3_io_resume(struct pci_dev *pdev)
2497{
bc4b6b52 2498 struct adapter *adapter = pci_get_drvdata(pdev);
2499 int i;
2500
2501 /* Restart the ports */
2502 for_each_port(adapter, i) {
2503 struct net_device *netdev = adapter->port[i];
2504
2505 if (netif_running(netdev)) {
2506 if (cxgb_open(netdev)) {
2507 dev_err(&pdev->dev,
2508 "can't bring device back up"
2509 " after reset\n");
2510 continue;
2511 }
2512 netif_device_attach(netdev);
2513 }
2514 }
2515}
2516
2517static struct pci_error_handlers t3_err_handler = {
2518 .error_detected = t3_io_error_detected,
2519 .slot_reset = t3_io_slot_reset,
2520 .resume = t3_io_resume,
2521};
2522
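/* Request one MSI-X vector per queue set plus one for slow-path (error/async) events. */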
2523static int __devinit cxgb_enable_msix(struct adapter *adap)
2524{
2525 struct msix_entry entries[SGE_QSETS + 1];
2526 int i, err;
2527
2528 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2529 entries[i].entry = i;
2530
2531 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2532 if (!err) {
2533 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2534 adap->msix_info[i].vec = entries[i].vector;
2535 } else if (err > 0)
2536 dev_info(&adap->pdev->dev,
2537 "only %d MSI-X vectors left, not using MSI-X\n", err);
2538 return err;
2539}
2540
2541static void __devinit print_port_info(struct adapter *adap,
2542 const struct adapter_info *ai)
2543{
2544 static const char *pci_variant[] = {
2545 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2546 };
2547
2548 int i;
2549 char buf[80];
2550
2551 if (is_pcie(adap))
2552 snprintf(buf, sizeof(buf), "%s x%d",
2553 pci_variant[adap->params.pci.variant],
2554 adap->params.pci.width);
2555 else
2556 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2557 pci_variant[adap->params.pci.variant],
2558 adap->params.pci.speed, adap->params.pci.width);
2559
2560 for_each_port(adap, i) {
2561 struct net_device *dev = adap->port[i];
2562 const struct port_info *pi = netdev_priv(dev);
2563
2564 if (!test_bit(i, &adap->registered_device_map))
2565 continue;
8ac3ba68 2566 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
4d22de3e 2567 dev->name, ai->desc, pi->port_type->desc,
8ac3ba68 2568 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2569 (adap->flags & USING_MSIX) ? " MSI-X" :
2570 (adap->flags & USING_MSI) ? " MSI" : "");
2571 if (adap->name == dev->name && adap->params.vpd.mclk)
2572 printk(KERN_INFO
2573 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2574 adap->name, t3_mc7_size(&adap->cm) >> 20,
2575 t3_mc7_size(&adap->pmtx) >> 20,
2576 t3_mc7_size(&adap->pmrx) >> 20,
2577 adap->params.vpd.sn);
2578 }
2579}
2580
2581static int __devinit init_one(struct pci_dev *pdev,
2582 const struct pci_device_id *ent)
2583{
2584 static int version_printed;
2585
2586 int i, err, pci_using_dac = 0;
2587 unsigned long mmio_start, mmio_len;
2588 const struct adapter_info *ai;
2589 struct adapter *adapter = NULL;
2590 struct port_info *pi;
2591
2592 if (!version_printed) {
2593 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2594 ++version_printed;
2595 }
2596
2597 if (!cxgb3_wq) {
2598 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2599 if (!cxgb3_wq) {
2600 printk(KERN_ERR DRV_NAME
2601 ": cannot initialize work queue\n");
2602 return -ENOMEM;
2603 }
2604 }
2605
2606 err = pci_request_regions(pdev, DRV_NAME);
2607 if (err) {
2608 /* Just info, some other driver may have claimed the device. */
2609 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2610 return err;
2611 }
2612
2613 err = pci_enable_device(pdev);
2614 if (err) {
2615 dev_err(&pdev->dev, "cannot enable PCI device\n");
2616 goto out_release_regions;
2617 }
2618
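/* Prefer 64-bit DMA; fall back to 32-bit addressing if the platform cannot provide it. */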
2619 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2620 pci_using_dac = 1;
2621 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2622 if (err) {
2623 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2624 "coherent allocations\n");
2625 goto out_disable_device;
2626 }
2627 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2628 dev_err(&pdev->dev, "no usable DMA configuration\n");
2629 goto out_disable_device;
2630 }
2631
2632 pci_set_master(pdev);
204e2f98 2633 pci_save_state(pdev);
2634
2635 mmio_start = pci_resource_start(pdev, 0);
2636 mmio_len = pci_resource_len(pdev, 0);
2637 ai = t3_get_adapter_info(ent->driver_data);
2638
2639 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2640 if (!adapter) {
2641 err = -ENOMEM;
2642 goto out_disable_device;
2643 }
2644
2645 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2646 if (!adapter->regs) {
2647 dev_err(&pdev->dev, "cannot map device registers\n");
2648 err = -ENOMEM;
2649 goto out_free_adapter;
2650 }
2651
2652 adapter->pdev = pdev;
2653 adapter->name = pci_name(pdev);
2654 adapter->msg_enable = dflt_msg_enable;
2655 adapter->mmio_len = mmio_len;
2656
2657 mutex_init(&adapter->mdio_lock);
2658 spin_lock_init(&adapter->work_lock);
2659 spin_lock_init(&adapter->stats_lock);
2660
2661 INIT_LIST_HEAD(&adapter->adapter_list);
2662 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2663 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2664
2665 for (i = 0; i < ai->nports; ++i) {
2666 struct net_device *netdev;
2667
2668 netdev = alloc_etherdev(sizeof(struct port_info));
2669 if (!netdev) {
2670 err = -ENOMEM;
2671 goto out_free_dev;
2672 }
2673
2674 SET_NETDEV_DEV(netdev, &pdev->dev);
2675
2676 adapter->port[i] = netdev;
2677 pi = netdev_priv(netdev);
5fbf816f 2678 pi->adapter = adapter;
2679 pi->rx_csum_offload = 1;
2680 pi->nqsets = 1;
2681 pi->first_qset = i;
2682 pi->activity = 0;
2683 pi->port_id = i;
2684 netif_carrier_off(netdev);
2685 netdev->irq = pdev->irq;
2686 netdev->mem_start = mmio_start;
2687 netdev->mem_end = mmio_start + mmio_len - 1;
2688 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2689 netdev->features |= NETIF_F_LLTX;
2690 if (pci_using_dac)
2691 netdev->features |= NETIF_F_HIGHDMA;
2692
2693 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2694 netdev->vlan_rx_register = vlan_rx_register;
2695
2696 netdev->open = cxgb_open;
2697 netdev->stop = cxgb_close;
2698 netdev->hard_start_xmit = t3_eth_xmit;
2699 netdev->get_stats = cxgb_get_stats;
2700 netdev->set_multicast_list = cxgb_set_rxmode;
2701 netdev->do_ioctl = cxgb_ioctl;
2702 netdev->change_mtu = cxgb_change_mtu;
2703 netdev->set_mac_address = cxgb_set_mac_addr;
2704#ifdef CONFIG_NET_POLL_CONTROLLER
2705 netdev->poll_controller = cxgb_netpoll;
2706#endif
2707
2708 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2709 }
2710
5fbf816f 2711 pci_set_drvdata(pdev, adapter);
2712 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2713 err = -ENODEV;
2714 goto out_free_dev;
2715 }
2eab17ab 2716
2717 /*
2718 * The card is now ready to go. If any errors occur during device
2719 * registration we do not fail the whole card but rather proceed only
2720 * with the ports we manage to register successfully. However we must
2721 * register at least one net device.
2722 */
2723 for_each_port(adapter, i) {
2724 err = register_netdev(adapter->port[i]);
2725 if (err)
2726 dev_warn(&pdev->dev,
2727 "cannot register net device %s, skipping\n",
2728 adapter->port[i]->name);
2729 else {
2730 /*
2731 * Change the name we use for messages to the name of
2732 * the first successfully registered interface.
2733 */
2734 if (!adapter->registered_device_map)
2735 adapter->name = adapter->port[i]->name;
2736
2737 __set_bit(i, &adapter->registered_device_map);
2738 }
2739 }
2740 if (!adapter->registered_device_map) {
2741 dev_err(&pdev->dev, "could not register any net devices\n");
2742 goto out_free_dev;
2743 }
2744
2745 /* Driver's ready. Reflect it on LEDs */
2746 t3_led_ready(adapter);
2747
2748 if (is_offload(adapter)) {
2749 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2750 cxgb3_adapter_ofld(adapter);
2751 }
2752
2753 /* See what interrupts we'll be using */
2754 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2755 adapter->flags |= USING_MSIX;
2756 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2757 adapter->flags |= USING_MSI;
2758
0ee8d33c 2759 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2760 &cxgb3_attr_group);
2761
2762 print_port_info(adapter, ai);
2763 return 0;
2764
2765out_free_dev:
2766 iounmap(adapter->regs);
2767 for (i = ai->nports - 1; i >= 0; --i)
2768 if (adapter->port[i])
2769 free_netdev(adapter->port[i]);
2770
2771out_free_adapter:
2772 kfree(adapter);
2773
2774out_disable_device:
2775 pci_disable_device(pdev);
2776out_release_regions:
2777 pci_release_regions(pdev);
2778 pci_set_drvdata(pdev, NULL);
2779 return err;
2780}
2781
2782static void __devexit remove_one(struct pci_dev *pdev)
2783{
5fbf816f 2784 struct adapter *adapter = pci_get_drvdata(pdev);
4d22de3e 2785
5fbf816f 2786 if (adapter) {
4d22de3e 2787 int i;
2788
2789 t3_sge_stop(adapter);
0ee8d33c 2790 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2791 &cxgb3_attr_group);
2792
2793 if (is_offload(adapter)) {
2794 cxgb3_adapter_unofld(adapter);
2795 if (test_bit(OFFLOAD_DEVMAP_BIT,
2796 &adapter->open_device_map))
2797 offload_close(&adapter->tdev);
2798 }
2799
2800 for_each_port(adapter, i)
2801 if (test_bit(i, &adapter->registered_device_map))
2802 unregister_netdev(adapter->port[i]);
2803
2804 t3_free_sge_resources(adapter);
2805 cxgb_disable_msi(adapter);
2806
2807 for_each_port(adapter, i)
2808 if (adapter->port[i])
2809 free_netdev(adapter->port[i]);
2810
2811 iounmap(adapter->regs);
2812 kfree(adapter);
2813 pci_release_regions(pdev);
2814 pci_disable_device(pdev);
2815 pci_set_drvdata(pdev, NULL);
2816 }
2817}
2818
2819static struct pci_driver driver = {
2820 .name = DRV_NAME,
2821 .id_table = cxgb3_pci_tbl,
2822 .probe = init_one,
2823 .remove = __devexit_p(remove_one),
91a6b50c 2824 .err_handler = &t3_err_handler,
2825};
2826
2827static int __init cxgb3_init_module(void)
2828{
2829 int ret;
2830
2831 cxgb3_offload_init();
2832
2833 ret = pci_register_driver(&driver);
2834 return ret;
2835}
2836
2837static void __exit cxgb3_cleanup_module(void)
2838{
2839 pci_unregister_driver(&driver);
2840 if (cxgb3_wq)
2841 destroy_workqueue(cxgb3_wq);
2842}
2843
2844module_init(cxgb3_init_module);
2845module_exit(cxgb3_cleanup_module);