cxgb3 - parity initialization for T3C adapters.
[linux-2.6-block.git] / drivers/net/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 * link_report - show link status and link speed/duplex
 * @dev: the port whose settings are to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes.  The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
                        pi->phy.ops->power_down(&pi->phy, 1);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

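/*
 * Request the MSI-X data interrupts: one vector per queue set, starting
 * at vector 1 (vector 0 is reserved for asynchronous events).  On failure
 * all vectors acquired so far are released again.
 */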
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

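/*
 * Initialize the TP's parity-protected memories on T3C: push a dummy
 * write to every SMT, L2T, and routing-table entry plus one TCB field
 * through the management Tx queue, then wait for all 16 + 2048 + 2048 + 1
 * replies.  Returns 0 on success or -ETIMEDOUT from await_mgmt_replies().
 */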
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return i;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

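/*
 * Register a NAPI handler for each queue set the SGE code has set up;
 * qs->adap is only non-NULL for queue sets that were actually allocated.
 */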
static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

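/*
 * Generic show/store helpers backing the sysfs attributes below.  Both
 * take the RTNL lock so attribute access is synchronized with ioctls
 * that may reconfigure or shut down the device.
 */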
static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t (*format)(struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

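/*
 * Show/store helpers for the eight hardware Tx scheduler attributes
 * (sched0..sched7).  A scheduler's rate is read from the TP TM PIO
 * registers and reported in Kbps, derived from its bytes-per-tick and
 * clocks-per-tick fields and the core clock; stores go through
 * t3_config_sched().
 */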
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

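/*
 * Program one entry of the source MAC table: build a CPL_SMT_WRITE_REQ
 * carrying the port's MAC address and queue it on the offload channel.
 */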
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
        int i, j;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

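/*
 * Load the firmware image this driver was built against (FW_FNAME expands
 * to t3fw-<major>.<minor>.<micro>.bin) via request_firmware() and flash it
 * into the adapter with t3_load_fw().
 */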
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

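/*
 * Load the protocol-engine SRAM image matching the chip revision and the
 * TP version the driver expects, verify it with t3_check_tpsram(), and
 * write it with t3_set_proto_sram().  Chips for which t3rev2char()
 * returns 0 need no protocol SRAM update.
 */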
static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
846 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
847 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * cxgb_up - enable the adapter
 * @adapter: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;
        int must_load;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap, &must_load);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        if (err && must_load)
                                goto out;
                }

                err = t3_check_tpsram_version(adap, &must_load);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        if (err && must_load)
                                goto out;
                }

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                init_napi(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

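/*
 * Schedule the periodic adapter check task.  The period comes from
 * linkpoll_period (in tenths of a second) when link polling is used,
 * otherwise from the MAC statistics update period.
 */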
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
                (HZ * adap->params.linkpoll_period) / 10 :
                adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

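/*
 * Bring up the adapter's offload capabilities: enable offload mode,
 * activate the offload module, program the MTU table and SMT, export the
 * scheduler sysfs attributes, and notify registered clients.  On failure
 * the offload state set up so far is torn down again.
 */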
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

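/*
 * net_device open handler: bring the adapter up on the first open, enable
 * offload unless the ofld_disable module parameter is set, start the link,
 * and kick off the periodic check task if no other port was open yet.
 */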
static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
                quiesce_rx(adapter);
                return err;
        }

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t3_port_intr_disable(adapter, pi->port_id);
        netif_stop_queue(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

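/*
 * Fill in net_device_stats from the hardware MAC counters.  The counters
 * are refreshed under stats_lock because they are also accumulated by the
 * periodic check task.
 */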
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
                pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
                pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;
}

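/*
 * Copy the contiguous register block [start, end] into the register
 * snapshot buffer at the offset given by the first register's address.
 */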
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i;

        if (data == 0)
                data = 2;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

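/*
 * Translate an ethtool speed/duplex pair into the corresponding
 * SUPPORTED_* capability bit; returns 0 for unsupported combinations.
 */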
static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        p->rx_csum_offload = data;
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = q->fl_size;
        e->rx_mini_pending = q->rspq_size;
        e->rx_jumbo_pending = q->jumbo_size;
        e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q;
        int i;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        q = &adapter->params.sge.qset[pi->first_qset];
        for (i = 0; i < pi->nqsets; ++i, ++q) {
                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i, err = 0;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 aligned_offset, aligned_len, *p;
        u8 *buf;
        int err;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t3_seeprom_wp(adapter, 0);
        if (err)
                goto out;

        for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t3_seeprom_wp(adapter, 1);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_rx_csum = get_rx_csum,
        .set_rx_csum = set_rx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .phys_id = cxgb3_phys_id,
        .nway_reset = restart_autoneg,
        .get_sset_count = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_wol = get_wol,
        .set_tso = ethtool_op_set_tso,
};

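/*
 * Range check used by the extension ioctl below: negative values mean
 * "parameter not supplied" and pass the check, otherwise the value must
 * lie within [lo, hi].
 */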
static int in_range(int val, int lo, int hi)
{
        return val < 0 || (val <= hi && val >= lo);
}

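/*
 * Handler for the Chelsio-private extension ioctl: the command word is
 * read first, then each command copies in its own argument structure,
 * validates it, and applies the requested change.
 */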
1743static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1744{
5fbf816f
DLR
1745 struct port_info *pi = netdev_priv(dev);
1746 struct adapter *adapter = pi->adapter;
4d22de3e 1747 u32 cmd;
5fbf816f 1748 int ret;
4d22de3e
DLR
1749
1750 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1751 return -EFAULT;
1752
1753 switch (cmd) {
4d22de3e
DLR
1754 case CHELSIO_SET_QSET_PARAMS:{
1755 int i;
1756 struct qset_params *q;
1757 struct ch_qset_params t;
1758
1759 if (!capable(CAP_NET_ADMIN))
1760 return -EPERM;
1761 if (copy_from_user(&t, useraddr, sizeof(t)))
1762 return -EFAULT;
1763 if (t.qset_idx >= SGE_QSETS)
1764 return -EINVAL;
1765 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1766 !in_range(t.cong_thres, 0, 255) ||
1767 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1768 MAX_TXQ_ENTRIES) ||
1769 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1770 MAX_TXQ_ENTRIES) ||
1771 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1772 MAX_CTRL_TXQ_ENTRIES) ||
1773 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1774 MAX_RX_BUFFERS)
1775 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1776 MAX_RX_JUMBO_BUFFERS)
1777 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1778 MAX_RSPQ_ENTRIES))
1779 return -EINVAL;
1780 if ((adapter->flags & FULL_INIT_DONE) &&
1781 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1782 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1783 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1784 t.polling >= 0 || t.cong_thres >= 0))
1785 return -EBUSY;
1786
1787 q = &adapter->params.sge.qset[t.qset_idx];
1788
1789 if (t.rspq_size >= 0)
1790 q->rspq_size = t.rspq_size;
1791 if (t.fl_size[0] >= 0)
1792 q->fl_size = t.fl_size[0];
1793 if (t.fl_size[1] >= 0)
1794 q->jumbo_size = t.fl_size[1];
1795 if (t.txq_size[0] >= 0)
1796 q->txq_size[0] = t.txq_size[0];
1797 if (t.txq_size[1] >= 0)
1798 q->txq_size[1] = t.txq_size[1];
1799 if (t.txq_size[2] >= 0)
1800 q->txq_size[2] = t.txq_size[2];
1801 if (t.cong_thres >= 0)
1802 q->cong_thres = t.cong_thres;
1803 if (t.intr_lat >= 0) {
1804 struct sge_qset *qs =
1805 &adapter->sge.qs[t.qset_idx];
1806
1807 q->coalesce_usecs = t.intr_lat;
1808 t3_update_qset_coalesce(qs, q);
1809 }
1810 if (t.polling >= 0) {
1811 if (adapter->flags & USING_MSIX)
1812 q->polling = t.polling;
1813 else {
1814 /* No polling with INTx for T3A */
1815 if (adapter->params.rev == 0 &&
1816 !(adapter->flags & USING_MSI))
1817 t.polling = 0;
1818
1819 for (i = 0; i < SGE_QSETS; i++) {
1820 q = &adapter->params.sge.
1821 qset[i];
1822 q->polling = t.polling;
1823 }
1824 }
1825 }
1826 break;
1827 }
1828 case CHELSIO_GET_QSET_PARAMS:{
1829 struct qset_params *q;
1830 struct ch_qset_params t;
1831
1832 if (copy_from_user(&t, useraddr, sizeof(t)))
1833 return -EFAULT;
1834 if (t.qset_idx >= SGE_QSETS)
1835 return -EINVAL;
1836
1837 q = &adapter->params.sge.qset[t.qset_idx];
1838 t.rspq_size = q->rspq_size;
1839 t.txq_size[0] = q->txq_size[0];
1840 t.txq_size[1] = q->txq_size[1];
1841 t.txq_size[2] = q->txq_size[2];
1842 t.fl_size[0] = q->fl_size;
1843 t.fl_size[1] = q->jumbo_size;
1844 t.polling = q->polling;
1845 t.intr_lat = q->coalesce_usecs;
1846 t.cong_thres = q->cong_thres;
1847
1848 if (copy_to_user(useraddr, &t, sizeof(t)))
1849 return -EFAULT;
1850 break;
1851 }
1852 case CHELSIO_SET_QSET_NUM:{
1853 struct ch_reg edata;
4d22de3e
DLR
1854 unsigned int i, first_qset = 0, other_qsets = 0;
1855
1856 if (!capable(CAP_NET_ADMIN))
1857 return -EPERM;
1858 if (adapter->flags & FULL_INIT_DONE)
1859 return -EBUSY;
1860 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1861 return -EFAULT;
1862 if (edata.val < 1 ||
1863 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1864 return -EINVAL;
1865
1866 for_each_port(adapter, i)
1867 if (adapter->port[i] && adapter->port[i] != dev)
1868 other_qsets += adap2pinfo(adapter, i)->nqsets;
1869
1870 if (edata.val + other_qsets > SGE_QSETS)
1871 return -EINVAL;
1872
1873 pi->nqsets = edata.val;
1874
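 /* Re-number first_qset so each port's queue sets stay contiguous. */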
1875 for_each_port(adapter, i)
1876 if (adapter->port[i]) {
1877 pi = adap2pinfo(adapter, i);
1878 pi->first_qset = first_qset;
1879 first_qset += pi->nqsets;
1880 }
1881 break;
1882 }
1883 case CHELSIO_GET_QSET_NUM:{
1884 struct ch_reg edata;
4d22de3e
DLR
1885
1886 edata.cmd = CHELSIO_GET_QSET_NUM;
1887 edata.val = pi->nqsets;
1888 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1889 return -EFAULT;
1890 break;
1891 }
1892 case CHELSIO_LOAD_FW:{
1893 u8 *fw_data;
1894 struct ch_mem_range t;
1895
1896 if (!capable(CAP_NET_ADMIN))
1897 return -EPERM;
1898 if (copy_from_user(&t, useraddr, sizeof(t)))
1899 return -EFAULT;
1900
1901 fw_data = kmalloc(t.len, GFP_KERNEL);
1902 if (!fw_data)
1903 return -ENOMEM;
1904
 1905 if (copy_from_user(fw_data, useraddr + sizeof(t),
 1906 t.len)) {
1907 kfree(fw_data);
1908 return -EFAULT;
1909 }
1910
1911 ret = t3_load_fw(adapter, fw_data, t.len);
1912 kfree(fw_data);
1913 if (ret)
1914 return ret;
1915 break;
1916 }
1917 case CHELSIO_SETMTUTAB:{
1918 struct ch_mtus m;
1919 int i;
1920
1921 if (!is_offload(adapter))
1922 return -EOPNOTSUPP;
1923 if (!capable(CAP_NET_ADMIN))
1924 return -EPERM;
1925 if (offload_running(adapter))
1926 return -EBUSY;
1927 if (copy_from_user(&m, useraddr, sizeof(m)))
1928 return -EFAULT;
1929 if (m.nmtus != NMTUS)
1930 return -EINVAL;
1931 if (m.mtus[0] < 81) /* accommodate SACK */
1932 return -EINVAL;
1933
1934 /* MTUs must be in ascending order */
1935 for (i = 1; i < NMTUS; ++i)
1936 if (m.mtus[i] < m.mtus[i - 1])
1937 return -EINVAL;
1938
1939 memcpy(adapter->params.mtus, m.mtus,
1940 sizeof(adapter->params.mtus));
1941 break;
1942 }
1943 case CHELSIO_GET_PM:{
1944 struct tp_params *p = &adapter->params.tp;
1945 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1946
1947 if (!is_offload(adapter))
1948 return -EOPNOTSUPP;
1949 m.tx_pg_sz = p->tx_pg_size;
1950 m.tx_num_pg = p->tx_num_pgs;
1951 m.rx_pg_sz = p->rx_pg_size;
1952 m.rx_num_pg = p->rx_num_pgs;
1953 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1954 if (copy_to_user(useraddr, &m, sizeof(m)))
1955 return -EFAULT;
1956 break;
1957 }
1958 case CHELSIO_SET_PM:{
1959 struct ch_pm m;
1960 struct tp_params *p = &adapter->params.tp;
1961
1962 if (!is_offload(adapter))
1963 return -EOPNOTSUPP;
1964 if (!capable(CAP_NET_ADMIN))
1965 return -EPERM;
1966 if (adapter->flags & FULL_INIT_DONE)
1967 return -EBUSY;
1968 if (copy_from_user(&m, useraddr, sizeof(m)))
1969 return -EFAULT;
d9da466a 1970 if (!is_power_of_2(m.rx_pg_sz) ||
1971 !is_power_of_2(m.tx_pg_sz))
4d22de3e
DLR
1972 return -EINVAL; /* not power of 2 */
1973 if (!(m.rx_pg_sz & 0x14000))
1974 return -EINVAL; /* not 16KB or 64KB */
 1975 if (!(m.tx_pg_sz & 0x1554000))
 1976 return -EINVAL; /* not 16KB..16MB */
1977 if (m.tx_num_pg == -1)
1978 m.tx_num_pg = p->tx_num_pgs;
1979 if (m.rx_num_pg == -1)
1980 m.rx_num_pg = p->rx_num_pgs;
1981 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1982 return -EINVAL;
1983 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1984 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1985 return -EINVAL;
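 /*
 * Summary of the checks above: both page sizes must be powers of 2;
 * rx pages 16KB or 64KB, tx pages a power of 4 from 16KB to 16MB
 * (mask 0x1554000 sets bits 14, 16, 18, 20, 22 and 24); page counts
 * must be multiples of 24 and fit within the per-channel memory.
 */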
1986 p->rx_pg_size = m.rx_pg_sz;
1987 p->tx_pg_size = m.tx_pg_sz;
1988 p->rx_num_pgs = m.rx_num_pg;
1989 p->tx_num_pgs = m.tx_num_pg;
1990 break;
1991 }
1992 case CHELSIO_GET_MEM:{
1993 struct ch_mem_range t;
1994 struct mc7 *mem;
1995 u64 buf[32];
1996
1997 if (!is_offload(adapter))
1998 return -EOPNOTSUPP;
1999 if (!(adapter->flags & FULL_INIT_DONE))
2000 return -EIO; /* need the memory controllers */
2001 if (copy_from_user(&t, useraddr, sizeof(t)))
2002 return -EFAULT;
2003 if ((t.addr & 7) || (t.len & 7))
2004 return -EINVAL;
2005 if (t.mem_id == MEM_CM)
2006 mem = &adapter->cm;
2007 else if (t.mem_id == MEM_PMRX)
2008 mem = &adapter->pmrx;
2009 else if (t.mem_id == MEM_PMTX)
2010 mem = &adapter->pmtx;
2011 else
2012 return -EINVAL;
2013
2014 /*
1825494a
DLR
2015 * Version scheme:
2016 * bits 0..9: chip version
2017 * bits 10..15: chip revision
2018 */
4d22de3e
DLR
2019 t.version = 3 | (adapter->params.rev << 10);
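 /* e.g. rev 2 hardware reports 3 | (2 << 10) = 0x803 */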
2020 if (copy_to_user(useraddr, &t, sizeof(t)))
2021 return -EFAULT;
2022
2023 /*
2024 * Read 256 bytes at a time as len can be large and we don't
2025 * want to use huge intermediate buffers.
2026 */
2027 useraddr += sizeof(t); /* advance to start of buffer */
2028 while (t.len) {
2029 unsigned int chunk =
2030 min_t(unsigned int, t.len, sizeof(buf));
2031
 2032 ret = t3_mc7_bd_read(mem, t.addr / 8,
 2033 chunk / 8, buf);
2035 if (ret)
2036 return ret;
2037 if (copy_to_user(useraddr, buf, chunk))
2038 return -EFAULT;
2039 useraddr += chunk;
2040 t.addr += chunk;
2041 t.len -= chunk;
2042 }
2043 break;
2044 }
2045 case CHELSIO_SET_TRACE_FILTER:{
2046 struct ch_trace t;
2047 const struct trace_params *tp;
2048
2049 if (!capable(CAP_NET_ADMIN))
2050 return -EPERM;
2051 if (!offload_running(adapter))
2052 return -EAGAIN;
2053 if (copy_from_user(&t, useraddr, sizeof(t)))
2054 return -EFAULT;
2055
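 /* t.sip and the fields that follow it must mirror struct trace_params */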
2056 tp = (const struct trace_params *)&t.sip;
2057 if (t.config_tx)
2058 t3_config_trace_filter(adapter, tp, 0,
2059 t.invert_match,
2060 t.trace_tx);
2061 if (t.config_rx)
2062 t3_config_trace_filter(adapter, tp, 1,
2063 t.invert_match,
2064 t.trace_rx);
2065 break;
2066 }
4d22de3e
DLR
2067 default:
2068 return -EOPNOTSUPP;
2069 }
2070 return 0;
2071}
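/*
 * Usage sketch (hypothetical userspace code, not part of this driver):
 * the extension commands above arrive via the SIOCCHIOCTL private
 * ioctl, with ifr_data pointing at a command structure whose first
 * 32-bit word is the command code, e.g.:
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&edata;
 *	if (ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
 *		printf("%u queue sets\n", edata.val);
 */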
2072
2073static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2074{
4d22de3e 2075 struct mii_ioctl_data *data = if_mii(req);
5fbf816f
DLR
2076 struct port_info *pi = netdev_priv(dev);
2077 struct adapter *adapter = pi->adapter;
2078 int ret, mmd;
4d22de3e
DLR
2079
2080 switch (cmd) {
2081 case SIOCGMIIPHY:
2082 data->phy_id = pi->phy.addr;
2083 /* FALLTHRU */
2084 case SIOCGMIIREG:{
2085 u32 val;
2086 struct cphy *phy = &pi->phy;
2087
2088 if (!phy->mdio_read)
2089 return -EOPNOTSUPP;
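 /* For 10G PHYs, bits 8 and up of phy_id select the MDIO device
 * (MMD), clause-45 style; the low 5 bits are the PHY address. */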
2090 if (is_10G(adapter)) {
2091 mmd = data->phy_id >> 8;
2092 if (!mmd)
2093 mmd = MDIO_DEV_PCS;
2094 else if (mmd > MDIO_DEV_XGXS)
2095 return -EINVAL;
2096
 2097 ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
 2098 mmd, data->reg_num, &val);
 2100 } else {
 2101 ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
 2102 0, data->reg_num & 0x1f,
 2103 &val);
 2104 }
2105 if (!ret)
2106 data->val_out = val;
2107 break;
2108 }
2109 case SIOCSMIIREG:{
2110 struct cphy *phy = &pi->phy;
2111
2112 if (!capable(CAP_NET_ADMIN))
2113 return -EPERM;
2114 if (!phy->mdio_write)
2115 return -EOPNOTSUPP;
2116 if (is_10G(adapter)) {
2117 mmd = data->phy_id >> 8;
2118 if (!mmd)
2119 mmd = MDIO_DEV_PCS;
2120 else if (mmd > MDIO_DEV_XGXS)
2121 return -EINVAL;
2122
 2123 ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
 2124 mmd, data->reg_num, data->val_in);
 2128 } else {
 2129 ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
 2130 0, data->reg_num & 0x1f,
 2131 data->val_in);
 2132 }
2134 break;
2135 }
2136 case SIOCCHIOCTL:
2137 return cxgb_extension_ioctl(dev, req->ifr_data);
2138 default:
2139 return -EOPNOTSUPP;
2140 }
2141 return ret;
2142}
2143
2144static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2145{
4d22de3e 2146 struct port_info *pi = netdev_priv(dev);
5fbf816f
DLR
2147 struct adapter *adapter = pi->adapter;
2148 int ret;
4d22de3e
DLR
2149
2150 if (new_mtu < 81) /* accommodate SACK */
2151 return -EINVAL;
2152 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2153 return ret;
2154 dev->mtu = new_mtu;
2155 init_port_mtus(adapter);
2156 if (adapter->params.rev == 0 && offload_running(adapter))
2157 t3_load_mtus(adapter, adapter->params.mtus,
2158 adapter->params.a_wnd, adapter->params.b_wnd,
2159 adapter->port[0]->mtu);
2160 return 0;
2161}
2162
2163static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2164{
4d22de3e 2165 struct port_info *pi = netdev_priv(dev);
5fbf816f 2166 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
2167 struct sockaddr *addr = p;
2168
2169 if (!is_valid_ether_addr(addr->sa_data))
2170 return -EINVAL;
2171
2172 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2173 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2174 if (offload_running(adapter))
2175 write_smt_entry(adapter, pi->port_id);
2176 return 0;
2177}
2178
2179/**
2180 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2181 * @adap: the adapter
2182 * @p: the port
2183 *
2184 * Ensures that current Rx processing on any of the queues associated with
2185 * the given port completes before returning. We do this by acquiring and
2186 * releasing the locks of the response queues associated with the port.
2187 */
2188static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2189{
2190 int i;
2191
2192 for (i = 0; i < p->nqsets; i++) {
2193 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2194
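 /* Any rx handler still running holds q->lock, so taking and
 * releasing it guarantees that handler has completed. */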
2195 spin_lock_irq(&q->lock);
2196 spin_unlock_irq(&q->lock);
2197 }
2198}
2199
2200static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2201{
4d22de3e 2202 struct port_info *pi = netdev_priv(dev);
5fbf816f 2203 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
2204
2205 pi->vlan_grp = grp;
2206 if (adapter->params.rev > 0)
2207 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2208 else {
2209 /* single control for all ports */
2210 unsigned int i, have_vlans = 0;
2211 for_each_port(adapter, i)
2212 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2213
2214 t3_set_vlan_accel(adapter, 1, have_vlans);
2215 }
2216 t3_synchronize_rx(adapter, pi);
2217}
2218
4d22de3e
DLR
2219#ifdef CONFIG_NET_POLL_CONTROLLER
2220static void cxgb_netpoll(struct net_device *dev)
2221{
890de332 2222 struct port_info *pi = netdev_priv(dev);
5fbf816f 2223 struct adapter *adapter = pi->adapter;
890de332 2224 int qidx;
4d22de3e 2225
890de332
DLR
2226 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2227 struct sge_qset *qs = &adapter->sge.qs[qidx];
2228 void *source;
2eab17ab 2229
890de332
DLR
2230 if (adapter->flags & USING_MSIX)
2231 source = qs;
2232 else
2233 source = adapter;
2234
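 /* t3_intr_handler() returns the ISR matching the current interrupt
 * mode; call it directly to service the queue set. */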
2235 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2236 }
4d22de3e
DLR
2237}
2238#endif
2239
2240/*
2241 * Periodic accumulation of MAC statistics.
2242 */
2243static void mac_stats_update(struct adapter *adapter)
2244{
2245 int i;
2246
2247 for_each_port(adapter, i) {
2248 struct net_device *dev = adapter->port[i];
2249 struct port_info *p = netdev_priv(dev);
2250
2251 if (netif_running(dev)) {
2252 spin_lock(&adapter->stats_lock);
2253 t3_mac_update_stats(&p->mac);
2254 spin_unlock(&adapter->stats_lock);
2255 }
2256 }
2257}
2258
2259static void check_link_status(struct adapter *adapter)
2260{
2261 int i;
2262
2263 for_each_port(adapter, i) {
2264 struct net_device *dev = adapter->port[i];
2265 struct port_info *p = netdev_priv(dev);
2266
2267 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2268 t3_link_changed(adapter, i);
2269 }
2270}
2271
fc90664e
DLR
2272static void check_t3b2_mac(struct adapter *adapter)
2273{
2274 int i;
2275
f2d961c9
DLR
2276 if (!rtnl_trylock()) /* synchronize with ifdown */
2277 return;
2278
fc90664e
DLR
2279 for_each_port(adapter, i) {
2280 struct net_device *dev = adapter->port[i];
2281 struct port_info *p = netdev_priv(dev);
2282 int status;
2283
2284 if (!netif_running(dev))
2285 continue;
2286
2287 status = 0;
6d6dabac 2288 if (netif_carrier_ok(dev)) /* netif_running() checked above */
fc90664e
DLR
2289 status = t3b2_mac_watchdog_task(&p->mac);
2290 if (status == 1)
2291 p->mac.stats.num_toggled++;
2292 else if (status == 2) {
2293 struct cmac *mac = &p->mac;
2294
2295 t3_mac_set_mtu(mac, dev->mtu);
2296 t3_mac_set_address(mac, 0, dev->dev_addr);
2297 cxgb_set_rxmode(dev);
2298 t3_link_start(&p->phy, mac, &p->link_config);
2299 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2300 t3_port_intr_enable(adapter, p->port_id);
2301 p->mac.stats.num_resets++;
2302 }
2303 }
2304 rtnl_unlock();
2305}
2306
2307
4d22de3e
DLR
2308static void t3_adap_check_task(struct work_struct *work)
2309{
2310 struct adapter *adapter = container_of(work, struct adapter,
2311 adap_check_task.work);
2312 const struct adapter_params *p = &adapter->params;
2313
2314 adapter->check_task_cnt++;
2315
2316 /* Check link status for PHYs without interrupts */
2317 if (p->linkpoll_period)
2318 check_link_status(adapter);
2319
2320 /* Accumulate MAC stats if needed */
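 /* linkpoll_period is in 0.1s units; dividing by 10 converts the
 * elapsed poll time to seconds for the comparison below. */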
2321 if (!p->linkpoll_period ||
2322 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2323 p->stats_update_period) {
2324 mac_stats_update(adapter);
2325 adapter->check_task_cnt = 0;
2326 }
2327
fc90664e
DLR
2328 if (p->rev == T3_REV_B2)
2329 check_t3b2_mac(adapter);
2330
4d22de3e
DLR
2331 /* Schedule the next check update if any port is active. */
2332 spin_lock(&adapter->work_lock);
2333 if (adapter->open_device_map & PORT_MASK)
2334 schedule_chk_task(adapter);
2335 spin_unlock(&adapter->work_lock);
2336}
2337
2338/*
2339 * Processes external (PHY) interrupts in process context.
2340 */
2341static void ext_intr_task(struct work_struct *work)
2342{
2343 struct adapter *adapter = container_of(work, struct adapter,
2344 ext_intr_handler_task);
2345
2346 t3_phy_intr_handler(adapter);
2347
2348 /* Now reenable external interrupts */
2349 spin_lock_irq(&adapter->work_lock);
2350 if (adapter->slow_intr_mask) {
2351 adapter->slow_intr_mask |= F_T3DBG;
2352 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2353 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2354 adapter->slow_intr_mask);
2355 }
2356 spin_unlock_irq(&adapter->work_lock);
2357}
2358
2359/*
2360 * Interrupt-context handler for external (PHY) interrupts.
2361 */
2362void t3_os_ext_intr_handler(struct adapter *adapter)
2363{
2364 /*
2365 * Schedule a task to handle external interrupts as they may be slow
2366 * and we use a mutex to protect MDIO registers. We disable PHY
2367 * interrupts in the meantime and let the task reenable them when
2368 * it's done.
2369 */
2370 spin_lock(&adapter->work_lock);
2371 if (adapter->slow_intr_mask) {
2372 adapter->slow_intr_mask &= ~F_T3DBG;
2373 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2374 adapter->slow_intr_mask);
2375 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2376 }
2377 spin_unlock(&adapter->work_lock);
2378}
2379
2380void t3_fatal_err(struct adapter *adapter)
2381{
2382 unsigned int fw_status[4];
2383
2384 if (adapter->flags & FULL_INIT_DONE) {
2385 t3_sge_stop(adapter);
c64c2eae
DLR
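 /* Disable both MACs' Tx and Rx before turning interrupts off. */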
2386 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2387 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2388 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2389 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
4d22de3e
DLR
2390 t3_intr_disable(adapter);
2391 }
2392 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2393 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2394 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2395 fw_status[0], fw_status[1],
2396 fw_status[2], fw_status[3]);
2397
2398}
2399
91a6b50c
DLR
2400/**
2401 * t3_io_error_detected - called when PCI error is detected
2402 * @pdev: Pointer to PCI device
2403 * @state: The current pci connection state
2404 *
2405 * This function is called after a PCI bus error affecting
2406 * this device has been detected.
2407 */
2408static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2409 pci_channel_state_t state)
2410{
2411 struct net_device *dev = pci_get_drvdata(pdev);
2412 struct port_info *pi = netdev_priv(dev);
2413 struct adapter *adapter = pi->adapter;
2414 int i;
2415
2416 /* Stop all ports */
2417 for_each_port(adapter, i) {
2418 struct net_device *netdev = adapter->port[i];
2419
2420 if (netif_running(netdev))
2421 cxgb_close(netdev);
2422 }
2423
2eab17ab 2424 if (is_offload(adapter) &&
91a6b50c
DLR
2425 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2426 offload_close(&adapter->tdev);
2427
2428 /* Free sge resources */
2429 t3_free_sge_resources(adapter);
2430
2431 adapter->flags &= ~FULL_INIT_DONE;
2432
2433 pci_disable_device(pdev);
2434
 2435 /* Request a slot reset. */
2436 return PCI_ERS_RESULT_NEED_RESET;
2437}
2438
2439/**
2440 * t3_io_slot_reset - called after the pci bus has been reset.
2441 * @pdev: Pointer to PCI device
2442 *
2443 * Restart the card from scratch, as if from a cold-boot.
2444 */
2445static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2446{
2447 struct net_device *dev = pci_get_drvdata(pdev);
2448 struct port_info *pi = netdev_priv(dev);
2449 struct adapter *adapter = pi->adapter;
2450
2451 if (pci_enable_device(pdev)) {
2452 dev_err(&pdev->dev,
2453 "Cannot re-enable PCI device after reset.\n");
2454 return PCI_ERS_RESULT_DISCONNECT;
2455 }
2456 pci_set_master(pdev);
2457
2458 t3_prep_adapter(adapter, adapter->params.info, 1);
2459
2460 return PCI_ERS_RESULT_RECOVERED;
2461}
2462
2463/**
2464 * t3_io_resume - called when traffic can start flowing again.
2465 * @pdev: Pointer to PCI device
2466 *
2467 * This callback is called when the error recovery driver tells us that
 2468 * it's OK to resume normal operation.
2469 */
2470static void t3_io_resume(struct pci_dev *pdev)
2471{
2472 struct net_device *dev = pci_get_drvdata(pdev);
2473 struct port_info *pi = netdev_priv(dev);
2474 struct adapter *adapter = pi->adapter;
2475 int i;
2476
2477 /* Restart the ports */
2478 for_each_port(adapter, i) {
2479 struct net_device *netdev = adapter->port[i];
2480
2481 if (netif_running(netdev)) {
2482 if (cxgb_open(netdev)) {
2483 dev_err(&pdev->dev,
2484 "can't bring device back up"
2485 " after reset\n");
2486 continue;
2487 }
2488 netif_device_attach(netdev);
2489 }
2490 }
2491
2492 if (is_offload(adapter)) {
2493 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2494 if (offload_open(dev))
2495 printk(KERN_WARNING
2496 "Could not bring back offload capabilities\n");
2497 }
2498}
2499
2500static struct pci_error_handlers t3_err_handler = {
2501 .error_detected = t3_io_error_detected,
2502 .slot_reset = t3_io_slot_reset,
2503 .resume = t3_io_resume,
2504};
2505
4d22de3e
DLR
2506static int __devinit cxgb_enable_msix(struct adapter *adap)
2507{
2508 struct msix_entry entries[SGE_QSETS + 1];
2509 int i, err;
2510
2511 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2512 entries[i].entry = i;
2513
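 /* pci_enable_msix() returns 0 on success, or, when it cannot
 * allocate the full set, the number of vectors available; any
 * shortfall makes us give up on MSI-X entirely. */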
2514 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2515 if (!err) {
2516 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2517 adap->msix_info[i].vec = entries[i].vector;
2518 } else if (err > 0)
2519 dev_info(&adap->pdev->dev,
2520 "only %d MSI-X vectors left, not using MSI-X\n", err);
2521 return err;
2522}
2523
2524static void __devinit print_port_info(struct adapter *adap,
2525 const struct adapter_info *ai)
2526{
2527 static const char *pci_variant[] = {
2528 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2529 };
2530
2531 int i;
2532 char buf[80];
2533
2534 if (is_pcie(adap))
2535 snprintf(buf, sizeof(buf), "%s x%d",
2536 pci_variant[adap->params.pci.variant],
2537 adap->params.pci.width);
2538 else
2539 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2540 pci_variant[adap->params.pci.variant],
2541 adap->params.pci.speed, adap->params.pci.width);
2542
2543 for_each_port(adap, i) {
2544 struct net_device *dev = adap->port[i];
2545 const struct port_info *pi = netdev_priv(dev);
2546
2547 if (!test_bit(i, &adap->registered_device_map))
2548 continue;
8ac3ba68 2549 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
4d22de3e 2550 dev->name, ai->desc, pi->port_type->desc,
8ac3ba68 2551 is_offload(adap) ? "R" : "", adap->params.rev, buf,
4d22de3e
DLR
2552 (adap->flags & USING_MSIX) ? " MSI-X" :
2553 (adap->flags & USING_MSI) ? " MSI" : "");
2554 if (adap->name == dev->name && adap->params.vpd.mclk)
167cdf5f
DLR
2555 printk(KERN_INFO
2556 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
4d22de3e
DLR
2557 adap->name, t3_mc7_size(&adap->cm) >> 20,
2558 t3_mc7_size(&adap->pmtx) >> 20,
167cdf5f
DLR
2559 t3_mc7_size(&adap->pmrx) >> 20,
2560 adap->params.vpd.sn);
4d22de3e
DLR
2561 }
2562}
2563
2564static int __devinit init_one(struct pci_dev *pdev,
2565 const struct pci_device_id *ent)
2566{
2567 static int version_printed;
2568
2569 int i, err, pci_using_dac = 0;
2570 unsigned long mmio_start, mmio_len;
2571 const struct adapter_info *ai;
2572 struct adapter *adapter = NULL;
2573 struct port_info *pi;
2574
2575 if (!version_printed) {
2576 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2577 ++version_printed;
2578 }
2579
2580 if (!cxgb3_wq) {
2581 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2582 if (!cxgb3_wq) {
2583 printk(KERN_ERR DRV_NAME
2584 ": cannot initialize work queue\n");
2585 return -ENOMEM;
2586 }
2587 }
2588
2589 err = pci_request_regions(pdev, DRV_NAME);
2590 if (err) {
2591 /* Just info, some other driver may have claimed the device. */
2592 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2593 return err;
2594 }
2595
2596 err = pci_enable_device(pdev);
2597 if (err) {
2598 dev_err(&pdev->dev, "cannot enable PCI device\n");
2599 goto out_release_regions;
2600 }
2601
2602 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2603 pci_using_dac = 1;
2604 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2605 if (err) {
2606 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2607 "coherent allocations\n");
2608 goto out_disable_device;
2609 }
2610 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2611 dev_err(&pdev->dev, "no usable DMA configuration\n");
2612 goto out_disable_device;
2613 }
2614
2615 pci_set_master(pdev);
2616
2617 mmio_start = pci_resource_start(pdev, 0);
2618 mmio_len = pci_resource_len(pdev, 0);
2619 ai = t3_get_adapter_info(ent->driver_data);
2620
2621 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2622 if (!adapter) {
2623 err = -ENOMEM;
2624 goto out_disable_device;
2625 }
2626
2627 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2628 if (!adapter->regs) {
2629 dev_err(&pdev->dev, "cannot map device registers\n");
2630 err = -ENOMEM;
2631 goto out_free_adapter;
2632 }
2633
2634 adapter->pdev = pdev;
2635 adapter->name = pci_name(pdev);
2636 adapter->msg_enable = dflt_msg_enable;
2637 adapter->mmio_len = mmio_len;
2638
2639 mutex_init(&adapter->mdio_lock);
2640 spin_lock_init(&adapter->work_lock);
2641 spin_lock_init(&adapter->stats_lock);
2642
2643 INIT_LIST_HEAD(&adapter->adapter_list);
2644 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2645 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2646
2647 for (i = 0; i < ai->nports; ++i) {
2648 struct net_device *netdev;
2649
2650 netdev = alloc_etherdev(sizeof(struct port_info));
2651 if (!netdev) {
2652 err = -ENOMEM;
2653 goto out_free_dev;
2654 }
2655
4d22de3e
DLR
2656 SET_NETDEV_DEV(netdev, &pdev->dev);
2657
2658 adapter->port[i] = netdev;
2659 pi = netdev_priv(netdev);
5fbf816f 2660 pi->adapter = adapter;
4d22de3e
DLR
2661 pi->rx_csum_offload = 1;
2662 pi->nqsets = 1;
2663 pi->first_qset = i;
2664 pi->activity = 0;
2665 pi->port_id = i;
2666 netif_carrier_off(netdev);
2667 netdev->irq = pdev->irq;
2668 netdev->mem_start = mmio_start;
2669 netdev->mem_end = mmio_start + mmio_len - 1;
4d22de3e
DLR
2670 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2671 netdev->features |= NETIF_F_LLTX;
2672 if (pci_using_dac)
2673 netdev->features |= NETIF_F_HIGHDMA;
2674
2675 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2676 netdev->vlan_rx_register = vlan_rx_register;
4d22de3e
DLR
2677
2678 netdev->open = cxgb_open;
2679 netdev->stop = cxgb_close;
2680 netdev->hard_start_xmit = t3_eth_xmit;
2681 netdev->get_stats = cxgb_get_stats;
2682 netdev->set_multicast_list = cxgb_set_rxmode;
2683 netdev->do_ioctl = cxgb_ioctl;
2684 netdev->change_mtu = cxgb_change_mtu;
2685 netdev->set_mac_address = cxgb_set_mac_addr;
2686#ifdef CONFIG_NET_POLL_CONTROLLER
2687 netdev->poll_controller = cxgb_netpoll;
2688#endif
4d22de3e
DLR
2689
2690 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2691 }
2692
5fbf816f 2693 pci_set_drvdata(pdev, adapter);
4d22de3e
DLR
2694 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2695 err = -ENODEV;
2696 goto out_free_dev;
2697 }
2eab17ab 2698
4d22de3e
DLR
2699 /*
2700 * The card is now ready to go. If any errors occur during device
2701 * registration we do not fail the whole card but rather proceed only
2702 * with the ports we manage to register successfully. However we must
2703 * register at least one net device.
2704 */
2705 for_each_port(adapter, i) {
2706 err = register_netdev(adapter->port[i]);
2707 if (err)
2708 dev_warn(&pdev->dev,
2709 "cannot register net device %s, skipping\n",
2710 adapter->port[i]->name);
2711 else {
2712 /*
2713 * Change the name we use for messages to the name of
2714 * the first successfully registered interface.
2715 */
2716 if (!adapter->registered_device_map)
2717 adapter->name = adapter->port[i]->name;
2718
2719 __set_bit(i, &adapter->registered_device_map);
2720 }
2721 }
2722 if (!adapter->registered_device_map) {
2723 dev_err(&pdev->dev, "could not register any net devices\n");
2724 goto out_free_dev;
2725 }
2726
2727 /* Driver's ready. Reflect it on LEDs */
2728 t3_led_ready(adapter);
2729
2730 if (is_offload(adapter)) {
2731 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2732 cxgb3_adapter_ofld(adapter);
2733 }
2734
2735 /* See what interrupts we'll be using */
2736 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2737 adapter->flags |= USING_MSIX;
2738 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2739 adapter->flags |= USING_MSI;
2740
0ee8d33c 2741 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
4d22de3e
DLR
2742 &cxgb3_attr_group);
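 /* Note: a sysfs group creation failure is not treated as fatal
 * here; err is not checked and the probe continues regardless. */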
2743
2744 print_port_info(adapter, ai);
2745 return 0;
2746
2747out_free_dev:
2748 iounmap(adapter->regs);
2749 for (i = ai->nports - 1; i >= 0; --i)
2750 if (adapter->port[i])
2751 free_netdev(adapter->port[i]);
2752
2753out_free_adapter:
2754 kfree(adapter);
2755
2756out_disable_device:
2757 pci_disable_device(pdev);
2758out_release_regions:
2759 pci_release_regions(pdev);
2760 pci_set_drvdata(pdev, NULL);
2761 return err;
2762}
2763
2764static void __devexit remove_one(struct pci_dev *pdev)
2765{
5fbf816f 2766 struct adapter *adapter = pci_get_drvdata(pdev);
4d22de3e 2767
5fbf816f 2768 if (adapter) {
4d22de3e 2769 int i;
4d22de3e
DLR
2770
2771 t3_sge_stop(adapter);
0ee8d33c 2772 sysfs_remove_group(&adapter->port[0]->dev.kobj,
4d22de3e
DLR
2773 &cxgb3_attr_group);
2774
4d22de3e
DLR
2775 if (is_offload(adapter)) {
2776 cxgb3_adapter_unofld(adapter);
2777 if (test_bit(OFFLOAD_DEVMAP_BIT,
2778 &adapter->open_device_map))
2779 offload_close(&adapter->tdev);
2780 }
2781
67d92ab7
DLR
2782 for_each_port(adapter, i)
2783 if (test_bit(i, &adapter->registered_device_map))
2784 unregister_netdev(adapter->port[i]);
2785
4d22de3e
DLR
2786 t3_free_sge_resources(adapter);
2787 cxgb_disable_msi(adapter);
2788
4d22de3e
DLR
2789 for_each_port(adapter, i)
2790 if (adapter->port[i])
2791 free_netdev(adapter->port[i]);
2792
2793 iounmap(adapter->regs);
2794 kfree(adapter);
2795 pci_release_regions(pdev);
2796 pci_disable_device(pdev);
2797 pci_set_drvdata(pdev, NULL);
2798 }
2799}
2800
2801static struct pci_driver driver = {
2802 .name = DRV_NAME,
2803 .id_table = cxgb3_pci_tbl,
2804 .probe = init_one,
2805 .remove = __devexit_p(remove_one),
91a6b50c 2806 .err_handler = &t3_err_handler,
4d22de3e
DLR
2807};
2808
2809static int __init cxgb3_init_module(void)
2810{
2811 int ret;
2812
2813 cxgb3_offload_init();
2814
2815 ret = pci_register_driver(&driver);
2816 return ret;
2817}
2818
2819static void __exit cxgb3_cleanup_module(void)
2820{
2821 pci_unregister_driver(&driver);
2822 if (cxgb3_wq)
2823 destroy_workqueue(cxgb3_wq);
2824}
2825
2826module_init(cxgb3_init_module);
2827module_exit(cxgb3_cleanup_module);