cxgb3 - Auto-load FW if mismatch detected
drivers/net/cxgb3/cxgb3_main.c (linux-2.6-block.git)
4d22de3e 1/*
1d68e93d 2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
2e283962 45#include <linux/firmware.h>
46#include <asm/uaccess.h>
47
48#include "common.h"
49#include "cxgb3_ioctl.h"
50#include "regs.h"
51#include "cxgb3_offload.h"
52#include "version.h"
53
54#include "cxgb3_ctl_defs.h"
55#include "t3_cpl.h"
56#include "firmware_exports.h"
57
58enum {
59 MAX_TXQ_ENTRIES = 16384,
60 MAX_CTRL_TXQ_ENTRIES = 1024,
61 MAX_RSPQ_ENTRIES = 16384,
62 MAX_RX_BUFFERS = 16384,
63 MAX_RX_JUMBO_BUFFERS = 16384,
64 MIN_TXQ_ENTRIES = 4,
65 MIN_CTRL_TXQ_ENTRIES = 4,
66 MIN_RSPQ_ENTRIES = 32,
67 MIN_FL_ENTRIES = 32
68};
69
70#define PORT_MASK ((1 << MAX_NPORTS) - 1)
71
72#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
73 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
74 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
75
76#define EEPROM_MAGIC 0x38E2F10C
77
78#define CH_DEVICE(devid, ssid, idx) \
79 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
80
81static const struct pci_device_id cxgb3_pci_tbl[] = {
82 CH_DEVICE(0x20, 1, 0), /* PE9000 */
83 CH_DEVICE(0x21, 1, 1), /* T302E */
84 CH_DEVICE(0x22, 1, 2), /* T310E */
85 CH_DEVICE(0x23, 1, 3), /* T320X */
86 CH_DEVICE(0x24, 1, 1), /* T302X */
87 CH_DEVICE(0x25, 1, 3), /* T320E */
88 CH_DEVICE(0x26, 1, 2), /* T310X */
89 CH_DEVICE(0x30, 1, 2), /* T3B10 */
90 CH_DEVICE(0x31, 1, 3), /* T3B20 */
91 CH_DEVICE(0x32, 1, 1), /* T3B02 */
92 {0,}
93};
94
95MODULE_DESCRIPTION(DRV_DESC);
96MODULE_AUTHOR("Chelsio Communications");
1d68e93d 97MODULE_LICENSE("Dual BSD/GPL");
98MODULE_VERSION(DRV_VERSION);
99MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
100
101static int dflt_msg_enable = DFLT_MSG_ENABLE;
102
103module_param(dflt_msg_enable, int, 0644);
104MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
105
106/*
107 * The driver uses the best interrupt scheme available on a platform in the
108 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
109 * of these schemes the driver may consider as follows:
110 *
111 * msi = 2: choose from among all three options
112 * msi = 1: only consider MSI and pin interrupts
113 * msi = 0: force pin interrupts
114 */
115static int msi = 2;
116
117module_param(msi, int, 0644);
118MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
119
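/*
 * Editorial note (usage sketch, not part of the original source): the msi
 * parameter above can be set at module load time, e.g. "modprobe cxgb3 msi=1"
 * to skip MSI-X and fall back to MSI or legacy pin interrupts, or "msi=0" to
 * force legacy pin interrupts. The values are only illustrative of the
 * semantics documented in the comment above.
 */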
120/*
121 * The driver enables offload as a default.
122 * To disable it, use ofld_disable = 1.
123 */
124
125static int ofld_disable = 0;
126
127module_param(ofld_disable, int, 0644);
128MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
129
130/*
131 * We have work elements that we need to cancel when an interface is taken
132 * down. Normally the work elements would be executed by keventd but that
133 * can deadlock because of linkwatch. If our close method takes the rtnl
134 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
135 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
136 * for our work to complete. Get our own work queue to solve this.
137 */
138static struct workqueue_struct *cxgb3_wq;
139
140/**
141 * link_report - show link status and link speed/duplex
 142 * @dev: the port whose link status is to be reported
143 *
144 * Shows the link status, speed, and duplex of a port.
145 */
146static void link_report(struct net_device *dev)
147{
148 if (!netif_carrier_ok(dev))
149 printk(KERN_INFO "%s: link down\n", dev->name);
150 else {
151 const char *s = "10Mbps";
152 const struct port_info *p = netdev_priv(dev);
153
154 switch (p->link_config.speed) {
155 case SPEED_10000:
156 s = "10Gbps";
157 break;
158 case SPEED_1000:
159 s = "1000Mbps";
160 break;
161 case SPEED_100:
162 s = "100Mbps";
163 break;
164 }
165
166 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
167 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
168 }
169}
170
171/**
172 * t3_os_link_changed - handle link status changes
173 * @adapter: the adapter associated with the link change
 174 * @port_id: the port index whose link status has changed
175 * @link_stat: the new status of the link
176 * @speed: the new speed setting
177 * @duplex: the new duplex setting
178 * @pause: the new flow-control setting
179 *
180 * This is the OS-dependent handler for link status changes. The OS
181 * neutral handler takes care of most of the processing for these events,
182 * then calls this handler for any OS-specific processing.
183 */
184void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
185 int speed, int duplex, int pause)
186{
187 struct net_device *dev = adapter->port[port_id];
188
189 /* Skip changes from disabled ports. */
190 if (!netif_running(dev))
191 return;
192
193 if (link_stat != netif_carrier_ok(dev)) {
194 if (link_stat)
195 netif_carrier_on(dev);
196 else
197 netif_carrier_off(dev);
198 link_report(dev);
199 }
200}
201
202static void cxgb_set_rxmode(struct net_device *dev)
203{
204 struct t3_rx_mode rm;
205 struct port_info *pi = netdev_priv(dev);
206
207 init_rx_mode(&rm, dev, dev->mc_list);
208 t3_mac_set_rx_mode(&pi->mac, &rm);
209}
210
211/**
212 * link_start - enable a port
213 * @dev: the device to enable
214 *
215 * Performs the MAC and PHY actions needed to enable a port.
216 */
217static void link_start(struct net_device *dev)
218{
219 struct t3_rx_mode rm;
220 struct port_info *pi = netdev_priv(dev);
221 struct cmac *mac = &pi->mac;
222
223 init_rx_mode(&rm, dev, dev->mc_list);
224 t3_mac_reset(mac);
225 t3_mac_set_mtu(mac, dev->mtu);
226 t3_mac_set_address(mac, 0, dev->dev_addr);
227 t3_mac_set_rx_mode(mac, &rm);
228 t3_link_start(&pi->phy, mac, &pi->link_config);
229 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
230}
231
232static inline void cxgb_disable_msi(struct adapter *adapter)
233{
234 if (adapter->flags & USING_MSIX) {
235 pci_disable_msix(adapter->pdev);
236 adapter->flags &= ~USING_MSIX;
237 } else if (adapter->flags & USING_MSI) {
238 pci_disable_msi(adapter->pdev);
239 adapter->flags &= ~USING_MSI;
240 }
241}
242
243/*
244 * Interrupt handler for asynchronous events used with MSI-X.
245 */
246static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
247{
248 t3_slow_intr_handler(cookie);
249 return IRQ_HANDLED;
250}
251
252/*
253 * Name the MSI-X interrupts.
254 */
255static void name_msix_vecs(struct adapter *adap)
256{
257 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
258
259 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
260 adap->msix_info[0].desc[n] = 0;
261
262 for_each_port(adap, j) {
263 struct net_device *d = adap->port[j];
264 const struct port_info *pi = netdev_priv(d);
265
266 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
267 snprintf(adap->msix_info[msi_idx].desc, n,
268 "%s (queue %d)", d->name, i);
269 adap->msix_info[msi_idx].desc[n] = 0;
270 }
271 }
272}
273
274static int request_msix_data_irqs(struct adapter *adap)
275{
276 int i, j, err, qidx = 0;
277
278 for_each_port(adap, i) {
279 int nqsets = adap2pinfo(adap, i)->nqsets;
280
281 for (j = 0; j < nqsets; ++j) {
282 err = request_irq(adap->msix_info[qidx + 1].vec,
283 t3_intr_handler(adap,
284 adap->sge.qs[qidx].
285 rspq.polling), 0,
286 adap->msix_info[qidx + 1].desc,
287 &adap->sge.qs[qidx]);
288 if (err) {
289 while (--qidx >= 0)
290 free_irq(adap->msix_info[qidx + 1].vec,
291 &adap->sge.qs[qidx]);
292 return err;
293 }
294 qidx++;
295 }
296 }
297 return 0;
298}
299
300/**
301 * setup_rss - configure RSS
302 * @adap: the adapter
303 *
304 * Sets up RSS to distribute packets to multiple receive queues. We
305 * configure the RSS CPU lookup table to distribute to the number of HW
306 * receive queues, and the response queue lookup table to narrow that
307 * down to the response queues actually configured for each port.
308 * We always configure the RSS mapping for two ports since the mapping
309 * table has plenty of entries.
310 */
311static void setup_rss(struct adapter *adap)
312{
313 int i;
314 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
315 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
316 u8 cpus[SGE_QSETS + 1];
317 u16 rspq_map[RSS_TABLE_SIZE];
318
319 for (i = 0; i < SGE_QSETS; ++i)
320 cpus[i] = i;
321 cpus[SGE_QSETS] = 0xff; /* terminator */
322
323 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
324 rspq_map[i] = i % nq0;
325 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
326 }
327
328 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
329 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
330 V_RRCPLCPUSIZE(6), cpus, rspq_map);
331}
332
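/*
 * Editorial note (illustrative example, not in the original source): with two
 * ports and nqsets = 2 on each (nq0 = nq1 = 2), the loops in setup_rss()
 * above fill the first half of rspq_map with 0,1,0,1,... and the second half
 * with 2,3,2,3,..., i.e. port 1's queue indices offset by nq0, so each port's
 * RSS hash results are narrowed to that port's own response queues.
 */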
333/*
334 * If we have multiple receive queues per port serviced by NAPI we need one
335 * netdevice per queue as NAPI operates on netdevices. We already have one
336 * netdevice, namely the one associated with the interface, so we use dummy
337 * ones for any additional queues. Note that these netdevices exist purely
338 * so that NAPI has something to work with, they do not represent network
339 * ports and are not registered.
340 */
341static int init_dummy_netdevs(struct adapter *adap)
342{
343 int i, j, dummy_idx = 0;
344 struct net_device *nd;
345
346 for_each_port(adap, i) {
347 struct net_device *dev = adap->port[i];
348 const struct port_info *pi = netdev_priv(dev);
349
350 for (j = 0; j < pi->nqsets - 1; j++) {
351 if (!adap->dummy_netdev[dummy_idx]) {
352 nd = alloc_netdev(0, "", ether_setup);
353 if (!nd)
354 goto free_all;
355
356 nd->priv = adap;
357 nd->weight = 64;
358 set_bit(__LINK_STATE_START, &nd->state);
359 adap->dummy_netdev[dummy_idx] = nd;
360 }
361 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
362 dummy_idx++;
363 }
364 }
365 return 0;
366
367free_all:
368 while (--dummy_idx >= 0) {
369 free_netdev(adap->dummy_netdev[dummy_idx]);
370 adap->dummy_netdev[dummy_idx] = NULL;
371 }
372 return -ENOMEM;
373}
374
375/*
376 * Wait until all NAPI handlers are descheduled. This includes the handlers of
377 * both netdevices representing interfaces and the dummy ones for the extra
378 * queues.
379 */
380static void quiesce_rx(struct adapter *adap)
381{
382 int i;
383 struct net_device *dev;
384
385 for_each_port(adap, i) {
386 dev = adap->port[i];
387 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
388 msleep(1);
389 }
390
391 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
392 dev = adap->dummy_netdev[i];
393 if (dev)
394 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
395 msleep(1);
396 }
397}
398
399/**
400 * setup_sge_qsets - configure SGE Tx/Rx/response queues
401 * @adap: the adapter
402 *
403 * Determines how many sets of SGE queues to use and initializes them.
404 * We support multiple queue sets per port if we have MSI-X, otherwise
405 * just one queue set per port.
406 */
407static int setup_sge_qsets(struct adapter *adap)
408{
409 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
410 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
411
412 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
413 irq_idx = -1;
414
415 for_each_port(adap, i) {
416 struct net_device *dev = adap->port[i];
417 const struct port_info *pi = netdev_priv(dev);
418
419 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
420 err = t3_sge_alloc_qset(adap, qset_idx, 1,
421 (adap->flags & USING_MSIX) ? qset_idx + 1 :
422 irq_idx,
423 &adap->params.sge.qset[qset_idx], ntxq,
424 j == 0 ? dev :
425 adap-> dummy_netdev[dummy_dev_idx++]);
426 if (err) {
427 t3_free_sge_resources(adap);
428 return err;
429 }
430 }
431 }
432
433 return 0;
434}
435
436static ssize_t attr_show(struct device *d, struct device_attribute *attr,
437 char *buf,
896392ef 438 ssize_t(*format) (struct net_device *, char *))
439{
440 ssize_t len;
441
442 /* Synchronize with ioctls that may shut down the device */
443 rtnl_lock();
896392ef 444 len = (*format) (to_net_dev(d), buf);
445 rtnl_unlock();
446 return len;
447}
448
449static ssize_t attr_store(struct device *d, struct device_attribute *attr,
450 const char *buf, size_t len,
896392ef 451 ssize_t(*set) (struct net_device *, unsigned int),
452 unsigned int min_val, unsigned int max_val)
453{
454 char *endp;
455 ssize_t ret;
456 unsigned int val;
457
458 if (!capable(CAP_NET_ADMIN))
459 return -EPERM;
460
461 val = simple_strtoul(buf, &endp, 0);
462 if (endp == buf || val < min_val || val > max_val)
463 return -EINVAL;
464
465 rtnl_lock();
896392ef 466 ret = (*set) (to_net_dev(d), val);
467 if (!ret)
468 ret = len;
469 rtnl_unlock();
470 return ret;
471}
472
473#define CXGB3_SHOW(name, val_expr) \
896392ef 474static ssize_t format_##name(struct net_device *dev, char *buf) \
4d22de3e 475{ \
896392ef 476 struct adapter *adap = dev->priv; \
477 return sprintf(buf, "%u\n", val_expr); \
478} \
479static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
480 char *buf) \
4d22de3e 481{ \
0ee8d33c 482 return attr_show(d, attr, buf, format_##name); \
483}
484
896392ef 485static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
4d22de3e 486{
487 struct adapter *adap = dev->priv;
488
489 if (adap->flags & FULL_INIT_DONE)
490 return -EBUSY;
491 if (val && adap->params.rev == 0)
492 return -EINVAL;
493 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
494 return -EINVAL;
495 adap->params.mc5.nfilters = val;
496 return 0;
497}
498
499static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
500 const char *buf, size_t len)
4d22de3e 501{
0ee8d33c 502 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
503}
504
896392ef 505static ssize_t set_nservers(struct net_device *dev, unsigned int val)
4d22de3e 506{
507 struct adapter *adap = dev->priv;
508
509 if (adap->flags & FULL_INIT_DONE)
510 return -EBUSY;
511 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
512 return -EINVAL;
513 adap->params.mc5.nservers = val;
514 return 0;
515}
516
517static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
518 const char *buf, size_t len)
4d22de3e 519{
0ee8d33c 520 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
521}
522
523#define CXGB3_ATTR_R(name, val_expr) \
524CXGB3_SHOW(name, val_expr) \
0ee8d33c 525static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
526
527#define CXGB3_ATTR_RW(name, val_expr, store_method) \
528CXGB3_SHOW(name, val_expr) \
0ee8d33c 529static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
530
531CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
532CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
533CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
534
535static struct attribute *cxgb3_attrs[] = {
536 &dev_attr_cam_size.attr,
537 &dev_attr_nfilters.attr,
538 &dev_attr_nservers.attr,
539 NULL
540};
541
542static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
543
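/*
 * Editorial note (assumed paths, not in the original source): the attribute
 * group above is exposed through the port's net device in sysfs, so reading
 * and tuning would look roughly like "cat /sys/class/net/<iface>/cam_size" or
 * "echo 8192 > /sys/class/net/<iface>/nfilters" (as root, before the adapter
 * is fully initialized). The exact path depends on where the group is
 * registered.
 */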
544static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
545 char *buf, int sched)
546{
547 ssize_t len;
548 unsigned int v, addr, bpt, cpt;
0ee8d33c 549 struct adapter *adap = to_net_dev(d)->priv;
550
551 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
552 rtnl_lock();
553 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
554 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
555 if (sched & 1)
556 v >>= 16;
557 bpt = (v >> 8) & 0xff;
558 cpt = v & 0xff;
559 if (!cpt)
560 len = sprintf(buf, "disabled\n");
561 else {
562 v = (adap->params.vpd.cclk * 1000) / cpt;
563 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
564 }
565 rtnl_unlock();
566 return len;
567}
568
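/*
 * Editorial note (worked example with assumed values): tm_attr_show() above
 * reports a scheduler rate of bpt bytes per cpt core-clock ticks. Assuming
 * cclk is stored in kHz, e.g. cclk = 200000 (200 MHz), cpt = 200 and bpt = 1
 * give v = (200000 * 1000) / 200 = 1000000 and (v * bpt) / 125 = 8000 Kbps,
 * i.e. one byte every 200 cycles at 200 MHz is 8 Mbps.
 */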
569static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
570 const char *buf, size_t len, int sched)
571{
572 char *endp;
573 ssize_t ret;
574 unsigned int val;
0ee8d33c 575 struct adapter *adap = to_net_dev(d)->priv;
576
577 if (!capable(CAP_NET_ADMIN))
578 return -EPERM;
579
580 val = simple_strtoul(buf, &endp, 0);
581 if (endp == buf || val > 10000000)
582 return -EINVAL;
583
584 rtnl_lock();
585 ret = t3_config_sched(adap, val, sched);
586 if (!ret)
587 ret = len;
588 rtnl_unlock();
589 return ret;
590}
591
592#define TM_ATTR(name, sched) \
593static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
594 char *buf) \
4d22de3e 595{ \
0ee8d33c 596 return tm_attr_show(d, attr, buf, sched); \
4d22de3e 597} \
598static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
599 const char *buf, size_t len) \
4d22de3e 600{ \
0ee8d33c 601 return tm_attr_store(d, attr, buf, len, sched); \
4d22de3e 602} \
0ee8d33c 603static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
604
605TM_ATTR(sched0, 0);
606TM_ATTR(sched1, 1);
607TM_ATTR(sched2, 2);
608TM_ATTR(sched3, 3);
609TM_ATTR(sched4, 4);
610TM_ATTR(sched5, 5);
611TM_ATTR(sched6, 6);
612TM_ATTR(sched7, 7);
613
614static struct attribute *offload_attrs[] = {
615 &dev_attr_sched0.attr,
616 &dev_attr_sched1.attr,
617 &dev_attr_sched2.attr,
618 &dev_attr_sched3.attr,
619 &dev_attr_sched4.attr,
620 &dev_attr_sched5.attr,
621 &dev_attr_sched6.attr,
622 &dev_attr_sched7.attr,
623 NULL
624};
625
626static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
627
628/*
629 * Sends an sk_buff to an offload queue driver
630 * after dealing with any active network taps.
631 */
632static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
633{
634 int ret;
635
636 local_bh_disable();
637 ret = t3_offload_tx(tdev, skb);
638 local_bh_enable();
639 return ret;
640}
641
642static int write_smt_entry(struct adapter *adapter, int idx)
643{
644 struct cpl_smt_write_req *req;
645 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
646
647 if (!skb)
648 return -ENOMEM;
649
650 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
651 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
652 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
653 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
654 req->iff = idx;
655 memset(req->src_mac1, 0, sizeof(req->src_mac1));
656 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
657 skb->priority = 1;
658 offload_tx(&adapter->tdev, skb);
659 return 0;
660}
661
662static int init_smt(struct adapter *adapter)
663{
664 int i;
665
666 for_each_port(adapter, i)
667 write_smt_entry(adapter, i);
668 return 0;
669}
670
671static void init_port_mtus(struct adapter *adapter)
672{
673 unsigned int mtus = adapter->port[0]->mtu;
674
675 if (adapter->port[1])
676 mtus |= adapter->port[1]->mtu << 16;
677 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
678}
679
680static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
681 int hi, int port)
682{
683 struct sk_buff *skb;
684 struct mngt_pktsched_wr *req;
685
686 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
687 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
688 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
689 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
690 req->sched = sched;
691 req->idx = qidx;
692 req->min = lo;
693 req->max = hi;
694 req->binding = port;
695 t3_mgmt_tx(adap, skb);
696}
697
698static void bind_qsets(struct adapter *adap)
699{
700 int i, j;
701
702 for_each_port(adap, i) {
703 const struct port_info *pi = adap2pinfo(adap, i);
704
705 for (j = 0; j < pi->nqsets; ++j)
706 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
707 -1, i);
708 }
709}
710
711#define FW_FNAME "t3fw-%d.%d.bin"
712
713static int upgrade_fw(struct adapter *adap)
714{
715 int ret;
716 char buf[64];
717 const struct firmware *fw;
718 struct device *dev = &adap->pdev->dev;
719
720 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
721 FW_VERSION_MINOR);
722 ret = request_firmware(&fw, buf, dev);
723 if (ret < 0) {
724 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
725 buf);
726 return ret;
727 }
728 ret = t3_load_fw(adap, fw->data, fw->size);
729 release_firmware(fw);
730 return ret;
731}
732
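/*
 * Editorial note (hypothetical values, not in the original source): with
 * FW_VERSION_MAJOR = 4 and FW_VERSION_MINOR = 0, upgrade_fw() above asks
 * userspace for "t3fw-4.0.bin", which request_firmware() typically resolves
 * from /lib/firmware. The real version numbers come from the firmware
 * headers included at the top of this file.
 */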
733/**
734 * cxgb_up - enable the adapter
 735 * @adap: adapter being enabled
736 *
737 * Called when the first port is enabled, this function performs the
738 * actions necessary to make an adapter operational, such as completing
739 * the initialization of HW modules, and enabling interrupts.
740 *
741 * Must be called with the rtnl lock held.
742 */
743static int cxgb_up(struct adapter *adap)
744{
745 int err = 0;
746
747 if (!(adap->flags & FULL_INIT_DONE)) {
748 err = t3_check_fw_version(adap);
749 if (err == -EINVAL)
750 err = upgrade_fw(adap);
4aac3899 751 if (err)
4d22de3e 752 goto out;
753
754 err = init_dummy_netdevs(adap);
755 if (err)
756 goto out;
757
758 err = t3_init_hw(adap, 0);
759 if (err)
760 goto out;
761
762 err = setup_sge_qsets(adap);
763 if (err)
764 goto out;
765
766 setup_rss(adap);
767 adap->flags |= FULL_INIT_DONE;
768 }
769
770 t3_intr_clear(adap);
771
772 if (adap->flags & USING_MSIX) {
773 name_msix_vecs(adap);
774 err = request_irq(adap->msix_info[0].vec,
775 t3_async_intr_handler, 0,
776 adap->msix_info[0].desc, adap);
777 if (err)
778 goto irq_err;
779
780 if (request_msix_data_irqs(adap)) {
781 free_irq(adap->msix_info[0].vec, adap);
782 goto irq_err;
783 }
784 } else if ((err = request_irq(adap->pdev->irq,
785 t3_intr_handler(adap,
786 adap->sge.qs[0].rspq.
787 polling),
788 (adap->flags & USING_MSI) ?
789 0 : IRQF_SHARED,
790 adap->name, adap)))
791 goto irq_err;
792
793 t3_sge_start(adap);
794 t3_intr_enable(adap);
795
796 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
797 bind_qsets(adap);
798 adap->flags |= QUEUES_BOUND;
799
800out:
801 return err;
802irq_err:
803 CH_ERR(adap, "request_irq failed, err %d\n", err);
804 goto out;
805}
806
807/*
808 * Release resources when all the ports and offloading have been stopped.
809 */
810static void cxgb_down(struct adapter *adapter)
811{
812 t3_sge_stop(adapter);
813 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
814 t3_intr_disable(adapter);
815 spin_unlock_irq(&adapter->work_lock);
816
817 if (adapter->flags & USING_MSIX) {
818 int i, n = 0;
819
820 free_irq(adapter->msix_info[0].vec, adapter);
821 for_each_port(adapter, i)
822 n += adap2pinfo(adapter, i)->nqsets;
823
824 for (i = 0; i < n; ++i)
825 free_irq(adapter->msix_info[i + 1].vec,
826 &adapter->sge.qs[i]);
827 } else
828 free_irq(adapter->pdev->irq, adapter);
829
830 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
831 quiesce_rx(adapter);
832}
833
834static void schedule_chk_task(struct adapter *adap)
835{
836 unsigned int timeo;
837
838 timeo = adap->params.linkpoll_period ?
839 (HZ * adap->params.linkpoll_period) / 10 :
840 adap->params.stats_update_period * HZ;
841 if (timeo)
842 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
843}
844
845static int offload_open(struct net_device *dev)
846{
847 struct adapter *adapter = dev->priv;
848 struct t3cdev *tdev = T3CDEV(dev);
849 int adap_up = adapter->open_device_map & PORT_MASK;
850 int err = 0;
851
852 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
853 return 0;
854
855 if (!adap_up && (err = cxgb_up(adapter)) < 0)
856 return err;
857
858 t3_tp_set_offload_mode(adapter, 1);
859 tdev->lldev = adapter->port[0];
860 err = cxgb3_offload_activate(adapter);
861 if (err)
862 goto out;
863
864 init_port_mtus(adapter);
865 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
866 adapter->params.b_wnd,
867 adapter->params.rev == 0 ?
868 adapter->port[0]->mtu : 0xffff);
869 init_smt(adapter);
870
871 /* Never mind if the next step fails */
0ee8d33c 872 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
873
874 /* Call back all registered clients */
875 cxgb3_add_clients(tdev);
876
877out:
878 /* restore them in case the offload module has changed them */
879 if (err) {
880 t3_tp_set_offload_mode(adapter, 0);
881 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
882 cxgb3_set_dummy_ops(tdev);
883 }
884 return err;
885}
886
887static int offload_close(struct t3cdev *tdev)
888{
889 struct adapter *adapter = tdev2adap(tdev);
890
891 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
892 return 0;
893
894 /* Call back all registered clients */
895 cxgb3_remove_clients(tdev);
896
0ee8d33c 897 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
898
899 tdev->lldev = NULL;
900 cxgb3_set_dummy_ops(tdev);
901 t3_tp_set_offload_mode(adapter, 0);
902 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
903
904 if (!adapter->open_device_map)
905 cxgb_down(adapter);
906
907 cxgb3_offload_deactivate(adapter);
908 return 0;
909}
910
911static int cxgb_open(struct net_device *dev)
912{
913 int err;
914 struct adapter *adapter = dev->priv;
915 struct port_info *pi = netdev_priv(dev);
916 int other_ports = adapter->open_device_map & PORT_MASK;
917
918 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
919 return err;
920
921 set_bit(pi->port_id, &adapter->open_device_map);
922 if (!ofld_disable) {
923 err = offload_open(dev);
924 if (err)
925 printk(KERN_WARNING
926 "Could not initialize offload capabilities\n");
927 }
928
929 link_start(dev);
930 t3_port_intr_enable(adapter, pi->port_id);
931 netif_start_queue(dev);
932 if (!other_ports)
933 schedule_chk_task(adapter);
934
935 return 0;
936}
937
938static int cxgb_close(struct net_device *dev)
939{
940 struct adapter *adapter = dev->priv;
941 struct port_info *p = netdev_priv(dev);
942
943 t3_port_intr_disable(adapter, p->port_id);
944 netif_stop_queue(dev);
945 p->phy.ops->power_down(&p->phy, 1);
946 netif_carrier_off(dev);
947 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
948
949 spin_lock(&adapter->work_lock); /* sync with update task */
950 clear_bit(p->port_id, &adapter->open_device_map);
951 spin_unlock(&adapter->work_lock);
952
953 if (!(adapter->open_device_map & PORT_MASK))
954 cancel_rearming_delayed_workqueue(cxgb3_wq,
955 &adapter->adap_check_task);
956
957 if (!adapter->open_device_map)
958 cxgb_down(adapter);
959
960 return 0;
961}
962
963static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
964{
965 struct adapter *adapter = dev->priv;
966 struct port_info *p = netdev_priv(dev);
967 struct net_device_stats *ns = &p->netstats;
968 const struct mac_stats *pstats;
969
970 spin_lock(&adapter->stats_lock);
971 pstats = t3_mac_update_stats(&p->mac);
972 spin_unlock(&adapter->stats_lock);
973
974 ns->tx_bytes = pstats->tx_octets;
975 ns->tx_packets = pstats->tx_frames;
976 ns->rx_bytes = pstats->rx_octets;
977 ns->rx_packets = pstats->rx_frames;
978 ns->multicast = pstats->rx_mcast_frames;
979
980 ns->tx_errors = pstats->tx_underrun;
981 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
982 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
983 pstats->rx_fifo_ovfl;
984
985 /* detailed rx_errors */
986 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
987 ns->rx_over_errors = 0;
988 ns->rx_crc_errors = pstats->rx_fcs_errs;
989 ns->rx_frame_errors = pstats->rx_symbol_errs;
990 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
991 ns->rx_missed_errors = pstats->rx_cong_drops;
992
993 /* detailed tx_errors */
994 ns->tx_aborted_errors = 0;
995 ns->tx_carrier_errors = 0;
996 ns->tx_fifo_errors = pstats->tx_underrun;
997 ns->tx_heartbeat_errors = 0;
998 ns->tx_window_errors = 0;
999 return ns;
1000}
1001
1002static u32 get_msglevel(struct net_device *dev)
1003{
1004 struct adapter *adapter = dev->priv;
1005
1006 return adapter->msg_enable;
1007}
1008
1009static void set_msglevel(struct net_device *dev, u32 val)
1010{
1011 struct adapter *adapter = dev->priv;
1012
1013 adapter->msg_enable = val;
1014}
1015
1016static char stats_strings[][ETH_GSTRING_LEN] = {
1017 "TxOctetsOK ",
1018 "TxFramesOK ",
1019 "TxMulticastFramesOK",
1020 "TxBroadcastFramesOK",
1021 "TxPauseFrames ",
1022 "TxUnderrun ",
1023 "TxExtUnderrun ",
1024
1025 "TxFrames64 ",
1026 "TxFrames65To127 ",
1027 "TxFrames128To255 ",
1028 "TxFrames256To511 ",
1029 "TxFrames512To1023 ",
1030 "TxFrames1024To1518 ",
1031 "TxFrames1519ToMax ",
1032
1033 "RxOctetsOK ",
1034 "RxFramesOK ",
1035 "RxMulticastFramesOK",
1036 "RxBroadcastFramesOK",
1037 "RxPauseFrames ",
1038 "RxFCSErrors ",
1039 "RxSymbolErrors ",
1040 "RxShortErrors ",
1041 "RxJabberErrors ",
1042 "RxLengthErrors ",
1043 "RxFIFOoverflow ",
1044
1045 "RxFrames64 ",
1046 "RxFrames65To127 ",
1047 "RxFrames128To255 ",
1048 "RxFrames256To511 ",
1049 "RxFrames512To1023 ",
1050 "RxFrames1024To1518 ",
1051 "RxFrames1519ToMax ",
1052
1053 "PhyFIFOErrors ",
1054 "TSO ",
1055 "VLANextractions ",
1056 "VLANinsertions ",
1057 "TxCsumOffload ",
1058 "RxCsumGood ",
1059 "RxDrops "
1060};
1061
1062static int get_stats_count(struct net_device *dev)
1063{
1064 return ARRAY_SIZE(stats_strings);
1065}
1066
1067#define T3_REGMAP_SIZE (3 * 1024)
1068
1069static int get_regs_len(struct net_device *dev)
1070{
1071 return T3_REGMAP_SIZE;
1072}
1073
1074static int get_eeprom_len(struct net_device *dev)
1075{
1076 return EEPROMSIZE;
1077}
1078
1079static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1080{
1081 u32 fw_vers = 0;
1082 struct adapter *adapter = dev->priv;
1083
1084 t3_get_fw_version(adapter, &fw_vers);
1085
1086 strcpy(info->driver, DRV_NAME);
1087 strcpy(info->version, DRV_VERSION);
1088 strcpy(info->bus_info, pci_name(adapter->pdev));
1089 if (!fw_vers)
1090 strcpy(info->fw_version, "N/A");
4aac3899 1091 else {
4d22de3e 1092 snprintf(info->fw_version, sizeof(info->fw_version),
1093 "%s %u.%u.%u",
1094 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1095 G_FW_VERSION_MAJOR(fw_vers),
1096 G_FW_VERSION_MINOR(fw_vers),
1097 G_FW_VERSION_MICRO(fw_vers));
1098 }
1099}
1100
1101static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1102{
1103 if (stringset == ETH_SS_STATS)
1104 memcpy(data, stats_strings, sizeof(stats_strings));
1105}
1106
1107static unsigned long collect_sge_port_stats(struct adapter *adapter,
1108 struct port_info *p, int idx)
1109{
1110 int i;
1111 unsigned long tot = 0;
1112
1113 for (i = 0; i < p->nqsets; ++i)
1114 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1115 return tot;
1116}
1117
1118static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1119 u64 *data)
1120{
1121 struct adapter *adapter = dev->priv;
1122 struct port_info *pi = netdev_priv(dev);
1123 const struct mac_stats *s;
1124
1125 spin_lock(&adapter->stats_lock);
1126 s = t3_mac_update_stats(&pi->mac);
1127 spin_unlock(&adapter->stats_lock);
1128
1129 *data++ = s->tx_octets;
1130 *data++ = s->tx_frames;
1131 *data++ = s->tx_mcast_frames;
1132 *data++ = s->tx_bcast_frames;
1133 *data++ = s->tx_pause;
1134 *data++ = s->tx_underrun;
1135 *data++ = s->tx_fifo_urun;
1136
1137 *data++ = s->tx_frames_64;
1138 *data++ = s->tx_frames_65_127;
1139 *data++ = s->tx_frames_128_255;
1140 *data++ = s->tx_frames_256_511;
1141 *data++ = s->tx_frames_512_1023;
1142 *data++ = s->tx_frames_1024_1518;
1143 *data++ = s->tx_frames_1519_max;
1144
1145 *data++ = s->rx_octets;
1146 *data++ = s->rx_frames;
1147 *data++ = s->rx_mcast_frames;
1148 *data++ = s->rx_bcast_frames;
1149 *data++ = s->rx_pause;
1150 *data++ = s->rx_fcs_errs;
1151 *data++ = s->rx_symbol_errs;
1152 *data++ = s->rx_short;
1153 *data++ = s->rx_jabber;
1154 *data++ = s->rx_too_long;
1155 *data++ = s->rx_fifo_ovfl;
1156
1157 *data++ = s->rx_frames_64;
1158 *data++ = s->rx_frames_65_127;
1159 *data++ = s->rx_frames_128_255;
1160 *data++ = s->rx_frames_256_511;
1161 *data++ = s->rx_frames_512_1023;
1162 *data++ = s->rx_frames_1024_1518;
1163 *data++ = s->rx_frames_1519_max;
1164
1165 *data++ = pi->phy.fifo_errors;
1166
1167 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1168 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1169 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1170 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1171 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1172 *data++ = s->rx_cong_drops;
1173}
1174
1175static inline void reg_block_dump(struct adapter *ap, void *buf,
1176 unsigned int start, unsigned int end)
1177{
1178 u32 *p = buf + start;
1179
1180 for (; start <= end; start += sizeof(u32))
1181 *p++ = t3_read_reg(ap, start);
1182}
1183
1184static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1185 void *buf)
1186{
1187 struct adapter *ap = dev->priv;
1188
1189 /*
1190 * Version scheme:
1191 * bits 0..9: chip version
1192 * bits 10..15: chip revision
1193 * bit 31: set for PCIe cards
1194 */
1195 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1196
1197 /*
1198 * We skip the MAC statistics registers because they are clear-on-read.
1199 * Also reading multi-register stats would need to synchronize with the
1200 * periodic mac stats accumulation. Hard to justify the complexity.
1201 */
1202 memset(buf, 0, T3_REGMAP_SIZE);
1203 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1204 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1205 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1206 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1207 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1208 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1209 XGM_REG(A_XGM_SERDES_STAT3, 1));
1210 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1211 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1212}
1213
1214static int restart_autoneg(struct net_device *dev)
1215{
1216 struct port_info *p = netdev_priv(dev);
1217
1218 if (!netif_running(dev))
1219 return -EAGAIN;
1220 if (p->link_config.autoneg != AUTONEG_ENABLE)
1221 return -EINVAL;
1222 p->phy.ops->autoneg_restart(&p->phy);
1223 return 0;
1224}
1225
1226static int cxgb3_phys_id(struct net_device *dev, u32 data)
1227{
1228 int i;
1229 struct adapter *adapter = dev->priv;
1230
1231 if (data == 0)
1232 data = 2;
1233
1234 for (i = 0; i < data * 2; i++) {
1235 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1236 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1237 if (msleep_interruptible(500))
1238 break;
1239 }
1240 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1241 F_GPIO0_OUT_VAL);
1242 return 0;
1243}
1244
1245static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1246{
1247 struct port_info *p = netdev_priv(dev);
1248
1249 cmd->supported = p->link_config.supported;
1250 cmd->advertising = p->link_config.advertising;
1251
1252 if (netif_carrier_ok(dev)) {
1253 cmd->speed = p->link_config.speed;
1254 cmd->duplex = p->link_config.duplex;
1255 } else {
1256 cmd->speed = -1;
1257 cmd->duplex = -1;
1258 }
1259
1260 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1261 cmd->phy_address = p->phy.addr;
1262 cmd->transceiver = XCVR_EXTERNAL;
1263 cmd->autoneg = p->link_config.autoneg;
1264 cmd->maxtxpkt = 0;
1265 cmd->maxrxpkt = 0;
1266 return 0;
1267}
1268
1269static int speed_duplex_to_caps(int speed, int duplex)
1270{
1271 int cap = 0;
1272
1273 switch (speed) {
1274 case SPEED_10:
1275 if (duplex == DUPLEX_FULL)
1276 cap = SUPPORTED_10baseT_Full;
1277 else
1278 cap = SUPPORTED_10baseT_Half;
1279 break;
1280 case SPEED_100:
1281 if (duplex == DUPLEX_FULL)
1282 cap = SUPPORTED_100baseT_Full;
1283 else
1284 cap = SUPPORTED_100baseT_Half;
1285 break;
1286 case SPEED_1000:
1287 if (duplex == DUPLEX_FULL)
1288 cap = SUPPORTED_1000baseT_Full;
1289 else
1290 cap = SUPPORTED_1000baseT_Half;
1291 break;
1292 case SPEED_10000:
1293 if (duplex == DUPLEX_FULL)
1294 cap = SUPPORTED_10000baseT_Full;
1295 }
1296 return cap;
1297}
1298
1299#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1300 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1301 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1302 ADVERTISED_10000baseT_Full)
1303
1304static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1305{
1306 struct port_info *p = netdev_priv(dev);
1307 struct link_config *lc = &p->link_config;
1308
1309 if (!(lc->supported & SUPPORTED_Autoneg))
1310 return -EOPNOTSUPP; /* can't change speed/duplex */
1311
1312 if (cmd->autoneg == AUTONEG_DISABLE) {
1313 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1314
1315 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1316 return -EINVAL;
1317 lc->requested_speed = cmd->speed;
1318 lc->requested_duplex = cmd->duplex;
1319 lc->advertising = 0;
1320 } else {
1321 cmd->advertising &= ADVERTISED_MASK;
1322 cmd->advertising &= lc->supported;
1323 if (!cmd->advertising)
1324 return -EINVAL;
1325 lc->requested_speed = SPEED_INVALID;
1326 lc->requested_duplex = DUPLEX_INVALID;
1327 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1328 }
1329 lc->autoneg = cmd->autoneg;
1330 if (netif_running(dev))
1331 t3_link_start(&p->phy, &p->mac, lc);
1332 return 0;
1333}
1334
1335static void get_pauseparam(struct net_device *dev,
1336 struct ethtool_pauseparam *epause)
1337{
1338 struct port_info *p = netdev_priv(dev);
1339
1340 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1341 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1342 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1343}
1344
1345static int set_pauseparam(struct net_device *dev,
1346 struct ethtool_pauseparam *epause)
1347{
1348 struct port_info *p = netdev_priv(dev);
1349 struct link_config *lc = &p->link_config;
1350
1351 if (epause->autoneg == AUTONEG_DISABLE)
1352 lc->requested_fc = 0;
1353 else if (lc->supported & SUPPORTED_Autoneg)
1354 lc->requested_fc = PAUSE_AUTONEG;
1355 else
1356 return -EINVAL;
1357
1358 if (epause->rx_pause)
1359 lc->requested_fc |= PAUSE_RX;
1360 if (epause->tx_pause)
1361 lc->requested_fc |= PAUSE_TX;
1362 if (lc->autoneg == AUTONEG_ENABLE) {
1363 if (netif_running(dev))
1364 t3_link_start(&p->phy, &p->mac, lc);
1365 } else {
1366 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1367 if (netif_running(dev))
1368 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1369 }
1370 return 0;
1371}
1372
1373static u32 get_rx_csum(struct net_device *dev)
1374{
1375 struct port_info *p = netdev_priv(dev);
1376
1377 return p->rx_csum_offload;
1378}
1379
1380static int set_rx_csum(struct net_device *dev, u32 data)
1381{
1382 struct port_info *p = netdev_priv(dev);
1383
1384 p->rx_csum_offload = data;
1385 return 0;
1386}
1387
1388static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1389{
1390 const struct adapter *adapter = dev->priv;
1391 const struct port_info *pi = netdev_priv(dev);
1392 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1393
1394 e->rx_max_pending = MAX_RX_BUFFERS;
1395 e->rx_mini_max_pending = 0;
1396 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1397 e->tx_max_pending = MAX_TXQ_ENTRIES;
1398
1399 e->rx_pending = q->fl_size;
1400 e->rx_mini_pending = q->rspq_size;
1401 e->rx_jumbo_pending = q->jumbo_size;
1402 e->tx_pending = q->txq_size[0];
1403}
1404
1405static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1406{
1407 int i;
05b97b30 1408 struct qset_params *q;
4d22de3e 1409 struct adapter *adapter = dev->priv;
05b97b30 1410 const struct port_info *pi = netdev_priv(dev);
1411
1412 if (e->rx_pending > MAX_RX_BUFFERS ||
1413 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1414 e->tx_pending > MAX_TXQ_ENTRIES ||
1415 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1416 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1417 e->rx_pending < MIN_FL_ENTRIES ||
1418 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1419 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1420 return -EINVAL;
1421
1422 if (adapter->flags & FULL_INIT_DONE)
1423 return -EBUSY;
1424
1425 q = &adapter->params.sge.qset[pi->first_qset];
1426 for (i = 0; i < pi->nqsets; ++i, ++q) {
1427 q->rspq_size = e->rx_mini_pending;
1428 q->fl_size = e->rx_pending;
1429 q->jumbo_size = e->rx_jumbo_pending;
1430 q->txq_size[0] = e->tx_pending;
1431 q->txq_size[1] = e->tx_pending;
1432 q->txq_size[2] = e->tx_pending;
1433 }
1434 return 0;
1435}
1436
1437static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1438{
1439 struct adapter *adapter = dev->priv;
1440 struct qset_params *qsp = &adapter->params.sge.qset[0];
1441 struct sge_qset *qs = &adapter->sge.qs[0];
1442
1443 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1444 return -EINVAL;
1445
1446 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1447 t3_update_qset_coalesce(qs, qsp);
1448 return 0;
1449}
1450
1451static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1452{
1453 struct adapter *adapter = dev->priv;
1454 struct qset_params *q = adapter->params.sge.qset;
1455
1456 c->rx_coalesce_usecs = q->coalesce_usecs;
1457 return 0;
1458}
1459
1460static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1461 u8 * data)
1462{
1463 int i, err = 0;
1464 struct adapter *adapter = dev->priv;
1465
1466 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1467 if (!buf)
1468 return -ENOMEM;
1469
1470 e->magic = EEPROM_MAGIC;
1471 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1472 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1473
1474 if (!err)
1475 memcpy(data, buf + e->offset, e->len);
1476 kfree(buf);
1477 return err;
1478}
1479
1480static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1481 u8 * data)
1482{
1483 u8 *buf;
1484 int err = 0;
1485 u32 aligned_offset, aligned_len, *p;
1486 struct adapter *adapter = dev->priv;
1487
1488 if (eeprom->magic != EEPROM_MAGIC)
1489 return -EINVAL;
1490
1491 aligned_offset = eeprom->offset & ~3;
1492 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1493
1494 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1495 buf = kmalloc(aligned_len, GFP_KERNEL);
1496 if (!buf)
1497 return -ENOMEM;
1498 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1499 if (!err && aligned_len > 4)
1500 err = t3_seeprom_read(adapter,
1501 aligned_offset + aligned_len - 4,
1502 (u32 *) & buf[aligned_len - 4]);
1503 if (err)
1504 goto out;
1505 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1506 } else
1507 buf = data;
1508
1509 err = t3_seeprom_wp(adapter, 0);
1510 if (err)
1511 goto out;
1512
1513 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1514 err = t3_seeprom_write(adapter, aligned_offset, *p);
1515 aligned_offset += 4;
1516 }
1517
1518 if (!err)
1519 err = t3_seeprom_wp(adapter, 1);
1520out:
1521 if (buf != data)
1522 kfree(buf);
1523 return err;
1524}
1525
1526static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1527{
1528 wol->supported = 0;
1529 wol->wolopts = 0;
1530 memset(&wol->sopass, 0, sizeof(wol->sopass));
1531}
1532
1533static const struct ethtool_ops cxgb_ethtool_ops = {
1534 .get_settings = get_settings,
1535 .set_settings = set_settings,
1536 .get_drvinfo = get_drvinfo,
1537 .get_msglevel = get_msglevel,
1538 .set_msglevel = set_msglevel,
1539 .get_ringparam = get_sge_param,
1540 .set_ringparam = set_sge_param,
1541 .get_coalesce = get_coalesce,
1542 .set_coalesce = set_coalesce,
1543 .get_eeprom_len = get_eeprom_len,
1544 .get_eeprom = get_eeprom,
1545 .set_eeprom = set_eeprom,
1546 .get_pauseparam = get_pauseparam,
1547 .set_pauseparam = set_pauseparam,
1548 .get_rx_csum = get_rx_csum,
1549 .set_rx_csum = set_rx_csum,
1550 .get_tx_csum = ethtool_op_get_tx_csum,
1551 .set_tx_csum = ethtool_op_set_tx_csum,
1552 .get_sg = ethtool_op_get_sg,
1553 .set_sg = ethtool_op_set_sg,
1554 .get_link = ethtool_op_get_link,
1555 .get_strings = get_strings,
1556 .phys_id = cxgb3_phys_id,
1557 .nway_reset = restart_autoneg,
1558 .get_stats_count = get_stats_count,
1559 .get_ethtool_stats = get_stats,
1560 .get_regs_len = get_regs_len,
1561 .get_regs = get_regs,
1562 .get_wol = get_wol,
1563 .get_tso = ethtool_op_get_tso,
1564 .set_tso = ethtool_op_set_tso,
1565 .get_perm_addr = ethtool_op_get_perm_addr
1566};
1567
1568static int in_range(int val, int lo, int hi)
1569{
1570 return val < 0 || (val <= hi && val >= lo);
1571}
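/*
 * Editorial note: a negative value passes in_range() by design; the extension
 * ioctl below uses negative fields to mean "leave this parameter unchanged".
 * For example, in_range(-1, 0, 255) is true while in_range(300, 0, 255) is
 * false.
 */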
1572
1573static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1574{
1575 int ret;
1576 u32 cmd;
1577 struct adapter *adapter = dev->priv;
1578
1579 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1580 return -EFAULT;
1581
1582 switch (cmd) {
1583 case CHELSIO_SET_QSET_PARAMS:{
1584 int i;
1585 struct qset_params *q;
1586 struct ch_qset_params t;
1587
1588 if (!capable(CAP_NET_ADMIN))
1589 return -EPERM;
1590 if (copy_from_user(&t, useraddr, sizeof(t)))
1591 return -EFAULT;
1592 if (t.qset_idx >= SGE_QSETS)
1593 return -EINVAL;
1594 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1595 !in_range(t.cong_thres, 0, 255) ||
1596 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1597 MAX_TXQ_ENTRIES) ||
1598 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1599 MAX_TXQ_ENTRIES) ||
1600 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1601 MAX_CTRL_TXQ_ENTRIES) ||
1602 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1603 MAX_RX_BUFFERS)
1604 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1605 MAX_RX_JUMBO_BUFFERS)
1606 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1607 MAX_RSPQ_ENTRIES))
1608 return -EINVAL;
1609 if ((adapter->flags & FULL_INIT_DONE) &&
1610 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1611 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1612 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1613 t.polling >= 0 || t.cong_thres >= 0))
1614 return -EBUSY;
1615
1616 q = &adapter->params.sge.qset[t.qset_idx];
1617
1618 if (t.rspq_size >= 0)
1619 q->rspq_size = t.rspq_size;
1620 if (t.fl_size[0] >= 0)
1621 q->fl_size = t.fl_size[0];
1622 if (t.fl_size[1] >= 0)
1623 q->jumbo_size = t.fl_size[1];
1624 if (t.txq_size[0] >= 0)
1625 q->txq_size[0] = t.txq_size[0];
1626 if (t.txq_size[1] >= 0)
1627 q->txq_size[1] = t.txq_size[1];
1628 if (t.txq_size[2] >= 0)
1629 q->txq_size[2] = t.txq_size[2];
1630 if (t.cong_thres >= 0)
1631 q->cong_thres = t.cong_thres;
1632 if (t.intr_lat >= 0) {
1633 struct sge_qset *qs =
1634 &adapter->sge.qs[t.qset_idx];
1635
1636 q->coalesce_usecs = t.intr_lat;
1637 t3_update_qset_coalesce(qs, q);
1638 }
1639 if (t.polling >= 0) {
1640 if (adapter->flags & USING_MSIX)
1641 q->polling = t.polling;
1642 else {
1643 /* No polling with INTx for T3A */
1644 if (adapter->params.rev == 0 &&
1645 !(adapter->flags & USING_MSI))
1646 t.polling = 0;
1647
1648 for (i = 0; i < SGE_QSETS; i++) {
1649 q = &adapter->params.sge.
1650 qset[i];
1651 q->polling = t.polling;
1652 }
1653 }
1654 }
1655 break;
1656 }
1657 case CHELSIO_GET_QSET_PARAMS:{
1658 struct qset_params *q;
1659 struct ch_qset_params t;
1660
1661 if (copy_from_user(&t, useraddr, sizeof(t)))
1662 return -EFAULT;
1663 if (t.qset_idx >= SGE_QSETS)
1664 return -EINVAL;
1665
1666 q = &adapter->params.sge.qset[t.qset_idx];
1667 t.rspq_size = q->rspq_size;
1668 t.txq_size[0] = q->txq_size[0];
1669 t.txq_size[1] = q->txq_size[1];
1670 t.txq_size[2] = q->txq_size[2];
1671 t.fl_size[0] = q->fl_size;
1672 t.fl_size[1] = q->jumbo_size;
1673 t.polling = q->polling;
1674 t.intr_lat = q->coalesce_usecs;
1675 t.cong_thres = q->cong_thres;
1676
1677 if (copy_to_user(useraddr, &t, sizeof(t)))
1678 return -EFAULT;
1679 break;
1680 }
1681 case CHELSIO_SET_QSET_NUM:{
1682 struct ch_reg edata;
1683 struct port_info *pi = netdev_priv(dev);
1684 unsigned int i, first_qset = 0, other_qsets = 0;
1685
1686 if (!capable(CAP_NET_ADMIN))
1687 return -EPERM;
1688 if (adapter->flags & FULL_INIT_DONE)
1689 return -EBUSY;
1690 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1691 return -EFAULT;
1692 if (edata.val < 1 ||
1693 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1694 return -EINVAL;
1695
1696 for_each_port(adapter, i)
1697 if (adapter->port[i] && adapter->port[i] != dev)
1698 other_qsets += adap2pinfo(adapter, i)->nqsets;
1699
1700 if (edata.val + other_qsets > SGE_QSETS)
1701 return -EINVAL;
1702
1703 pi->nqsets = edata.val;
1704
1705 for_each_port(adapter, i)
1706 if (adapter->port[i]) {
1707 pi = adap2pinfo(adapter, i);
1708 pi->first_qset = first_qset;
1709 first_qset += pi->nqsets;
1710 }
1711 break;
1712 }
1713 case CHELSIO_GET_QSET_NUM:{
1714 struct ch_reg edata;
1715 struct port_info *pi = netdev_priv(dev);
1716
1717 edata.cmd = CHELSIO_GET_QSET_NUM;
1718 edata.val = pi->nqsets;
1719 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1720 return -EFAULT;
1721 break;
1722 }
1723 case CHELSIO_LOAD_FW:{
1724 u8 *fw_data;
1725 struct ch_mem_range t;
1726
1727 if (!capable(CAP_NET_ADMIN))
1728 return -EPERM;
1729 if (copy_from_user(&t, useraddr, sizeof(t)))
1730 return -EFAULT;
1731
1732 fw_data = kmalloc(t.len, GFP_KERNEL);
1733 if (!fw_data)
1734 return -ENOMEM;
1735
1736 if (copy_from_user
1737 (fw_data, useraddr + sizeof(t), t.len)) {
1738 kfree(fw_data);
1739 return -EFAULT;
1740 }
1741
1742 ret = t3_load_fw(adapter, fw_data, t.len);
1743 kfree(fw_data);
1744 if (ret)
1745 return ret;
1746 break;
1747 }
1748 case CHELSIO_SETMTUTAB:{
1749 struct ch_mtus m;
1750 int i;
1751
1752 if (!is_offload(adapter))
1753 return -EOPNOTSUPP;
1754 if (!capable(CAP_NET_ADMIN))
1755 return -EPERM;
1756 if (offload_running(adapter))
1757 return -EBUSY;
1758 if (copy_from_user(&m, useraddr, sizeof(m)))
1759 return -EFAULT;
1760 if (m.nmtus != NMTUS)
1761 return -EINVAL;
1762 if (m.mtus[0] < 81) /* accommodate SACK */
1763 return -EINVAL;
1764
1765 /* MTUs must be in ascending order */
1766 for (i = 1; i < NMTUS; ++i)
1767 if (m.mtus[i] < m.mtus[i - 1])
1768 return -EINVAL;
1769
1770 memcpy(adapter->params.mtus, m.mtus,
1771 sizeof(adapter->params.mtus));
1772 break;
1773 }
1774 case CHELSIO_GET_PM:{
1775 struct tp_params *p = &adapter->params.tp;
1776 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1777
1778 if (!is_offload(adapter))
1779 return -EOPNOTSUPP;
1780 m.tx_pg_sz = p->tx_pg_size;
1781 m.tx_num_pg = p->tx_num_pgs;
1782 m.rx_pg_sz = p->rx_pg_size;
1783 m.rx_num_pg = p->rx_num_pgs;
1784 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1785 if (copy_to_user(useraddr, &m, sizeof(m)))
1786 return -EFAULT;
1787 break;
1788 }
1789 case CHELSIO_SET_PM:{
1790 struct ch_pm m;
1791 struct tp_params *p = &adapter->params.tp;
1792
1793 if (!is_offload(adapter))
1794 return -EOPNOTSUPP;
1795 if (!capable(CAP_NET_ADMIN))
1796 return -EPERM;
1797 if (adapter->flags & FULL_INIT_DONE)
1798 return -EBUSY;
1799 if (copy_from_user(&m, useraddr, sizeof(m)))
1800 return -EFAULT;
1801 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1802 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1803 return -EINVAL; /* not power of 2 */
1804 if (!(m.rx_pg_sz & 0x14000))
1805 return -EINVAL; /* not 16KB or 64KB */
1806 if (!(m.tx_pg_sz & 0x1554000))
1807 return -EINVAL;
1808 if (m.tx_num_pg == -1)
1809 m.tx_num_pg = p->tx_num_pgs;
1810 if (m.rx_num_pg == -1)
1811 m.rx_num_pg = p->rx_num_pgs;
1812 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1813 return -EINVAL;
1814 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1815 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1816 return -EINVAL;
1817 p->rx_pg_size = m.rx_pg_sz;
1818 p->tx_pg_size = m.tx_pg_sz;
1819 p->rx_num_pgs = m.rx_num_pg;
1820 p->tx_num_pgs = m.tx_num_pg;
1821 break;
1822 }
1823 case CHELSIO_GET_MEM:{
1824 struct ch_mem_range t;
1825 struct mc7 *mem;
1826 u64 buf[32];
1827
1828 if (!is_offload(adapter))
1829 return -EOPNOTSUPP;
1830 if (!(adapter->flags & FULL_INIT_DONE))
1831 return -EIO; /* need the memory controllers */
1832 if (copy_from_user(&t, useraddr, sizeof(t)))
1833 return -EFAULT;
1834 if ((t.addr & 7) || (t.len & 7))
1835 return -EINVAL;
1836 if (t.mem_id == MEM_CM)
1837 mem = &adapter->cm;
1838 else if (t.mem_id == MEM_PMRX)
1839 mem = &adapter->pmrx;
1840 else if (t.mem_id == MEM_PMTX)
1841 mem = &adapter->pmtx;
1842 else
1843 return -EINVAL;
1844
1845 /*
1846 * Version scheme:
1847 * bits 0..9: chip version
1848 * bits 10..15: chip revision
1849 */
1850 t.version = 3 | (adapter->params.rev << 10);
1851 if (copy_to_user(useraddr, &t, sizeof(t)))
1852 return -EFAULT;
1853
1854 /*
1855 * Read 256 bytes at a time as len can be large and we don't
1856 * want to use huge intermediate buffers.
1857 */
1858 useraddr += sizeof(t); /* advance to start of buffer */
1859 while (t.len) {
1860 unsigned int chunk =
1861 min_t(unsigned int, t.len, sizeof(buf));
1862
1863 ret =
1864 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1865 buf);
1866 if (ret)
1867 return ret;
1868 if (copy_to_user(useraddr, buf, chunk))
1869 return -EFAULT;
1870 useraddr += chunk;
1871 t.addr += chunk;
1872 t.len -= chunk;
1873 }
1874 break;
1875 }
1876 case CHELSIO_SET_TRACE_FILTER:{
1877 struct ch_trace t;
1878 const struct trace_params *tp;
1879
1880 if (!capable(CAP_NET_ADMIN))
1881 return -EPERM;
1882 if (!offload_running(adapter))
1883 return -EAGAIN;
1884 if (copy_from_user(&t, useraddr, sizeof(t)))
1885 return -EFAULT;
1886
1887 tp = (const struct trace_params *)&t.sip;
1888 if (t.config_tx)
1889 t3_config_trace_filter(adapter, tp, 0,
1890 t.invert_match,
1891 t.trace_tx);
1892 if (t.config_rx)
1893 t3_config_trace_filter(adapter, tp, 1,
1894 t.invert_match,
1895 t.trace_rx);
1896 break;
1897 }
1898 default:
1899 return -EOPNOTSUPP;
1900 }
1901 return 0;
1902}
1903
1904static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1905{
1906 int ret, mmd;
1907 struct adapter *adapter = dev->priv;
1908 struct port_info *pi = netdev_priv(dev);
1909 struct mii_ioctl_data *data = if_mii(req);
1910
1911 switch (cmd) {
1912 case SIOCGMIIPHY:
1913 data->phy_id = pi->phy.addr;
1914 /* FALLTHRU */
1915 case SIOCGMIIREG:{
1916 u32 val;
1917 struct cphy *phy = &pi->phy;
1918
1919 if (!phy->mdio_read)
1920 return -EOPNOTSUPP;
1921 if (is_10G(adapter)) {
1922 mmd = data->phy_id >> 8;
1923 if (!mmd)
1924 mmd = MDIO_DEV_PCS;
1925 else if (mmd > MDIO_DEV_XGXS)
1926 return -EINVAL;
1927
1928 ret =
1929 phy->mdio_read(adapter, data->phy_id & 0x1f,
1930 mmd, data->reg_num, &val);
1931 } else
1932 ret =
1933 phy->mdio_read(adapter, data->phy_id & 0x1f,
1934 0, data->reg_num & 0x1f,
1935 &val);
1936 if (!ret)
1937 data->val_out = val;
1938 break;
1939 }
1940 case SIOCSMIIREG:{
1941 struct cphy *phy = &pi->phy;
1942
1943 if (!capable(CAP_NET_ADMIN))
1944 return -EPERM;
1945 if (!phy->mdio_write)
1946 return -EOPNOTSUPP;
1947 if (is_10G(adapter)) {
1948 mmd = data->phy_id >> 8;
1949 if (!mmd)
1950 mmd = MDIO_DEV_PCS;
1951 else if (mmd > MDIO_DEV_XGXS)
1952 return -EINVAL;
1953
1954 ret =
1955 phy->mdio_write(adapter,
1956 data->phy_id & 0x1f, mmd,
1957 data->reg_num,
1958 data->val_in);
1959 } else
1960 ret =
1961 phy->mdio_write(adapter,
1962 data->phy_id & 0x1f, 0,
1963 data->reg_num & 0x1f,
1964 data->val_in);
1965 break;
1966 }
1967 case SIOCCHIOCTL:
1968 return cxgb_extension_ioctl(dev, req->ifr_data);
1969 default:
1970 return -EOPNOTSUPP;
1971 }
1972 return ret;
1973}
1974
1975static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1976{
1977 int ret;
1978 struct adapter *adapter = dev->priv;
1979 struct port_info *pi = netdev_priv(dev);
1980
1981 if (new_mtu < 81) /* accommodate SACK */
1982 return -EINVAL;
1983 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1984 return ret;
1985 dev->mtu = new_mtu;
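	/*
	 * Refresh the per-port MTU table and, on rev 0 adapters with the
	 * offload engine running, reload the hardware MTU table so it
	 * reflects the new port MTU.
	 */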
1986 init_port_mtus(adapter);
1987 if (adapter->params.rev == 0 && offload_running(adapter))
1988 t3_load_mtus(adapter, adapter->params.mtus,
1989 adapter->params.a_wnd, adapter->params.b_wnd,
1990 adapter->port[0]->mtu);
1991 return 0;
1992}
1993
1994static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1995{
1996 struct adapter *adapter = dev->priv;
1997 struct port_info *pi = netdev_priv(dev);
1998 struct sockaddr *addr = p;
1999
2000 if (!is_valid_ether_addr(addr->sa_data))
2001 return -EINVAL;
2002
2003 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2004 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
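	/*
	 * When offload is running, also refresh this port's source MAC
	 * table (SMT) entry so the offload engine uses the new address.
	 */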
2005 if (offload_running(adapter))
2006 write_smt_entry(adapter, pi->port_id);
2007 return 0;
2008}
2009
2010/**
2011 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2012 * @adap: the adapter
2013 * @p: the port
2014 *
2015 * Ensures that current Rx processing on any of the queues associated with
2016 * the given port completes before returning. We do this by acquiring and
2017 * releasing the locks of the response queues associated with the port.
2018 */
2019static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2020{
2021 int i;
2022
2023 for (i = 0; i < p->nqsets; i++) {
2024 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2025
2026 spin_lock_irq(&q->lock);
2027 spin_unlock_irq(&q->lock);
2028 }
2029}
2030
2031static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2032{
2033 struct adapter *adapter = dev->priv;
2034 struct port_info *pi = netdev_priv(dev);
2035
2036 pi->vlan_grp = grp;
2037 if (adapter->params.rev > 0)
2038 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2039 else {
2040 /* single control for all ports */
2041 unsigned int i, have_vlans = 0;
2042 for_each_port(adapter, i)
2043 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2044
2045 t3_set_vlan_accel(adapter, 1, have_vlans);
2046 }
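	/*
	 * Wait for any Rx processing that may still reference the previous
	 * vlan_grp to complete before returning.
	 */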
2047 t3_synchronize_rx(adapter, pi);
2048}
2049
2050static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2051{
2052 /* nothing */
2053}
2054
2055#ifdef CONFIG_NET_POLL_CONTROLLER
2056static void cxgb_netpoll(struct net_device *dev)
2057{
2058 struct adapter *adapter = dev->priv;
2059 struct sge_qset *qs = dev2qset(dev);
2060
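	/*
	 * t3_intr_handler() returns the handler matching the adapter's
	 * current interrupt mode; invoke it directly to service the queues.
	 */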
2061 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2062 adapter);
2063}
2064#endif
2065
2066/*
2067 * Periodic accumulation of MAC statistics.
2068 */
2069static void mac_stats_update(struct adapter *adapter)
2070{
2071 int i;
2072
2073 for_each_port(adapter, i) {
2074 struct net_device *dev = adapter->port[i];
2075 struct port_info *p = netdev_priv(dev);
2076
2077 if (netif_running(dev)) {
2078 spin_lock(&adapter->stats_lock);
2079 t3_mac_update_stats(&p->mac);
2080 spin_unlock(&adapter->stats_lock);
2081 }
2082 }
2083}
2084
2085static void check_link_status(struct adapter *adapter)
2086{
2087 int i;
2088
2089 for_each_port(adapter, i) {
2090 struct net_device *dev = adapter->port[i];
2091 struct port_info *p = netdev_priv(dev);
2092
2093 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2094 t3_link_changed(adapter, i);
2095 }
2096}
2097
2098static void t3_adap_check_task(struct work_struct *work)
2099{
2100 struct adapter *adapter = container_of(work, struct adapter,
2101 adap_check_task.work);
2102 const struct adapter_params *p = &adapter->params;
2103
2104 adapter->check_task_cnt++;
2105
2106 /* Check link status for PHYs without interrupts */
2107 if (p->linkpoll_period)
2108 check_link_status(adapter);
2109
2110 /* Accumulate MAC stats if needed */
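	/*
	 * Assumption: linkpoll_period is expressed in tenths of a second,
	 * so check_task_cnt * linkpoll_period / 10 below approximates the
	 * seconds elapsed since the last stats accumulation.
	 */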
2111 if (!p->linkpoll_period ||
2112 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2113 p->stats_update_period) {
2114 mac_stats_update(adapter);
2115 adapter->check_task_cnt = 0;
2116 }
2117
2118 /* Schedule the next check update if any port is active. */
2119 spin_lock(&adapter->work_lock);
2120 if (adapter->open_device_map & PORT_MASK)
2121 schedule_chk_task(adapter);
2122 spin_unlock(&adapter->work_lock);
2123}
2124
2125/*
2126 * Processes external (PHY) interrupts in process context.
2127 */
2128static void ext_intr_task(struct work_struct *work)
2129{
2130 struct adapter *adapter = container_of(work, struct adapter,
2131 ext_intr_handler_task);
2132
2133 t3_phy_intr_handler(adapter);
2134
2135 /* Now reenable external interrupts */
2136 spin_lock_irq(&adapter->work_lock);
2137 if (adapter->slow_intr_mask) {
2138 adapter->slow_intr_mask |= F_T3DBG;
2139 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2140 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2141 adapter->slow_intr_mask);
2142 }
2143 spin_unlock_irq(&adapter->work_lock);
2144}
2145
2146/*
2147 * Interrupt-context handler for external (PHY) interrupts.
2148 */
2149void t3_os_ext_intr_handler(struct adapter *adapter)
2150{
2151 /*
2152 * Schedule a task to handle external interrupts as they may be slow
2153 * and we use a mutex to protect MDIO registers. We disable PHY
2154 * interrupts in the meantime and let the task reenable them when
2155 * it's done.
2156 */
2157 spin_lock(&adapter->work_lock);
2158 if (adapter->slow_intr_mask) {
2159 adapter->slow_intr_mask &= ~F_T3DBG;
2160 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2161 adapter->slow_intr_mask);
2162 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2163 }
2164 spin_unlock(&adapter->work_lock);
2165}
2166
2167void t3_fatal_err(struct adapter *adapter)
2168{
2169 unsigned int fw_status[4];
2170
2171 if (adapter->flags & FULL_INIT_DONE) {
2172 t3_sge_stop(adapter);
2173 t3_intr_disable(adapter);
2174 }
2175 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2176 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2177 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2178 fw_status[0], fw_status[1],
2179 fw_status[2], fw_status[3]);
2180
2181}
2182
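/*
 * Request one MSI-X vector per SGE queue set plus one for slow-path
 * interrupts.  A positive return from pci_enable_msix() reports how many
 * vectors are actually available; in that case we give up on MSI-X and the
 * caller falls back to MSI or legacy interrupts.
 */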
2183static int __devinit cxgb_enable_msix(struct adapter *adap)
2184{
2185 struct msix_entry entries[SGE_QSETS + 1];
2186 int i, err;
2187
2188 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2189 entries[i].entry = i;
2190
2191 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2192 if (!err) {
2193 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2194 adap->msix_info[i].vec = entries[i].vector;
2195 } else if (err > 0)
2196 dev_info(&adap->pdev->dev,
2197 "only %d MSI-X vectors left, not using MSI-X\n", err);
2198 return err;
2199}
2200
2201static void __devinit print_port_info(struct adapter *adap,
2202 const struct adapter_info *ai)
2203{
2204 static const char *pci_variant[] = {
2205 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2206 };
2207
2208 int i;
2209 char buf[80];
2210
2211 if (is_pcie(adap))
2212 snprintf(buf, sizeof(buf), "%s x%d",
2213 pci_variant[adap->params.pci.variant],
2214 adap->params.pci.width);
2215 else
2216 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2217 pci_variant[adap->params.pci.variant],
2218 adap->params.pci.speed, adap->params.pci.width);
2219
2220 for_each_port(adap, i) {
2221 struct net_device *dev = adap->port[i];
2222 const struct port_info *pi = netdev_priv(dev);
2223
2224 if (!test_bit(i, &adap->registered_device_map))
2225 continue;
2226 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2227 dev->name, ai->desc, pi->port_type->desc,
2228 adap->params.rev, buf,
2229 (adap->flags & USING_MSIX) ? " MSI-X" :
2230 (adap->flags & USING_MSI) ? " MSI" : "");
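		/*
		 * adap->name aliases the name of the first successfully
		 * registered netdev (set in init_one), so the memory summary
		 * below is printed only once per adapter.
		 */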
2231 if (adap->name == dev->name && adap->params.vpd.mclk)
2232 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2233 adap->name, t3_mc7_size(&adap->cm) >> 20,
2234 t3_mc7_size(&adap->pmtx) >> 20,
2235 t3_mc7_size(&adap->pmrx) >> 20);
2236 }
2237}
2238
2239static int __devinit init_one(struct pci_dev *pdev,
2240 const struct pci_device_id *ent)
2241{
2242 static int version_printed;
2243
2244 int i, err, pci_using_dac = 0;
2245 unsigned long mmio_start, mmio_len;
2246 const struct adapter_info *ai;
2247 struct adapter *adapter = NULL;
2248 struct port_info *pi;
2249
2250 if (!version_printed) {
2251 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2252 ++version_printed;
2253 }
2254
2255 if (!cxgb3_wq) {
2256 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2257 if (!cxgb3_wq) {
2258 printk(KERN_ERR DRV_NAME
2259 ": cannot initialize work queue\n");
2260 return -ENOMEM;
2261 }
2262 }
2263
2264 err = pci_request_regions(pdev, DRV_NAME);
2265 if (err) {
2266 /* Just info, some other driver may have claimed the device. */
2267 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2268 return err;
2269 }
2270
2271 err = pci_enable_device(pdev);
2272 if (err) {
2273 dev_err(&pdev->dev, "cannot enable PCI device\n");
2274 goto out_release_regions;
2275 }
2276
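	/*
	 * Prefer 64-bit DMA and fall back to a 32-bit mask if the platform
	 * cannot provide it; pci_using_dac records the outcome so that
	 * NETIF_F_HIGHDMA can be advertised on the ports below.
	 */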
2277 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2278 pci_using_dac = 1;
2279 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2280 if (err) {
2281 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2282 "coherent allocations\n");
2283 goto out_disable_device;
2284 }
2285 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2286 dev_err(&pdev->dev, "no usable DMA configuration\n");
2287 goto out_disable_device;
2288 }
2289
2290 pci_set_master(pdev);
2291
2292 mmio_start = pci_resource_start(pdev, 0);
2293 mmio_len = pci_resource_len(pdev, 0);
2294 ai = t3_get_adapter_info(ent->driver_data);
2295
2296 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2297 if (!adapter) {
2298 err = -ENOMEM;
2299 goto out_disable_device;
2300 }
2301
2302 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2303 if (!adapter->regs) {
2304 dev_err(&pdev->dev, "cannot map device registers\n");
2305 err = -ENOMEM;
2306 goto out_free_adapter;
2307 }
2308
2309 adapter->pdev = pdev;
2310 adapter->name = pci_name(pdev);
2311 adapter->msg_enable = dflt_msg_enable;
2312 adapter->mmio_len = mmio_len;
2313
2314 mutex_init(&adapter->mdio_lock);
2315 spin_lock_init(&adapter->work_lock);
2316 spin_lock_init(&adapter->stats_lock);
2317
2318 INIT_LIST_HEAD(&adapter->adapter_list);
2319 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2320 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2321
2322 for (i = 0; i < ai->nports; ++i) {
2323 struct net_device *netdev;
2324
2325 netdev = alloc_etherdev(sizeof(struct port_info));
2326 if (!netdev) {
2327 err = -ENOMEM;
2328 goto out_free_dev;
2329 }
2330
2331 SET_MODULE_OWNER(netdev);
2332 SET_NETDEV_DEV(netdev, &pdev->dev);
2333
2334 adapter->port[i] = netdev;
2335 pi = netdev_priv(netdev);
2336 pi->rx_csum_offload = 1;
2337 pi->nqsets = 1;
2338 pi->first_qset = i;
2339 pi->activity = 0;
2340 pi->port_id = i;
2341 netif_carrier_off(netdev);
2342 netdev->irq = pdev->irq;
2343 netdev->mem_start = mmio_start;
2344 netdev->mem_end = mmio_start + mmio_len - 1;
2345 netdev->priv = adapter;
2346 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2347 netdev->features |= NETIF_F_LLTX;
2348 if (pci_using_dac)
2349 netdev->features |= NETIF_F_HIGHDMA;
2350
2351 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2352 netdev->vlan_rx_register = vlan_rx_register;
2353 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2354
2355 netdev->open = cxgb_open;
2356 netdev->stop = cxgb_close;
2357 netdev->hard_start_xmit = t3_eth_xmit;
2358 netdev->get_stats = cxgb_get_stats;
2359 netdev->set_multicast_list = cxgb_set_rxmode;
2360 netdev->do_ioctl = cxgb_ioctl;
2361 netdev->change_mtu = cxgb_change_mtu;
2362 netdev->set_mac_address = cxgb_set_mac_addr;
2363#ifdef CONFIG_NET_POLL_CONTROLLER
2364 netdev->poll_controller = cxgb_netpoll;
2365#endif
2366 netdev->weight = 64;
2367
2368 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2369 }
2370
2371 pci_set_drvdata(pdev, adapter->port[0]);
2372 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2373 err = -ENODEV;
2374 goto out_free_dev;
2375 }
2376
2377 /*
2378 * The card is now ready to go. If any errors occur during device
2379 * registration we do not fail the whole card but rather proceed only
2380 * with the ports we manage to register successfully. However we must
2381 * register at least one net device.
2382 */
2383 for_each_port(adapter, i) {
2384 err = register_netdev(adapter->port[i]);
2385 if (err)
2386 dev_warn(&pdev->dev,
2387 "cannot register net device %s, skipping\n",
2388 adapter->port[i]->name);
2389 else {
2390 /*
2391 * Change the name we use for messages to the name of
2392 * the first successfully registered interface.
2393 */
2394 if (!adapter->registered_device_map)
2395 adapter->name = adapter->port[i]->name;
2396
2397 __set_bit(i, &adapter->registered_device_map);
2398 }
2399 }
2400 if (!adapter->registered_device_map) {
2401 dev_err(&pdev->dev, "could not register any net devices\n");
2402 goto out_free_dev;
2403 }
2404
2405 /* Driver's ready. Reflect it on LEDs */
2406 t3_led_ready(adapter);
2407
2408 if (is_offload(adapter)) {
2409 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2410 cxgb3_adapter_ofld(adapter);
2411 }
2412
2413 /* See what interrupts we'll be using */
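	/* msi > 1: try MSI-X first; msi > 0: try MSI; otherwise legacy INTx */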
2414 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2415 adapter->flags |= USING_MSIX;
2416 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2417 adapter->flags |= USING_MSI;
2418
0ee8d33c 2419 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2420 &cxgb3_attr_group);
2421
2422 print_port_info(adapter, ai);
2423 return 0;
2424
2425out_free_dev:
2426 iounmap(adapter->regs);
2427 for (i = ai->nports - 1; i >= 0; --i)
2428 if (adapter->port[i])
2429 free_netdev(adapter->port[i]);
2430
2431out_free_adapter:
2432 kfree(adapter);
2433
2434out_disable_device:
2435 pci_disable_device(pdev);
2436out_release_regions:
2437 pci_release_regions(pdev);
2438 pci_set_drvdata(pdev, NULL);
2439 return err;
2440}
2441
2442static void __devexit remove_one(struct pci_dev *pdev)
2443{
2444 struct net_device *dev = pci_get_drvdata(pdev);
2445
2446 if (dev) {
2447 int i;
2448 struct adapter *adapter = dev->priv;
2449
2450 t3_sge_stop(adapter);
0ee8d33c 2451 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2452 &cxgb3_attr_group);
2453
2454 for_each_port(adapter, i)
2455 if (test_bit(i, &adapter->registered_device_map))
2456 unregister_netdev(adapter->port[i]);
2457
2458 if (is_offload(adapter)) {
2459 cxgb3_adapter_unofld(adapter);
2460 if (test_bit(OFFLOAD_DEVMAP_BIT,
2461 &adapter->open_device_map))
2462 offload_close(&adapter->tdev);
2463 }
2464
2465 t3_free_sge_resources(adapter);
2466 cxgb_disable_msi(adapter);
2467
2468 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2469 if (adapter->dummy_netdev[i]) {
2470 free_netdev(adapter->dummy_netdev[i]);
2471 adapter->dummy_netdev[i] = NULL;
2472 }
2473
2474 for_each_port(adapter, i)
2475 if (adapter->port[i])
2476 free_netdev(adapter->port[i]);
2477
2478 iounmap(adapter->regs);
2479 kfree(adapter);
2480 pci_release_regions(pdev);
2481 pci_disable_device(pdev);
2482 pci_set_drvdata(pdev, NULL);
2483 }
2484}
2485
2486static struct pci_driver driver = {
2487 .name = DRV_NAME,
2488 .id_table = cxgb3_pci_tbl,
2489 .probe = init_one,
2490 .remove = __devexit_p(remove_one),
2491};
2492
2493static int __init cxgb3_init_module(void)
2494{
2495 int ret;
2496
2497 cxgb3_offload_init();
2498
2499 ret = pci_register_driver(&driver);
2500 return ret;
2501}
2502
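/*
 * The cxgb3 workqueue is created on demand in init_one() and destroyed only
 * here at module unload, hence the NULL check below.
 */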
2503static void __exit cxgb3_cleanup_module(void)
2504{
2505 pci_unregister_driver(&driver);
2506 if (cxgb3_wq)
2507 destroy_workqueue(cxgb3_wq);
2508}
2509
2510module_init(cxgb3_init_module);
2511module_exit(cxgb3_cleanup_module);