cxgb3 - manage sysfs attributes per port
drivers/net/cxgb3/cxgb3_main.c (linux-2.6-block.git)
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
45#include <asm/uaccess.h>
46
47#include "common.h"
48#include "cxgb3_ioctl.h"
49#include "regs.h"
50#include "cxgb3_offload.h"
51#include "version.h"
52
53#include "cxgb3_ctl_defs.h"
54#include "t3_cpl.h"
55#include "firmware_exports.h"
56
57enum {
58 MAX_TXQ_ENTRIES = 16384,
59 MAX_CTRL_TXQ_ENTRIES = 1024,
60 MAX_RSPQ_ENTRIES = 16384,
61 MAX_RX_BUFFERS = 16384,
62 MAX_RX_JUMBO_BUFFERS = 16384,
63 MIN_TXQ_ENTRIES = 4,
64 MIN_CTRL_TXQ_ENTRIES = 4,
65 MIN_RSPQ_ENTRIES = 32,
66 MIN_FL_ENTRIES = 32
67};
68
69#define PORT_MASK ((1 << MAX_NPORTS) - 1)
70
71#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
72 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
73 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
74
75#define EEPROM_MAGIC 0x38E2F10C
76
77#define CH_DEVICE(devid, ssid, idx) \
78 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
79
80static const struct pci_device_id cxgb3_pci_tbl[] = {
81 CH_DEVICE(0x20, 1, 0), /* PE9000 */
82 CH_DEVICE(0x21, 1, 1), /* T302E */
83 CH_DEVICE(0x22, 1, 2), /* T310E */
84 CH_DEVICE(0x23, 1, 3), /* T320X */
85 CH_DEVICE(0x24, 1, 1), /* T302X */
86 CH_DEVICE(0x25, 1, 3), /* T320E */
87 CH_DEVICE(0x26, 1, 2), /* T310X */
88 CH_DEVICE(0x30, 1, 2), /* T3B10 */
89 CH_DEVICE(0x31, 1, 3), /* T3B20 */
90 CH_DEVICE(0x32, 1, 1), /* T3B02 */
91 {0,}
92};
93
94MODULE_DESCRIPTION(DRV_DESC);
95MODULE_AUTHOR("Chelsio Communications");
96MODULE_LICENSE("Dual BSD/GPL");
97MODULE_VERSION(DRV_VERSION);
98MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
99
100static int dflt_msg_enable = DFLT_MSG_ENABLE;
101
102module_param(dflt_msg_enable, int, 0644);
103MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
104
105/*
106 * The driver uses the best interrupt scheme available on a platform in the
107 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
108 * of these schemes the driver may consider as follows:
109 *
110 * msi = 2: choose from among all three options
111 * msi = 1: only consider MSI and pin interrupts
112 * msi = 0: force pin interrupts
113 */
114static int msi = 2;
115
116module_param(msi, int, 0644);
117MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
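/*
 * Usage sketch (values are illustrative): "modprobe cxgb3 msi=1" limits the
 * driver to MSI or legacy pin interrupts, "msi=0" forces pin interrupts, and
 * the default msi=2 lets the probe path try MSI-X first, as described above.
 */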
118
119/*
120 * The driver enables offload as a default.
121 * To disable it, use ofld_disable = 1.
122 */
123
124static int ofld_disable = 0;
125
126module_param(ofld_disable, int, 0644);
127MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
128
129/*
130 * We have work elements that we need to cancel when an interface is taken
131 * down. Normally the work elements would be executed by keventd but that
132 * can deadlock because of linkwatch. If our close method takes the rtnl
133 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
134 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
135 * for our work to complete. Get our own work queue to solve this.
136 */
137static struct workqueue_struct *cxgb3_wq;
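/*
 * Sketch of the intended pattern (the queue itself is allocated at module
 * init, outside this excerpt, presumably via create_singlethread_workqueue()):
 * delayed work such as the periodic adapter check is queued on cxgb3_wq, e.g.
 *
 *	queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
 *
 * so a linkwatch event waiting on the rtnl lock cannot stall it.
 */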
138
139/**
140 * link_report - show link status and link speed/duplex
141 * @dev: the port whose settings are to be reported
142 *
143 * Shows the link status, speed, and duplex of a port.
144 */
145static void link_report(struct net_device *dev)
146{
147 if (!netif_carrier_ok(dev))
148 printk(KERN_INFO "%s: link down\n", dev->name);
149 else {
150 const char *s = "10Mbps";
151 const struct port_info *p = netdev_priv(dev);
152
153 switch (p->link_config.speed) {
154 case SPEED_10000:
155 s = "10Gbps";
156 break;
157 case SPEED_1000:
158 s = "1000Mbps";
159 break;
160 case SPEED_100:
161 s = "100Mbps";
162 break;
163 }
164
165 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
166 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
167 }
168}
169
170/**
171 * t3_os_link_changed - handle link status changes
172 * @adapter: the adapter associated with the link change
173 * @port_id: the port index whose link status has changed
174 * @link_stat: the new status of the link
175 * @speed: the new speed setting
176 * @duplex: the new duplex setting
177 * @pause: the new flow-control setting
178 *
179 * This is the OS-dependent handler for link status changes. The OS
180 * neutral handler takes care of most of the processing for these events,
181 * then calls this handler for any OS-specific processing.
182 */
183void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
184 int speed, int duplex, int pause)
185{
186 struct net_device *dev = adapter->port[port_id];
187
188 /* Skip changes from disabled ports. */
189 if (!netif_running(dev))
190 return;
191
192 if (link_stat != netif_carrier_ok(dev)) {
193 if (link_stat)
194 netif_carrier_on(dev);
195 else
196 netif_carrier_off(dev);
197 link_report(dev);
198 }
199}
200
201static void cxgb_set_rxmode(struct net_device *dev)
202{
203 struct t3_rx_mode rm;
204 struct port_info *pi = netdev_priv(dev);
205
206 init_rx_mode(&rm, dev, dev->mc_list);
207 t3_mac_set_rx_mode(&pi->mac, &rm);
208}
209
210/**
211 * link_start - enable a port
212 * @dev: the device to enable
213 *
214 * Performs the MAC and PHY actions needed to enable a port.
215 */
216static void link_start(struct net_device *dev)
217{
218 struct t3_rx_mode rm;
219 struct port_info *pi = netdev_priv(dev);
220 struct cmac *mac = &pi->mac;
221
222 init_rx_mode(&rm, dev, dev->mc_list);
223 t3_mac_reset(mac);
224 t3_mac_set_mtu(mac, dev->mtu);
225 t3_mac_set_address(mac, 0, dev->dev_addr);
226 t3_mac_set_rx_mode(mac, &rm);
227 t3_link_start(&pi->phy, mac, &pi->link_config);
228 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
229}
230
231static inline void cxgb_disable_msi(struct adapter *adapter)
232{
233 if (adapter->flags & USING_MSIX) {
234 pci_disable_msix(adapter->pdev);
235 adapter->flags &= ~USING_MSIX;
236 } else if (adapter->flags & USING_MSI) {
237 pci_disable_msi(adapter->pdev);
238 adapter->flags &= ~USING_MSI;
239 }
240}
241
242/*
243 * Interrupt handler for asynchronous events used with MSI-X.
244 */
245static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
246{
247 t3_slow_intr_handler(cookie);
248 return IRQ_HANDLED;
249}
250
251/*
252 * Name the MSI-X interrupts.
253 */
254static void name_msix_vecs(struct adapter *adap)
255{
256 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
257
258 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
259 adap->msix_info[0].desc[n] = 0;
260
261 for_each_port(adap, j) {
262 struct net_device *d = adap->port[j];
263 const struct port_info *pi = netdev_priv(d);
264
265 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
266 snprintf(adap->msix_info[msi_idx].desc, n,
267 "%s (queue %d)", d->name, i);
268 adap->msix_info[msi_idx].desc[n] = 0;
269 }
270 }
271}
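/*
 * Example of the resulting labels (interface names are hypothetical): vector 0
 * carries the adapter's own name, and the per-qset vectors read
 * "eth2 (queue 0)", "eth2 (queue 1)", and so on, which is what shows up in
 * /proc/interrupts.
 */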
272
273static int request_msix_data_irqs(struct adapter *adap)
274{
275 int i, j, err, qidx = 0;
276
277 for_each_port(adap, i) {
278 int nqsets = adap2pinfo(adap, i)->nqsets;
279
280 for (j = 0; j < nqsets; ++j) {
281 err = request_irq(adap->msix_info[qidx + 1].vec,
282 t3_intr_handler(adap,
283 adap->sge.qs[qidx].
284 rspq.polling), 0,
285 adap->msix_info[qidx + 1].desc,
286 &adap->sge.qs[qidx]);
287 if (err) {
288 while (--qidx >= 0)
289 free_irq(adap->msix_info[qidx + 1].vec,
290 &adap->sge.qs[qidx]);
291 return err;
292 }
293 qidx++;
294 }
295 }
296 return 0;
297}
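/*
 * Note on indexing: MSI-X vector 0 is reserved for the slow/async interrupt
 * (requested in cxgb_up()), so the data interrupt for queue set i uses
 * msix_info[i + 1].
 */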
298
299/**
300 * setup_rss - configure RSS
301 * @adap: the adapter
302 *
303 * Sets up RSS to distribute packets to multiple receive queues. We
304 * configure the RSS CPU lookup table to distribute to the number of HW
305 * receive queues, and the response queue lookup table to narrow that
306 * down to the response queues actually configured for each port.
307 * We always configure the RSS mapping for two ports since the mapping
308 * table has plenty of entries.
309 */
310static void setup_rss(struct adapter *adap)
311{
312 int i;
313 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
314 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
315 u8 cpus[SGE_QSETS + 1];
316 u16 rspq_map[RSS_TABLE_SIZE];
317
318 for (i = 0; i < SGE_QSETS; ++i)
319 cpus[i] = i;
320 cpus[SGE_QSETS] = 0xff; /* terminator */
321
322 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
323 rspq_map[i] = i % nq0;
324 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
325 }
326
327 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
328 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
329 V_RRCPLCPUSIZE(6), cpus, rspq_map);
330}
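/*
 * Worked example (hypothetical configuration): with two queue sets per port on
 * a two-port adapter (nq0 = nq1 = 2), the first half of rspq_map becomes
 * 0,1,0,1,... and the second half 2,3,2,3,..., so RSS hashes landing in either
 * half of the table round-robin over that port's queue sets.
 */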
331
332/*
333 * If we have multiple receive queues per port serviced by NAPI we need one
334 * netdevice per queue as NAPI operates on netdevices. We already have one
335 * netdevice, namely the one associated with the interface, so we use dummy
336 * ones for any additional queues. Note that these netdevices exist purely
337 * so that NAPI has something to work with, they do not represent network
338 * ports and are not registered.
339 */
340static int init_dummy_netdevs(struct adapter *adap)
341{
342 int i, j, dummy_idx = 0;
343 struct net_device *nd;
344
345 for_each_port(adap, i) {
346 struct net_device *dev = adap->port[i];
347 const struct port_info *pi = netdev_priv(dev);
348
349 for (j = 0; j < pi->nqsets - 1; j++) {
350 if (!adap->dummy_netdev[dummy_idx]) {
351 nd = alloc_netdev(0, "", ether_setup);
352 if (!nd)
353 goto free_all;
354
355 nd->priv = adap;
356 nd->weight = 64;
357 set_bit(__LINK_STATE_START, &nd->state);
358 adap->dummy_netdev[dummy_idx] = nd;
359 }
360 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
361 dummy_idx++;
362 }
363 }
364 return 0;
365
366free_all:
367 while (--dummy_idx >= 0) {
368 free_netdev(adap->dummy_netdev[dummy_idx]);
369 adap->dummy_netdev[dummy_idx] = NULL;
370 }
371 return -ENOMEM;
372}
373
374/*
375 * Wait until all NAPI handlers are descheduled. This includes the handlers of
376 * both netdevices representing interfaces and the dummy ones for the extra
377 * queues.
378 */
379static void quiesce_rx(struct adapter *adap)
380{
381 int i;
382 struct net_device *dev;
383
384 for_each_port(adap, i) {
385 dev = adap->port[i];
386 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
387 msleep(1);
388 }
389
390 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
391 dev = adap->dummy_netdev[i];
392 if (dev)
393 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
394 msleep(1);
395 }
396}
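/*
 * __LINK_STATE_RX_SCHED is the bit NAPI keeps set while a device's poll
 * routine is on the poll list, so waiting for it to clear guarantees no
 * receive handler is still running for that device.
 */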
397
398/**
399 * setup_sge_qsets - configure SGE Tx/Rx/response queues
400 * @adap: the adapter
401 *
402 * Determines how many sets of SGE queues to use and initializes them.
403 * We support multiple queue sets per port if we have MSI-X, otherwise
404 * just one queue set per port.
405 */
406static int setup_sge_qsets(struct adapter *adap)
407{
408 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
409 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
410
411 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
412 irq_idx = -1;
413
414 for_each_port(adap, i) {
415 struct net_device *dev = adap->port[i];
416 const struct port_info *pi = netdev_priv(dev);
417
418 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
419 err = t3_sge_alloc_qset(adap, qset_idx, 1,
420 (adap->flags & USING_MSIX) ? qset_idx + 1 :
421 irq_idx,
422 &adap->params.sge.qset[qset_idx], ntxq,
423 j == 0 ? dev :
424 adap-> dummy_netdev[dummy_dev_idx++]);
425 if (err) {
426 t3_free_sge_resources(adap);
427 return err;
428 }
429 }
430 }
431
432 return 0;
433}
434
435static ssize_t attr_show(struct device *d, struct device_attribute *attr,
436 char *buf,
437 ssize_t(*format) (struct net_device *, char *))
438{
439 ssize_t len;
440
441 /* Synchronize with ioctls that may shut down the device */
442 rtnl_lock();
443 len = (*format) (to_net_dev(d), buf);
444 rtnl_unlock();
445 return len;
446}
447
448static ssize_t attr_store(struct device *d, struct device_attribute *attr,
449 const char *buf, size_t len,
450 ssize_t(*set) (struct net_device *, unsigned int),
451 unsigned int min_val, unsigned int max_val)
452{
453 char *endp;
454 ssize_t ret;
455 unsigned int val;
456
457 if (!capable(CAP_NET_ADMIN))
458 return -EPERM;
459
460 val = simple_strtoul(buf, &endp, 0);
461 if (endp == buf || val < min_val || val > max_val)
462 return -EINVAL;
463
464 rtnl_lock();
465 ret = (*set) (to_net_dev(d), val);
466 if (!ret)
467 ret = len;
468 rtnl_unlock();
469 return ret;
470}
471
472#define CXGB3_SHOW(name, val_expr) \
473static ssize_t format_##name(struct net_device *dev, char *buf) \
474{ \
475 struct adapter *adap = dev->priv; \
476 return sprintf(buf, "%u\n", val_expr); \
477} \
478static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
479 char *buf) \
480{ \
481 return attr_show(d, attr, buf, format_##name); \
482}
483
484static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
485{
486 struct adapter *adap = dev->priv;
487
488 if (adap->flags & FULL_INIT_DONE)
489 return -EBUSY;
490 if (val && adap->params.rev == 0)
491 return -EINVAL;
492 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
493 return -EINVAL;
494 adap->params.mc5.nfilters = val;
495 return 0;
496}
497
498static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
499 const char *buf, size_t len)
500{
501 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
502}
503
504static ssize_t set_nservers(struct net_device *dev, unsigned int val)
505{
506 struct adapter *adap = dev->priv;
507
508 if (adap->flags & FULL_INIT_DONE)
509 return -EBUSY;
510 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
511 return -EINVAL;
512 adap->params.mc5.nservers = val;
513 return 0;
514}
515
516static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
517 const char *buf, size_t len)
518{
519 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
520}
521
522#define CXGB3_ATTR_R(name, val_expr) \
523CXGB3_SHOW(name, val_expr) \
524static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
525
526#define CXGB3_ATTR_RW(name, val_expr, store_method) \
527CXGB3_SHOW(name, val_expr) \
528static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
529
530CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
531CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
532CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
533
534static struct attribute *cxgb3_attrs[] = {
535 &dev_attr_cam_size.attr,
536 &dev_attr_nfilters.attr,
537 &dev_attr_nservers.attr,
538 NULL
539};
540
541static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
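/*
 * Usage sketch (assuming the group is registered on each port's net device
 * elsewhere in this file, per the per-port sysfs change; values and interface
 * name are illustrative): the attributes then appear under
 * /sys/class/net/<iface>/, e.g.
 *
 *	cat /sys/class/net/eth2/cam_size
 *	echo 8192 > /sys/class/net/eth2/nfilters
 *
 * Writes go through attr_store(), which takes the rtnl lock and rejects
 * changes once FULL_INIT_DONE is set.
 */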
542
543static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
544 char *buf, int sched)
545{
546 ssize_t len;
547 unsigned int v, addr, bpt, cpt;
548 struct adapter *adap = to_net_dev(d)->priv;
549
550 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
551 rtnl_lock();
552 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
553 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
554 if (sched & 1)
555 v >>= 16;
556 bpt = (v >> 8) & 0xff;
557 cpt = v & 0xff;
558 if (!cpt)
559 len = sprintf(buf, "disabled\n");
560 else {
561 v = (adap->params.vpd.cclk * 1000) / cpt;
562 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
563 }
564 rtnl_unlock();
565 return len;
566}
567
568static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
569 const char *buf, size_t len, int sched)
570{
571 char *endp;
572 ssize_t ret;
573 unsigned int val;
574 struct adapter *adap = to_net_dev(d)->priv;
575
576 if (!capable(CAP_NET_ADMIN))
577 return -EPERM;
578
579 val = simple_strtoul(buf, &endp, 0);
580 if (endp == buf || val > 10000000)
581 return -EINVAL;
582
583 rtnl_lock();
584 ret = t3_config_sched(adap, val, sched);
585 if (!ret)
586 ret = len;
587 rtnl_unlock();
588 return ret;
589}
590
591#define TM_ATTR(name, sched) \
592static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
593 char *buf) \
594{ \
595 return tm_attr_show(d, attr, buf, sched); \
596} \
597static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
598 const char *buf, size_t len) \
599{ \
600 return tm_attr_store(d, attr, buf, len, sched); \
601} \
602static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
603
604TM_ATTR(sched0, 0);
605TM_ATTR(sched1, 1);
606TM_ATTR(sched2, 2);
607TM_ATTR(sched3, 3);
608TM_ATTR(sched4, 4);
609TM_ATTR(sched5, 5);
610TM_ATTR(sched6, 6);
611TM_ATTR(sched7, 7);
612
613static struct attribute *offload_attrs[] = {
614 &dev_attr_sched0.attr,
615 &dev_attr_sched1.attr,
616 &dev_attr_sched2.attr,
617 &dev_attr_sched3.attr,
618 &dev_attr_sched4.attr,
619 &dev_attr_sched5.attr,
620 &dev_attr_sched6.attr,
621 &dev_attr_sched7.attr,
622 NULL
623};
624
625static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
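/*
 * Usage sketch (the group is attached to port 0's net device in offload_open()
 * below; value and interface name are illustrative): each schedX file reports
 * that Tx scheduler's rate as "<n> Kbps" or "disabled", and accepts a new
 * value up to 10000000 (presumably also in Kbps), e.g.
 *
 *	echo 500000 > /sys/class/net/eth2/sched0
 */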
626
627/*
628 * Sends an sk_buff to an offload queue driver
629 * after dealing with any active network taps.
630 */
631static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
632{
633 int ret;
634
635 local_bh_disable();
636 ret = t3_offload_tx(tdev, skb);
637 local_bh_enable();
638 return ret;
639}
640
641static int write_smt_entry(struct adapter *adapter, int idx)
642{
643 struct cpl_smt_write_req *req;
644 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
645
646 if (!skb)
647 return -ENOMEM;
648
649 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
650 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
651 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
652 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
653 req->iff = idx;
654 memset(req->src_mac1, 0, sizeof(req->src_mac1));
655 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
656 skb->priority = 1;
657 offload_tx(&adapter->tdev, skb);
658 return 0;
659}
660
661static int init_smt(struct adapter *adapter)
662{
663 int i;
664
665 for_each_port(adapter, i)
666 write_smt_entry(adapter, i);
667 return 0;
668}
669
670static void init_port_mtus(struct adapter *adapter)
671{
672 unsigned int mtus = adapter->port[0]->mtu;
673
674 if (adapter->port[1])
675 mtus |= adapter->port[1]->mtu << 16;
676 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
677}
678
679static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
680 int hi, int port)
681{
682 struct sk_buff *skb;
683 struct mngt_pktsched_wr *req;
684
685 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
686 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
687 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
688 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
689 req->sched = sched;
690 req->idx = qidx;
691 req->min = lo;
692 req->max = hi;
693 req->binding = port;
694 t3_mgmt_tx(adap, skb);
695}
696
697static void bind_qsets(struct adapter *adap)
698{
699 int i, j;
700
701 for_each_port(adap, i) {
702 const struct port_info *pi = adap2pinfo(adap, i);
703
704 for (j = 0; j < pi->nqsets; ++j)
705 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
706 -1, i);
707 }
708}
709
710/**
711 * cxgb_up - enable the adapter
712 * @adapter: adapter being enabled
713 *
714 * Called when the first port is enabled, this function performs the
715 * actions necessary to make an adapter operational, such as completing
716 * the initialization of HW modules, and enabling interrupts.
717 *
718 * Must be called with the rtnl lock held.
719 */
720static int cxgb_up(struct adapter *adap)
721{
722 int err = 0;
723
724 if (!(adap->flags & FULL_INIT_DONE)) {
725 err = t3_check_fw_version(adap);
726 if (err)
727 goto out;
728
729 err = init_dummy_netdevs(adap);
730 if (err)
731 goto out;
732
733 err = t3_init_hw(adap, 0);
734 if (err)
735 goto out;
736
737 err = setup_sge_qsets(adap);
738 if (err)
739 goto out;
740
741 setup_rss(adap);
742 adap->flags |= FULL_INIT_DONE;
743 }
744
745 t3_intr_clear(adap);
746
747 if (adap->flags & USING_MSIX) {
748 name_msix_vecs(adap);
749 err = request_irq(adap->msix_info[0].vec,
750 t3_async_intr_handler, 0,
751 adap->msix_info[0].desc, adap);
752 if (err)
753 goto irq_err;
754
755 if (request_msix_data_irqs(adap)) {
756 free_irq(adap->msix_info[0].vec, adap);
757 goto irq_err;
758 }
759 } else if ((err = request_irq(adap->pdev->irq,
760 t3_intr_handler(adap,
761 adap->sge.qs[0].rspq.
762 polling),
763 (adap->flags & USING_MSI) ?
764 0 : IRQF_SHARED,
765 adap->name, adap)))
766 goto irq_err;
767
768 t3_sge_start(adap);
769 t3_intr_enable(adap);
770
771 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
772 bind_qsets(adap);
773 adap->flags |= QUEUES_BOUND;
774
775out:
776 return err;
777irq_err:
778 CH_ERR(adap, "request_irq failed, err %d\n", err);
779 goto out;
780}
781
782/*
783 * Release resources when all the ports and offloading have been stopped.
784 */
785static void cxgb_down(struct adapter *adapter)
786{
787 t3_sge_stop(adapter);
788 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
789 t3_intr_disable(adapter);
790 spin_unlock_irq(&adapter->work_lock);
791
792 if (adapter->flags & USING_MSIX) {
793 int i, n = 0;
794
795 free_irq(adapter->msix_info[0].vec, adapter);
796 for_each_port(adapter, i)
797 n += adap2pinfo(adapter, i)->nqsets;
798
799 for (i = 0; i < n; ++i)
800 free_irq(adapter->msix_info[i + 1].vec,
801 &adapter->sge.qs[i]);
802 } else
803 free_irq(adapter->pdev->irq, adapter);
804
805 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
806 quiesce_rx(adapter);
807}
808
809static void schedule_chk_task(struct adapter *adap)
810{
811 unsigned int timeo;
812
813 timeo = adap->params.linkpoll_period ?
814 (HZ * adap->params.linkpoll_period) / 10 :
815 adap->params.stats_update_period * HZ;
816 if (timeo)
817 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
818}
819
820static int offload_open(struct net_device *dev)
821{
822 struct adapter *adapter = dev->priv;
823 struct t3cdev *tdev = T3CDEV(dev);
824 int adap_up = adapter->open_device_map & PORT_MASK;
825 int err = 0;
826
827 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
828 return 0;
829
830 if (!adap_up && (err = cxgb_up(adapter)) < 0)
831 return err;
832
833 t3_tp_set_offload_mode(adapter, 1);
834 tdev->lldev = adapter->port[0];
835 err = cxgb3_offload_activate(adapter);
836 if (err)
837 goto out;
838
839 init_port_mtus(adapter);
840 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
841 adapter->params.b_wnd,
842 adapter->params.rev == 0 ?
843 adapter->port[0]->mtu : 0xffff);
844 init_smt(adapter);
845
846 /* Never mind if the next step fails */
847 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
848
849 /* Call back all registered clients */
850 cxgb3_add_clients(tdev);
851
852out:
853 /* restore them in case the offload module has changed them */
854 if (err) {
855 t3_tp_set_offload_mode(adapter, 0);
856 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
857 cxgb3_set_dummy_ops(tdev);
858 }
859 return err;
860}
861
862static int offload_close(struct t3cdev *tdev)
863{
864 struct adapter *adapter = tdev2adap(tdev);
865
866 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
867 return 0;
868
869 /* Call back all registered clients */
870 cxgb3_remove_clients(tdev);
871
872 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
873
874 tdev->lldev = NULL;
875 cxgb3_set_dummy_ops(tdev);
876 t3_tp_set_offload_mode(adapter, 0);
877 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
878
879 if (!adapter->open_device_map)
880 cxgb_down(adapter);
881
882 cxgb3_offload_deactivate(adapter);
883 return 0;
884}
885
886static int cxgb_open(struct net_device *dev)
887{
888 int err;
889 struct adapter *adapter = dev->priv;
890 struct port_info *pi = netdev_priv(dev);
891 int other_ports = adapter->open_device_map & PORT_MASK;
892
893 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
894 return err;
895
896 set_bit(pi->port_id, &adapter->open_device_map);
897 if (!ofld_disable) {
898 err = offload_open(dev);
899 if (err)
900 printk(KERN_WARNING
901 "Could not initialize offload capabilities\n");
902 }
903
904 link_start(dev);
905 t3_port_intr_enable(adapter, pi->port_id);
906 netif_start_queue(dev);
907 if (!other_ports)
908 schedule_chk_task(adapter);
909
910 return 0;
911}
912
913static int cxgb_close(struct net_device *dev)
914{
915 struct adapter *adapter = dev->priv;
916 struct port_info *p = netdev_priv(dev);
917
918 t3_port_intr_disable(adapter, p->port_id);
919 netif_stop_queue(dev);
920 p->phy.ops->power_down(&p->phy, 1);
921 netif_carrier_off(dev);
922 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
923
924 spin_lock(&adapter->work_lock); /* sync with update task */
925 clear_bit(p->port_id, &adapter->open_device_map);
926 spin_unlock(&adapter->work_lock);
927
928 if (!(adapter->open_device_map & PORT_MASK))
929 cancel_rearming_delayed_workqueue(cxgb3_wq,
930 &adapter->adap_check_task);
931
932 if (!adapter->open_device_map)
933 cxgb_down(adapter);
934
935 return 0;
936}
937
938static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
939{
940 struct adapter *adapter = dev->priv;
941 struct port_info *p = netdev_priv(dev);
942 struct net_device_stats *ns = &p->netstats;
943 const struct mac_stats *pstats;
944
945 spin_lock(&adapter->stats_lock);
946 pstats = t3_mac_update_stats(&p->mac);
947 spin_unlock(&adapter->stats_lock);
948
949 ns->tx_bytes = pstats->tx_octets;
950 ns->tx_packets = pstats->tx_frames;
951 ns->rx_bytes = pstats->rx_octets;
952 ns->rx_packets = pstats->rx_frames;
953 ns->multicast = pstats->rx_mcast_frames;
954
955 ns->tx_errors = pstats->tx_underrun;
956 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
957 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
958 pstats->rx_fifo_ovfl;
959
960 /* detailed rx_errors */
961 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
962 ns->rx_over_errors = 0;
963 ns->rx_crc_errors = pstats->rx_fcs_errs;
964 ns->rx_frame_errors = pstats->rx_symbol_errs;
965 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
966 ns->rx_missed_errors = pstats->rx_cong_drops;
967
968 /* detailed tx_errors */
969 ns->tx_aborted_errors = 0;
970 ns->tx_carrier_errors = 0;
971 ns->tx_fifo_errors = pstats->tx_underrun;
972 ns->tx_heartbeat_errors = 0;
973 ns->tx_window_errors = 0;
974 return ns;
975}
976
977static u32 get_msglevel(struct net_device *dev)
978{
979 struct adapter *adapter = dev->priv;
980
981 return adapter->msg_enable;
982}
983
984static void set_msglevel(struct net_device *dev, u32 val)
985{
986 struct adapter *adapter = dev->priv;
987
988 adapter->msg_enable = val;
989}
990
991static char stats_strings[][ETH_GSTRING_LEN] = {
992 "TxOctetsOK ",
993 "TxFramesOK ",
994 "TxMulticastFramesOK",
995 "TxBroadcastFramesOK",
996 "TxPauseFrames ",
997 "TxUnderrun ",
998 "TxExtUnderrun ",
999
1000 "TxFrames64 ",
1001 "TxFrames65To127 ",
1002 "TxFrames128To255 ",
1003 "TxFrames256To511 ",
1004 "TxFrames512To1023 ",
1005 "TxFrames1024To1518 ",
1006 "TxFrames1519ToMax ",
1007
1008 "RxOctetsOK ",
1009 "RxFramesOK ",
1010 "RxMulticastFramesOK",
1011 "RxBroadcastFramesOK",
1012 "RxPauseFrames ",
1013 "RxFCSErrors ",
1014 "RxSymbolErrors ",
1015 "RxShortErrors ",
1016 "RxJabberErrors ",
1017 "RxLengthErrors ",
1018 "RxFIFOoverflow ",
1019
1020 "RxFrames64 ",
1021 "RxFrames65To127 ",
1022 "RxFrames128To255 ",
1023 "RxFrames256To511 ",
1024 "RxFrames512To1023 ",
1025 "RxFrames1024To1518 ",
1026 "RxFrames1519ToMax ",
1027
1028 "PhyFIFOErrors ",
1029 "TSO ",
1030 "VLANextractions ",
1031 "VLANinsertions ",
1032 "TxCsumOffload ",
1033 "RxCsumGood ",
1034 "RxDrops "
1035};
1036
1037static int get_stats_count(struct net_device *dev)
1038{
1039 return ARRAY_SIZE(stats_strings);
1040}
1041
1042#define T3_REGMAP_SIZE (3 * 1024)
1043
1044static int get_regs_len(struct net_device *dev)
1045{
1046 return T3_REGMAP_SIZE;
1047}
1048
1049static int get_eeprom_len(struct net_device *dev)
1050{
1051 return EEPROMSIZE;
1052}
1053
1054static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1055{
1056 u32 fw_vers = 0;
1057 struct adapter *adapter = dev->priv;
1058
1059 t3_get_fw_version(adapter, &fw_vers);
1060
1061 strcpy(info->driver, DRV_NAME);
1062 strcpy(info->version, DRV_VERSION);
1063 strcpy(info->bus_info, pci_name(adapter->pdev));
1064 if (!fw_vers)
1065 strcpy(info->fw_version, "N/A");
1066 else {
1067 snprintf(info->fw_version, sizeof(info->fw_version),
1068 "%s %u.%u.%u",
1069 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1070 G_FW_VERSION_MAJOR(fw_vers),
1071 G_FW_VERSION_MINOR(fw_vers),
1072 G_FW_VERSION_MICRO(fw_vers));
1073 }
1074}
1075
1076static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1077{
1078 if (stringset == ETH_SS_STATS)
1079 memcpy(data, stats_strings, sizeof(stats_strings));
1080}
1081
1082static unsigned long collect_sge_port_stats(struct adapter *adapter,
1083 struct port_info *p, int idx)
1084{
1085 int i;
1086 unsigned long tot = 0;
1087
1088 for (i = 0; i < p->nqsets; ++i)
1089 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1090 return tot;
1091}
1092
1093static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1094 u64 *data)
1095{
1096 struct adapter *adapter = dev->priv;
1097 struct port_info *pi = netdev_priv(dev);
1098 const struct mac_stats *s;
1099
1100 spin_lock(&adapter->stats_lock);
1101 s = t3_mac_update_stats(&pi->mac);
1102 spin_unlock(&adapter->stats_lock);
1103
1104 *data++ = s->tx_octets;
1105 *data++ = s->tx_frames;
1106 *data++ = s->tx_mcast_frames;
1107 *data++ = s->tx_bcast_frames;
1108 *data++ = s->tx_pause;
1109 *data++ = s->tx_underrun;
1110 *data++ = s->tx_fifo_urun;
1111
1112 *data++ = s->tx_frames_64;
1113 *data++ = s->tx_frames_65_127;
1114 *data++ = s->tx_frames_128_255;
1115 *data++ = s->tx_frames_256_511;
1116 *data++ = s->tx_frames_512_1023;
1117 *data++ = s->tx_frames_1024_1518;
1118 *data++ = s->tx_frames_1519_max;
1119
1120 *data++ = s->rx_octets;
1121 *data++ = s->rx_frames;
1122 *data++ = s->rx_mcast_frames;
1123 *data++ = s->rx_bcast_frames;
1124 *data++ = s->rx_pause;
1125 *data++ = s->rx_fcs_errs;
1126 *data++ = s->rx_symbol_errs;
1127 *data++ = s->rx_short;
1128 *data++ = s->rx_jabber;
1129 *data++ = s->rx_too_long;
1130 *data++ = s->rx_fifo_ovfl;
1131
1132 *data++ = s->rx_frames_64;
1133 *data++ = s->rx_frames_65_127;
1134 *data++ = s->rx_frames_128_255;
1135 *data++ = s->rx_frames_256_511;
1136 *data++ = s->rx_frames_512_1023;
1137 *data++ = s->rx_frames_1024_1518;
1138 *data++ = s->rx_frames_1519_max;
1139
1140 *data++ = pi->phy.fifo_errors;
1141
1142 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1143 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1144 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1145 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1146 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1147 *data++ = s->rx_cong_drops;
1148}
1149
1150static inline void reg_block_dump(struct adapter *ap, void *buf,
1151 unsigned int start, unsigned int end)
1152{
1153 u32 *p = buf + start;
1154
1155 for (; start <= end; start += sizeof(u32))
1156 *p++ = t3_read_reg(ap, start);
1157}
1158
1159static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1160 void *buf)
1161{
1162 struct adapter *ap = dev->priv;
1163
1164 /*
1165 * Version scheme:
1166 * bits 0..9: chip version
1167 * bits 10..15: chip revision
1168 * bit 31: set for PCIe cards
1169 */
1170 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1171
1172 /*
1173 * We skip the MAC statistics registers because they are clear-on-read.
1174 * Also reading multi-register stats would need to synchronize with the
1175 * periodic mac stats accumulation. Hard to justify the complexity.
1176 */
1177 memset(buf, 0, T3_REGMAP_SIZE);
1178 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1179 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1180 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1181 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1182 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1183 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1184 XGM_REG(A_XGM_SERDES_STAT3, 1));
1185 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1186 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1187}
1188
1189static int restart_autoneg(struct net_device *dev)
1190{
1191 struct port_info *p = netdev_priv(dev);
1192
1193 if (!netif_running(dev))
1194 return -EAGAIN;
1195 if (p->link_config.autoneg != AUTONEG_ENABLE)
1196 return -EINVAL;
1197 p->phy.ops->autoneg_restart(&p->phy);
1198 return 0;
1199}
1200
1201static int cxgb3_phys_id(struct net_device *dev, u32 data)
1202{
1203 int i;
1204 struct adapter *adapter = dev->priv;
1205
1206 if (data == 0)
1207 data = 2;
1208
1209 for (i = 0; i < data * 2; i++) {
1210 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1211 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1212 if (msleep_interruptible(500))
1213 break;
1214 }
1215 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1216 F_GPIO0_OUT_VAL);
1217 return 0;
1218}
1219
1220static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1221{
1222 struct port_info *p = netdev_priv(dev);
1223
1224 cmd->supported = p->link_config.supported;
1225 cmd->advertising = p->link_config.advertising;
1226
1227 if (netif_carrier_ok(dev)) {
1228 cmd->speed = p->link_config.speed;
1229 cmd->duplex = p->link_config.duplex;
1230 } else {
1231 cmd->speed = -1;
1232 cmd->duplex = -1;
1233 }
1234
1235 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1236 cmd->phy_address = p->phy.addr;
1237 cmd->transceiver = XCVR_EXTERNAL;
1238 cmd->autoneg = p->link_config.autoneg;
1239 cmd->maxtxpkt = 0;
1240 cmd->maxrxpkt = 0;
1241 return 0;
1242}
1243
1244static int speed_duplex_to_caps(int speed, int duplex)
1245{
1246 int cap = 0;
1247
1248 switch (speed) {
1249 case SPEED_10:
1250 if (duplex == DUPLEX_FULL)
1251 cap = SUPPORTED_10baseT_Full;
1252 else
1253 cap = SUPPORTED_10baseT_Half;
1254 break;
1255 case SPEED_100:
1256 if (duplex == DUPLEX_FULL)
1257 cap = SUPPORTED_100baseT_Full;
1258 else
1259 cap = SUPPORTED_100baseT_Half;
1260 break;
1261 case SPEED_1000:
1262 if (duplex == DUPLEX_FULL)
1263 cap = SUPPORTED_1000baseT_Full;
1264 else
1265 cap = SUPPORTED_1000baseT_Half;
1266 break;
1267 case SPEED_10000:
1268 if (duplex == DUPLEX_FULL)
1269 cap = SUPPORTED_10000baseT_Full;
1270 }
1271 return cap;
1272}
1273
1274#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1275 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1276 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1277 ADVERTISED_10000baseT_Full)
1278
1279static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1280{
1281 struct port_info *p = netdev_priv(dev);
1282 struct link_config *lc = &p->link_config;
1283
1284 if (!(lc->supported & SUPPORTED_Autoneg))
1285 return -EOPNOTSUPP; /* can't change speed/duplex */
1286
1287 if (cmd->autoneg == AUTONEG_DISABLE) {
1288 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1289
1290 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1291 return -EINVAL;
1292 lc->requested_speed = cmd->speed;
1293 lc->requested_duplex = cmd->duplex;
1294 lc->advertising = 0;
1295 } else {
1296 cmd->advertising &= ADVERTISED_MASK;
1297 cmd->advertising &= lc->supported;
1298 if (!cmd->advertising)
1299 return -EINVAL;
1300 lc->requested_speed = SPEED_INVALID;
1301 lc->requested_duplex = DUPLEX_INVALID;
1302 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1303 }
1304 lc->autoneg = cmd->autoneg;
1305 if (netif_running(dev))
1306 t3_link_start(&p->phy, &p->mac, lc);
1307 return 0;
1308}
1309
1310static void get_pauseparam(struct net_device *dev,
1311 struct ethtool_pauseparam *epause)
1312{
1313 struct port_info *p = netdev_priv(dev);
1314
1315 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1316 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1317 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1318}
1319
1320static int set_pauseparam(struct net_device *dev,
1321 struct ethtool_pauseparam *epause)
1322{
1323 struct port_info *p = netdev_priv(dev);
1324 struct link_config *lc = &p->link_config;
1325
1326 if (epause->autoneg == AUTONEG_DISABLE)
1327 lc->requested_fc = 0;
1328 else if (lc->supported & SUPPORTED_Autoneg)
1329 lc->requested_fc = PAUSE_AUTONEG;
1330 else
1331 return -EINVAL;
1332
1333 if (epause->rx_pause)
1334 lc->requested_fc |= PAUSE_RX;
1335 if (epause->tx_pause)
1336 lc->requested_fc |= PAUSE_TX;
1337 if (lc->autoneg == AUTONEG_ENABLE) {
1338 if (netif_running(dev))
1339 t3_link_start(&p->phy, &p->mac, lc);
1340 } else {
1341 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1342 if (netif_running(dev))
1343 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1344 }
1345 return 0;
1346}
1347
1348static u32 get_rx_csum(struct net_device *dev)
1349{
1350 struct port_info *p = netdev_priv(dev);
1351
1352 return p->rx_csum_offload;
1353}
1354
1355static int set_rx_csum(struct net_device *dev, u32 data)
1356{
1357 struct port_info *p = netdev_priv(dev);
1358
1359 p->rx_csum_offload = data;
1360 return 0;
1361}
1362
1363static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1364{
1365 struct adapter *adapter = dev->priv;
1366
1367 e->rx_max_pending = MAX_RX_BUFFERS;
1368 e->rx_mini_max_pending = 0;
1369 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1370 e->tx_max_pending = MAX_TXQ_ENTRIES;
1371
1372 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1373 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1374 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1375 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1376}
1377
1378static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1379{
1380 int i;
1381 struct adapter *adapter = dev->priv;
1382
1383 if (e->rx_pending > MAX_RX_BUFFERS ||
1384 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1385 e->tx_pending > MAX_TXQ_ENTRIES ||
1386 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1387 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1388 e->rx_pending < MIN_FL_ENTRIES ||
1389 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1390 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1391 return -EINVAL;
1392
1393 if (adapter->flags & FULL_INIT_DONE)
1394 return -EBUSY;
1395
1396 for (i = 0; i < SGE_QSETS; ++i) {
1397 struct qset_params *q = &adapter->params.sge.qset[i];
1398
1399 q->rspq_size = e->rx_mini_pending;
1400 q->fl_size = e->rx_pending;
1401 q->jumbo_size = e->rx_jumbo_pending;
1402 q->txq_size[0] = e->tx_pending;
1403 q->txq_size[1] = e->tx_pending;
1404 q->txq_size[2] = e->tx_pending;
1405 }
1406 return 0;
1407}
1408
1409static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1410{
1411 struct adapter *adapter = dev->priv;
1412 struct qset_params *qsp = &adapter->params.sge.qset[0];
1413 struct sge_qset *qs = &adapter->sge.qs[0];
1414
1415 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1416 return -EINVAL;
1417
1418 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1419 t3_update_qset_coalesce(qs, qsp);
1420 return 0;
1421}
1422
1423static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1424{
1425 struct adapter *adapter = dev->priv;
1426 struct qset_params *q = adapter->params.sge.qset;
1427
1428 c->rx_coalesce_usecs = q->coalesce_usecs;
1429 return 0;
1430}
1431
1432static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1433 u8 * data)
1434{
1435 int i, err = 0;
1436 struct adapter *adapter = dev->priv;
1437
1438 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1439 if (!buf)
1440 return -ENOMEM;
1441
1442 e->magic = EEPROM_MAGIC;
1443 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1444 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1445
1446 if (!err)
1447 memcpy(data, buf + e->offset, e->len);
1448 kfree(buf);
1449 return err;
1450}
1451
1452static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1453 u8 * data)
1454{
1455 u8 *buf;
1456 int err = 0;
1457 u32 aligned_offset, aligned_len, *p;
1458 struct adapter *adapter = dev->priv;
1459
1460 if (eeprom->magic != EEPROM_MAGIC)
1461 return -EINVAL;
1462
1463 aligned_offset = eeprom->offset & ~3;
1464 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1465
1466 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1467 buf = kmalloc(aligned_len, GFP_KERNEL);
1468 if (!buf)
1469 return -ENOMEM;
1470 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1471 if (!err && aligned_len > 4)
1472 err = t3_seeprom_read(adapter,
1473 aligned_offset + aligned_len - 4,
1474 (u32 *) & buf[aligned_len - 4]);
1475 if (err)
1476 goto out;
1477 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1478 } else
1479 buf = data;
1480
1481 err = t3_seeprom_wp(adapter, 0);
1482 if (err)
1483 goto out;
1484
1485 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1486 err = t3_seeprom_write(adapter, aligned_offset, *p);
1487 aligned_offset += 4;
1488 }
1489
1490 if (!err)
1491 err = t3_seeprom_wp(adapter, 1);
1492out:
1493 if (buf != data)
1494 kfree(buf);
1495 return err;
1496}
1497
1498static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1499{
1500 wol->supported = 0;
1501 wol->wolopts = 0;
1502 memset(&wol->sopass, 0, sizeof(wol->sopass));
1503}
1504
1505static const struct ethtool_ops cxgb_ethtool_ops = {
1506 .get_settings = get_settings,
1507 .set_settings = set_settings,
1508 .get_drvinfo = get_drvinfo,
1509 .get_msglevel = get_msglevel,
1510 .set_msglevel = set_msglevel,
1511 .get_ringparam = get_sge_param,
1512 .set_ringparam = set_sge_param,
1513 .get_coalesce = get_coalesce,
1514 .set_coalesce = set_coalesce,
1515 .get_eeprom_len = get_eeprom_len,
1516 .get_eeprom = get_eeprom,
1517 .set_eeprom = set_eeprom,
1518 .get_pauseparam = get_pauseparam,
1519 .set_pauseparam = set_pauseparam,
1520 .get_rx_csum = get_rx_csum,
1521 .set_rx_csum = set_rx_csum,
1522 .get_tx_csum = ethtool_op_get_tx_csum,
1523 .set_tx_csum = ethtool_op_set_tx_csum,
1524 .get_sg = ethtool_op_get_sg,
1525 .set_sg = ethtool_op_set_sg,
1526 .get_link = ethtool_op_get_link,
1527 .get_strings = get_strings,
1528 .phys_id = cxgb3_phys_id,
1529 .nway_reset = restart_autoneg,
1530 .get_stats_count = get_stats_count,
1531 .get_ethtool_stats = get_stats,
1532 .get_regs_len = get_regs_len,
1533 .get_regs = get_regs,
1534 .get_wol = get_wol,
1535 .get_tso = ethtool_op_get_tso,
1536 .set_tso = ethtool_op_set_tso,
1537 .get_perm_addr = ethtool_op_get_perm_addr
1538};
1539
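/*
 * Note for the extension ioctls below: a negative field means the caller left
 * that parameter unset, so in_range() lets it through and the existing value
 * is kept.
 */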
1540static int in_range(int val, int lo, int hi)
1541{
1542 return val < 0 || (val <= hi && val >= lo);
1543}
1544
1545static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1546{
1547 int ret;
1548 u32 cmd;
1549 struct adapter *adapter = dev->priv;
1550
1551 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1552 return -EFAULT;
1553
1554 switch (cmd) {
1555 case CHELSIO_SETREG:{
1556 struct ch_reg edata;
1557
1558 if (!capable(CAP_NET_ADMIN))
1559 return -EPERM;
1560 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1561 return -EFAULT;
1562 if ((edata.addr & 3) != 0
1563 || edata.addr >= adapter->mmio_len)
1564 return -EINVAL;
1565 writel(edata.val, adapter->regs + edata.addr);
1566 break;
1567 }
1568 case CHELSIO_GETREG:{
1569 struct ch_reg edata;
1570
1571 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1572 return -EFAULT;
1573 if ((edata.addr & 3) != 0
1574 || edata.addr >= adapter->mmio_len)
1575 return -EINVAL;
1576 edata.val = readl(adapter->regs + edata.addr);
1577 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1578 return -EFAULT;
1579 break;
1580 }
1581 case CHELSIO_SET_QSET_PARAMS:{
1582 int i;
1583 struct qset_params *q;
1584 struct ch_qset_params t;
1585
1586 if (!capable(CAP_NET_ADMIN))
1587 return -EPERM;
1588 if (copy_from_user(&t, useraddr, sizeof(t)))
1589 return -EFAULT;
1590 if (t.qset_idx >= SGE_QSETS)
1591 return -EINVAL;
1592 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1593 !in_range(t.cong_thres, 0, 255) ||
1594 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1595 MAX_TXQ_ENTRIES) ||
1596 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1597 MAX_TXQ_ENTRIES) ||
1598 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1599 MAX_CTRL_TXQ_ENTRIES) ||
1600 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1601 MAX_RX_BUFFERS)
1602 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1603 MAX_RX_JUMBO_BUFFERS)
1604 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1605 MAX_RSPQ_ENTRIES))
1606 return -EINVAL;
1607 if ((adapter->flags & FULL_INIT_DONE) &&
1608 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1609 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1610 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1611 t.polling >= 0 || t.cong_thres >= 0))
1612 return -EBUSY;
1613
1614 q = &adapter->params.sge.qset[t.qset_idx];
1615
1616 if (t.rspq_size >= 0)
1617 q->rspq_size = t.rspq_size;
1618 if (t.fl_size[0] >= 0)
1619 q->fl_size = t.fl_size[0];
1620 if (t.fl_size[1] >= 0)
1621 q->jumbo_size = t.fl_size[1];
1622 if (t.txq_size[0] >= 0)
1623 q->txq_size[0] = t.txq_size[0];
1624 if (t.txq_size[1] >= 0)
1625 q->txq_size[1] = t.txq_size[1];
1626 if (t.txq_size[2] >= 0)
1627 q->txq_size[2] = t.txq_size[2];
1628 if (t.cong_thres >= 0)
1629 q->cong_thres = t.cong_thres;
1630 if (t.intr_lat >= 0) {
1631 struct sge_qset *qs =
1632 &adapter->sge.qs[t.qset_idx];
1633
1634 q->coalesce_usecs = t.intr_lat;
1635 t3_update_qset_coalesce(qs, q);
1636 }
1637 if (t.polling >= 0) {
1638 if (adapter->flags & USING_MSIX)
1639 q->polling = t.polling;
1640 else {
1641 /* No polling with INTx for T3A */
1642 if (adapter->params.rev == 0 &&
1643 !(adapter->flags & USING_MSI))
1644 t.polling = 0;
1645
1646 for (i = 0; i < SGE_QSETS; i++) {
1647 q = &adapter->params.sge.
1648 qset[i];
1649 q->polling = t.polling;
1650 }
1651 }
1652 }
1653 break;
1654 }
1655 case CHELSIO_GET_QSET_PARAMS:{
1656 struct qset_params *q;
1657 struct ch_qset_params t;
1658
1659 if (copy_from_user(&t, useraddr, sizeof(t)))
1660 return -EFAULT;
1661 if (t.qset_idx >= SGE_QSETS)
1662 return -EINVAL;
1663
1664 q = &adapter->params.sge.qset[t.qset_idx];
1665 t.rspq_size = q->rspq_size;
1666 t.txq_size[0] = q->txq_size[0];
1667 t.txq_size[1] = q->txq_size[1];
1668 t.txq_size[2] = q->txq_size[2];
1669 t.fl_size[0] = q->fl_size;
1670 t.fl_size[1] = q->jumbo_size;
1671 t.polling = q->polling;
1672 t.intr_lat = q->coalesce_usecs;
1673 t.cong_thres = q->cong_thres;
1674
1675 if (copy_to_user(useraddr, &t, sizeof(t)))
1676 return -EFAULT;
1677 break;
1678 }
1679 case CHELSIO_SET_QSET_NUM:{
1680 struct ch_reg edata;
1681 struct port_info *pi = netdev_priv(dev);
1682 unsigned int i, first_qset = 0, other_qsets = 0;
1683
1684 if (!capable(CAP_NET_ADMIN))
1685 return -EPERM;
1686 if (adapter->flags & FULL_INIT_DONE)
1687 return -EBUSY;
1688 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1689 return -EFAULT;
1690 if (edata.val < 1 ||
1691 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1692 return -EINVAL;
1693
1694 for_each_port(adapter, i)
1695 if (adapter->port[i] && adapter->port[i] != dev)
1696 other_qsets += adap2pinfo(adapter, i)->nqsets;
1697
1698 if (edata.val + other_qsets > SGE_QSETS)
1699 return -EINVAL;
1700
1701 pi->nqsets = edata.val;
1702
1703 for_each_port(adapter, i)
1704 if (adapter->port[i]) {
1705 pi = adap2pinfo(adapter, i);
1706 pi->first_qset = first_qset;
1707 first_qset += pi->nqsets;
1708 }
1709 break;
1710 }
1711 case CHELSIO_GET_QSET_NUM:{
1712 struct ch_reg edata;
1713 struct port_info *pi = netdev_priv(dev);
1714
1715 edata.cmd = CHELSIO_GET_QSET_NUM;
1716 edata.val = pi->nqsets;
1717 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1718 return -EFAULT;
1719 break;
1720 }
1721 case CHELSIO_LOAD_FW:{
1722 u8 *fw_data;
1723 struct ch_mem_range t;
1724
1725 if (!capable(CAP_NET_ADMIN))
1726 return -EPERM;
1727 if (copy_from_user(&t, useraddr, sizeof(t)))
1728 return -EFAULT;
1729
1730 fw_data = kmalloc(t.len, GFP_KERNEL);
1731 if (!fw_data)
1732 return -ENOMEM;
1733
1734 if (copy_from_user
1735 (fw_data, useraddr + sizeof(t), t.len)) {
1736 kfree(fw_data);
1737 return -EFAULT;
1738 }
1739
1740 ret = t3_load_fw(adapter, fw_data, t.len);
1741 kfree(fw_data);
1742 if (ret)
1743 return ret;
1744 break;
1745 }
1746 case CHELSIO_SETMTUTAB:{
1747 struct ch_mtus m;
1748 int i;
1749
1750 if (!is_offload(adapter))
1751 return -EOPNOTSUPP;
1752 if (!capable(CAP_NET_ADMIN))
1753 return -EPERM;
1754 if (offload_running(adapter))
1755 return -EBUSY;
1756 if (copy_from_user(&m, useraddr, sizeof(m)))
1757 return -EFAULT;
1758 if (m.nmtus != NMTUS)
1759 return -EINVAL;
1760 if (m.mtus[0] < 81) /* accommodate SACK */
1761 return -EINVAL;
1762
1763 /* MTUs must be in ascending order */
1764 for (i = 1; i < NMTUS; ++i)
1765 if (m.mtus[i] < m.mtus[i - 1])
1766 return -EINVAL;
1767
1768 memcpy(adapter->params.mtus, m.mtus,
1769 sizeof(adapter->params.mtus));
1770 break;
1771 }
1772 case CHELSIO_GET_PM:{
1773 struct tp_params *p = &adapter->params.tp;
1774 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1775
1776 if (!is_offload(adapter))
1777 return -EOPNOTSUPP;
1778 m.tx_pg_sz = p->tx_pg_size;
1779 m.tx_num_pg = p->tx_num_pgs;
1780 m.rx_pg_sz = p->rx_pg_size;
1781 m.rx_num_pg = p->rx_num_pgs;
1782 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1783 if (copy_to_user(useraddr, &m, sizeof(m)))
1784 return -EFAULT;
1785 break;
1786 }
1787 case CHELSIO_SET_PM:{
1788 struct ch_pm m;
1789 struct tp_params *p = &adapter->params.tp;
1790
1791 if (!is_offload(adapter))
1792 return -EOPNOTSUPP;
1793 if (!capable(CAP_NET_ADMIN))
1794 return -EPERM;
1795 if (adapter->flags & FULL_INIT_DONE)
1796 return -EBUSY;
1797 if (copy_from_user(&m, useraddr, sizeof(m)))
1798 return -EFAULT;
1799 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1800 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1801 return -EINVAL; /* not power of 2 */
1802 if (!(m.rx_pg_sz & 0x14000))
1803 return -EINVAL; /* not 16KB or 64KB */
1804 if (!(m.tx_pg_sz & 0x1554000))
1805 return -EINVAL;
1806 if (m.tx_num_pg == -1)
1807 m.tx_num_pg = p->tx_num_pgs;
1808 if (m.rx_num_pg == -1)
1809 m.rx_num_pg = p->rx_num_pgs;
1810 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1811 return -EINVAL;
1812 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1813 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1814 return -EINVAL;
1815 p->rx_pg_size = m.rx_pg_sz;
1816 p->tx_pg_size = m.tx_pg_sz;
1817 p->rx_num_pgs = m.rx_num_pg;
1818 p->tx_num_pgs = m.tx_num_pg;
1819 break;
1820 }
1821 case CHELSIO_GET_MEM:{
1822 struct ch_mem_range t;
1823 struct mc7 *mem;
1824 u64 buf[32];
1825
1826 if (!is_offload(adapter))
1827 return -EOPNOTSUPP;
1828 if (!(adapter->flags & FULL_INIT_DONE))
1829 return -EIO; /* need the memory controllers */
1830 if (copy_from_user(&t, useraddr, sizeof(t)))
1831 return -EFAULT;
1832 if ((t.addr & 7) || (t.len & 7))
1833 return -EINVAL;
1834 if (t.mem_id == MEM_CM)
1835 mem = &adapter->cm;
1836 else if (t.mem_id == MEM_PMRX)
1837 mem = &adapter->pmrx;
1838 else if (t.mem_id == MEM_PMTX)
1839 mem = &adapter->pmtx;
1840 else
1841 return -EINVAL;
1842
1843 /*
1844 * Version scheme:
1845 * bits 0..9: chip version
1846 * bits 10..15: chip revision
1847 */
1848 t.version = 3 | (adapter->params.rev << 10);
1849 if (copy_to_user(useraddr, &t, sizeof(t)))
1850 return -EFAULT;
1851
1852 /*
1853 * Read 256 bytes at a time as len can be large and we don't
1854 * want to use huge intermediate buffers.
1855 */
1856 useraddr += sizeof(t); /* advance to start of buffer */
1857 while (t.len) {
1858 unsigned int chunk =
1859 min_t(unsigned int, t.len, sizeof(buf));
1860
1861 ret =
1862 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1863 buf);
1864 if (ret)
1865 return ret;
1866 if (copy_to_user(useraddr, buf, chunk))
1867 return -EFAULT;
1868 useraddr += chunk;
1869 t.addr += chunk;
1870 t.len -= chunk;
1871 }
1872 break;
1873 }
1874 case CHELSIO_SET_TRACE_FILTER:{
1875 struct ch_trace t;
1876 const struct trace_params *tp;
1877
1878 if (!capable(CAP_NET_ADMIN))
1879 return -EPERM;
1880 if (!offload_running(adapter))
1881 return -EAGAIN;
1882 if (copy_from_user(&t, useraddr, sizeof(t)))
1883 return -EFAULT;
1884
1885 tp = (const struct trace_params *)&t.sip;
1886 if (t.config_tx)
1887 t3_config_trace_filter(adapter, tp, 0,
1888 t.invert_match,
1889 t.trace_tx);
1890 if (t.config_rx)
1891 t3_config_trace_filter(adapter, tp, 1,
1892 t.invert_match,
1893 t.trace_rx);
1894 break;
1895 }
1896 case CHELSIO_SET_PKTSCHED:{
1897 struct ch_pktsched_params p;
1898
1899 if (!capable(CAP_NET_ADMIN))
1900 return -EPERM;
1901 if (!adapter->open_device_map)
1902 return -EAGAIN; /* uP and SGE must be running */
1903 if (copy_from_user(&p, useraddr, sizeof(p)))
1904 return -EFAULT;
1905 send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
1906 p.binding);
1907 break;
1908
1909 }
1910 default:
1911 return -EOPNOTSUPP;
1912 }
1913 return 0;
1914}
1915
1916static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1917{
1918 int ret, mmd;
1919 struct adapter *adapter = dev->priv;
1920 struct port_info *pi = netdev_priv(dev);
1921 struct mii_ioctl_data *data = if_mii(req);
1922
1923 switch (cmd) {
1924 case SIOCGMIIPHY:
1925 data->phy_id = pi->phy.addr;
1926 /* FALLTHRU */
1927 case SIOCGMIIREG:{
1928 u32 val;
1929 struct cphy *phy = &pi->phy;
1930
1931 if (!phy->mdio_read)
1932 return -EOPNOTSUPP;
1933 if (is_10G(adapter)) {
1934 mmd = data->phy_id >> 8;
1935 if (!mmd)
1936 mmd = MDIO_DEV_PCS;
1937 else if (mmd > MDIO_DEV_XGXS)
1938 return -EINVAL;
1939
1940 ret =
1941 phy->mdio_read(adapter, data->phy_id & 0x1f,
1942 mmd, data->reg_num, &val);
1943 } else
1944 ret =
1945 phy->mdio_read(adapter, data->phy_id & 0x1f,
1946 0, data->reg_num & 0x1f,
1947 &val);
1948 if (!ret)
1949 data->val_out = val;
1950 break;
1951 }
1952 case SIOCSMIIREG:{
1953 struct cphy *phy = &pi->phy;
1954
1955 if (!capable(CAP_NET_ADMIN))
1956 return -EPERM;
1957 if (!phy->mdio_write)
1958 return -EOPNOTSUPP;
1959 if (is_10G(adapter)) {
1960 mmd = data->phy_id >> 8;
1961 if (!mmd)
1962 mmd = MDIO_DEV_PCS;
1963 else if (mmd > MDIO_DEV_XGXS)
1964 return -EINVAL;
1965
1966 ret =
1967 phy->mdio_write(adapter,
1968 data->phy_id & 0x1f, mmd,
1969 data->reg_num,
1970 data->val_in);
1971 } else
1972 ret =
1973 phy->mdio_write(adapter,
1974 data->phy_id & 0x1f, 0,
1975 data->reg_num & 0x1f,
1976 data->val_in);
1977 break;
1978 }
1979 case SIOCCHIOCTL:
1980 return cxgb_extension_ioctl(dev, req->ifr_data);
1981 default:
1982 return -EOPNOTSUPP;
1983 }
1984 return ret;
1985}
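/*
 * Illustrative sketch, not part of the driver: for 10G (clause-45) PHYs the
 * SIOCGMIIREG/SIOCSMIIREG paths above encode two things in phy_id: the low
 * five bits are the PHY address and the next byte selects the MMD, with 0
 * defaulting to the PCS device.  A hypothetical decoder of that encoding:
 */
#if 0	/* example only */
static inline void decode_clause45_phy_id(unsigned int phy_id,
					  unsigned int *phy_addr,
					  unsigned int *mmd)
{
	*phy_addr = phy_id & 0x1f;	/* bits 0..4: PHY address */
	*mmd = phy_id >> 8;		/* bits 8..15: MMD selector, 0 => PCS */
}
#endif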
1986
1987static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1988{
1989 int ret;
1990 struct adapter *adapter = dev->priv;
1991 struct port_info *pi = netdev_priv(dev);
1992
1993 if (new_mtu < 81) /* accommodate SACK */
1994 return -EINVAL;
1995 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1996 return ret;
1997 dev->mtu = new_mtu;
1998 init_port_mtus(adapter);
1999 if (adapter->params.rev == 0 && offload_running(adapter))
2000 t3_load_mtus(adapter, adapter->params.mtus,
2001 adapter->params.a_wnd, adapter->params.b_wnd,
2002 adapter->port[0]->mtu);
2003 return 0;
2004}
2005
2006static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2007{
2008 struct adapter *adapter = dev->priv;
2009 struct port_info *pi = netdev_priv(dev);
2010 struct sockaddr *addr = p;
2011
2012 if (!is_valid_ether_addr(addr->sa_data))
2013 return -EINVAL;
2014
2015 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2016 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2017 if (offload_running(adapter))
2018 write_smt_entry(adapter, pi->port_id);
2019 return 0;
2020}
2021
2022/**
2023 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2024 * @adap: the adapter
2025 * @p: the port
2026 *
2027 * Ensures that current Rx processing on any of the queues associated with
2028 * the given port completes before returning. We do this by acquiring and
2029 * releasing the locks of the response queues associated with the port.
2030 */
2031static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2032{
2033 int i;
2034
2035 for (i = 0; i < p->nqsets; i++) {
2036 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2037
2038 spin_lock_irq(&q->lock);
2039 spin_unlock_irq(&q->lock);
2040 }
2041}
2042
2043static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2044{
2045 struct adapter *adapter = dev->priv;
2046 struct port_info *pi = netdev_priv(dev);
2047
2048 pi->vlan_grp = grp;
2049 if (adapter->params.rev > 0)
2050 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2051 else {
2052 /* single control for all ports */
2053 unsigned int i, have_vlans = 0;
2054 for_each_port(adapter, i)
2055 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2056
2057 t3_set_vlan_accel(adapter, 1, have_vlans);
2058 }
2059 t3_synchronize_rx(adapter, pi);
2060}
2061
2062static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2063{
2064 /* nothing */
2065}
2066
2067#ifdef CONFIG_NET_POLL_CONTROLLER
2068static void cxgb_netpoll(struct net_device *dev)
2069{
2070 struct adapter *adapter = dev->priv;
2071 struct sge_qset *qs = dev2qset(dev);
2072
2073 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2074 adapter);
2075}
2076#endif
2077
2078/*
2079 * Periodic accumulation of MAC statistics.
2080 */
2081static void mac_stats_update(struct adapter *adapter)
2082{
2083 int i;
2084
2085 for_each_port(adapter, i) {
2086 struct net_device *dev = adapter->port[i];
2087 struct port_info *p = netdev_priv(dev);
2088
2089 if (netif_running(dev)) {
2090 spin_lock(&adapter->stats_lock);
2091 t3_mac_update_stats(&p->mac);
2092 spin_unlock(&adapter->stats_lock);
2093 }
2094 }
2095}
2096
2097static void check_link_status(struct adapter *adapter)
2098{
2099 int i;
2100
2101 for_each_port(adapter, i) {
2102 struct net_device *dev = adapter->port[i];
2103 struct port_info *p = netdev_priv(dev);
2104
2105 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2106 t3_link_changed(adapter, i);
2107 }
2108}
2109
2110static void t3_adap_check_task(struct work_struct *work)
2111{
2112 struct adapter *adapter = container_of(work, struct adapter,
2113 adap_check_task.work);
2114 const struct adapter_params *p = &adapter->params;
2115
2116 adapter->check_task_cnt++;
2117
2118 /* Check link status for PHYs without interrupts */
2119 if (p->linkpoll_period)
2120 check_link_status(adapter);
2121
2122 /* Accumulate MAC stats if needed */
2123 if (!p->linkpoll_period ||
2124 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2125 p->stats_update_period) {
2126 mac_stats_update(adapter);
2127 adapter->check_task_cnt = 0;
2128 }
2129
2130 /* Schedule the next check update if any port is active. */
2131 spin_lock(&adapter->work_lock);
2132 if (adapter->open_device_map & PORT_MASK)
2133 schedule_chk_task(adapter);
2134 spin_unlock(&adapter->work_lock);
2135}
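/*
 * Illustrative sketch, not part of the driver: the condition above refreshes
 * the MAC statistics once enough link-poll intervals add up to one stats
 * period.  Assuming linkpoll_period counts tenths of a second and
 * stats_update_period counts seconds (an assumption inferred from the /10
 * scaling), linkpoll_period = 10 (1 s) with stats_update_period = 5 would
 * refresh the stats on every 5th run of the check task:
 */
#if 0	/* example only */
static int stats_refresh_due(unsigned int checks_since_refresh,
			     unsigned int linkpoll_period,
			     unsigned int stats_update_period)
{
	return !linkpoll_period ||
	       (checks_since_refresh * linkpoll_period) / 10 >=
	       stats_update_period;
}
#endif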
2136
2137/*
2138 * Processes external (PHY) interrupts in process context.
2139 */
2140static void ext_intr_task(struct work_struct *work)
2141{
2142 struct adapter *adapter = container_of(work, struct adapter,
2143 ext_intr_handler_task);
2144
2145 t3_phy_intr_handler(adapter);
2146
2147 /* Now reenable external interrupts */
2148 spin_lock_irq(&adapter->work_lock);
2149 if (adapter->slow_intr_mask) {
2150 adapter->slow_intr_mask |= F_T3DBG;
2151 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2152 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2153 adapter->slow_intr_mask);
2154 }
2155 spin_unlock_irq(&adapter->work_lock);
2156}
2157
2158/*
2159 * Interrupt-context handler for external (PHY) interrupts.
2160 */
2161void t3_os_ext_intr_handler(struct adapter *adapter)
2162{
2163 /*
2164 * Schedule a task to handle external interrupts as they may be slow
2165 * and we use a mutex to protect MDIO registers. We disable PHY
2166 * interrupts in the meantime and let the task reenable them when
2167 * it's done.
2168 */
2169 spin_lock(&adapter->work_lock);
2170 if (adapter->slow_intr_mask) {
2171 adapter->slow_intr_mask &= ~F_T3DBG;
2172 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2173 adapter->slow_intr_mask);
2174 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2175 }
2176 spin_unlock(&adapter->work_lock);
2177}
2178
2179void t3_fatal_err(struct adapter *adapter)
2180{
2181 unsigned int fw_status[4];
2182
2183 if (adapter->flags & FULL_INIT_DONE) {
2184 t3_sge_stop(adapter);
2185 t3_intr_disable(adapter);
2186 }
2187 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2188 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2189 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2190 fw_status[0], fw_status[1],
2191 fw_status[2], fw_status[3]);
2192
2193}
2194
2195static int __devinit cxgb_enable_msix(struct adapter *adap)
2196{
2197 struct msix_entry entries[SGE_QSETS + 1];
2198 int i, err;
2199
2200 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2201 entries[i].entry = i;
2202
2203 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2204 if (!err) {
2205 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2206 adap->msix_info[i].vec = entries[i].vector;
2207 } else if (err > 0)
2208 dev_info(&adap->pdev->dev,
2209 "only %d MSI-X vectors left, not using MSI-X\n", err);
2210 return err;
2211}
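/*
 * Illustrative sketch, not part of the driver: with the legacy
 * pci_enable_msix() interface used above, a positive return value means
 * "not enough vectors; this many could have been allocated".  cxgb3 simply
 * falls back to MSI/INTx in that case, but a caller that wanted to shrink
 * its request and retry could do roughly this:
 */
#if 0	/* example only */
static int enable_msix_or_shrink(struct pci_dev *pdev,
				 struct msix_entry *entries, int want)
{
	int err = pci_enable_msix(pdev, entries, want);

	while (err > 0)		/* retry with the number we were offered */
		err = pci_enable_msix(pdev, entries, err);

	return err;		/* 0 on success, negative errno on failure */
}
#endif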
2212
2213static void __devinit print_port_info(struct adapter *adap,
2214 const struct adapter_info *ai)
2215{
2216 static const char *pci_variant[] = {
2217 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2218 };
2219
2220 int i;
2221 char buf[80];
2222
2223 if (is_pcie(adap))
2224 snprintf(buf, sizeof(buf), "%s x%d",
2225 pci_variant[adap->params.pci.variant],
2226 adap->params.pci.width);
2227 else
2228 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2229 pci_variant[adap->params.pci.variant],
2230 adap->params.pci.speed, adap->params.pci.width);
2231
2232 for_each_port(adap, i) {
2233 struct net_device *dev = adap->port[i];
2234 const struct port_info *pi = netdev_priv(dev);
2235
2236 if (!test_bit(i, &adap->registered_device_map))
2237 continue;
2238 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2239 dev->name, ai->desc, pi->port_type->desc,
2240 adap->params.rev, buf,
2241 (adap->flags & USING_MSIX) ? " MSI-X" :
2242 (adap->flags & USING_MSI) ? " MSI" : "");
2243 if (adap->name == dev->name && adap->params.vpd.mclk)
2244 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2245 adap->name, t3_mc7_size(&adap->cm) >> 20,
2246 t3_mc7_size(&adap->pmtx) >> 20,
2247 t3_mc7_size(&adap->pmrx) >> 20);
2248 }
2249}
2250
2251static int __devinit init_one(struct pci_dev *pdev,
2252 const struct pci_device_id *ent)
2253{
2254 static int version_printed;
2255
2256 int i, err, pci_using_dac = 0;
2257 unsigned long mmio_start, mmio_len;
2258 const struct adapter_info *ai;
2259 struct adapter *adapter = NULL;
2260 struct port_info *pi;
2261
2262 if (!version_printed) {
2263 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2264 ++version_printed;
2265 }
2266
2267 if (!cxgb3_wq) {
2268 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2269 if (!cxgb3_wq) {
2270 printk(KERN_ERR DRV_NAME
2271 ": cannot initialize work queue\n");
2272 return -ENOMEM;
2273 }
2274 }
2275
2276 err = pci_request_regions(pdev, DRV_NAME);
2277 if (err) {
2278 /* Just info, some other driver may have claimed the device. */
2279 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2280 return err;
2281 }
2282
2283 err = pci_enable_device(pdev);
2284 if (err) {
2285 dev_err(&pdev->dev, "cannot enable PCI device\n");
2286 goto out_release_regions;
2287 }
2288
2289 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2290 pci_using_dac = 1;
2291 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2292 if (err) {
2293 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2294 "coherent allocations\n");
2295 goto out_disable_device;
2296 }
2297 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2298 dev_err(&pdev->dev, "no usable DMA configuration\n");
2299 goto out_disable_device;
2300 }
2301
2302 pci_set_master(pdev);
2303
2304 mmio_start = pci_resource_start(pdev, 0);
2305 mmio_len = pci_resource_len(pdev, 0);
2306 ai = t3_get_adapter_info(ent->driver_data);
2307
2308 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2309 if (!adapter) {
2310 err = -ENOMEM;
2311 goto out_disable_device;
2312 }
2313
2314 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2315 if (!adapter->regs) {
2316 dev_err(&pdev->dev, "cannot map device registers\n");
2317 err = -ENOMEM;
2318 goto out_free_adapter;
2319 }
2320
2321 adapter->pdev = pdev;
2322 adapter->name = pci_name(pdev);
2323 adapter->msg_enable = dflt_msg_enable;
2324 adapter->mmio_len = mmio_len;
2325
2326 mutex_init(&adapter->mdio_lock);
2327 spin_lock_init(&adapter->work_lock);
2328 spin_lock_init(&adapter->stats_lock);
2329
2330 INIT_LIST_HEAD(&adapter->adapter_list);
2331 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2332 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2333
2334 for (i = 0; i < ai->nports; ++i) {
2335 struct net_device *netdev;
2336
2337 netdev = alloc_etherdev(sizeof(struct port_info));
2338 if (!netdev) {
2339 err = -ENOMEM;
2340 goto out_free_dev;
2341 }
2342
2343 SET_MODULE_OWNER(netdev);
2344 SET_NETDEV_DEV(netdev, &pdev->dev);
2345
2346 adapter->port[i] = netdev;
2347 pi = netdev_priv(netdev);
2348 pi->rx_csum_offload = 1;
2349 pi->nqsets = 1;
2350 pi->first_qset = i;
2351 pi->activity = 0;
2352 pi->port_id = i;
2353 netif_carrier_off(netdev);
2354 netdev->irq = pdev->irq;
2355 netdev->mem_start = mmio_start;
2356 netdev->mem_end = mmio_start + mmio_len - 1;
2357 netdev->priv = adapter;
2358 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2359 netdev->features |= NETIF_F_LLTX;
2360 if (pci_using_dac)
2361 netdev->features |= NETIF_F_HIGHDMA;
2362
2363 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2364 netdev->vlan_rx_register = vlan_rx_register;
2365 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2366
2367 netdev->open = cxgb_open;
2368 netdev->stop = cxgb_close;
2369 netdev->hard_start_xmit = t3_eth_xmit;
2370 netdev->get_stats = cxgb_get_stats;
2371 netdev->set_multicast_list = cxgb_set_rxmode;
2372 netdev->do_ioctl = cxgb_ioctl;
2373 netdev->change_mtu = cxgb_change_mtu;
2374 netdev->set_mac_address = cxgb_set_mac_addr;
2375#ifdef CONFIG_NET_POLL_CONTROLLER
2376 netdev->poll_controller = cxgb_netpoll;
2377#endif
2378 netdev->weight = 64;
2379
2380 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2381 }
2382
2383 pci_set_drvdata(pdev, adapter->port[0]);
2384 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2385 err = -ENODEV;
2386 goto out_free_dev;
2387 }
2388
2389 /*
2390 * The card is now ready to go. If any errors occur during device
2391 * registration we do not fail the whole card but rather proceed only
2392 * with the ports we manage to register successfully. However we must
2393 * register at least one net device.
2394 */
2395 for_each_port(adapter, i) {
2396 err = register_netdev(adapter->port[i]);
2397 if (err)
2398 dev_warn(&pdev->dev,
2399 "cannot register net device %s, skipping\n",
2400 adapter->port[i]->name);
2401 else {
2402 /*
2403 * Change the name we use for messages to the name of
2404 * the first successfully registered interface.
2405 */
2406 if (!adapter->registered_device_map)
2407 adapter->name = adapter->port[i]->name;
2408
2409 __set_bit(i, &adapter->registered_device_map);
2410 }
2411 }
2412 if (!adapter->registered_device_map) {
2413 dev_err(&pdev->dev, "could not register any net devices\n");
2414 goto out_free_dev;
2415 }
2416
2417 /* Driver's ready. Reflect it on LEDs */
2418 t3_led_ready(adapter);
2419
2420 if (is_offload(adapter)) {
2421 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2422 cxgb3_adapter_ofld(adapter);
2423 }
2424
2425 /* See what interrupts we'll be using */
2426 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2427 adapter->flags |= USING_MSIX;
2428 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2429 adapter->flags |= USING_MSI;
2430
0ee8d33c 2431 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2432 &cxgb3_attr_group);
2433
2434 print_port_info(adapter, ai);
2435 return 0;
2436
2437out_free_dev:
2438 iounmap(adapter->regs);
2439 for (i = ai->nports - 1; i >= 0; --i)
2440 if (adapter->port[i])
2441 free_netdev(adapter->port[i]);
2442
2443out_free_adapter:
2444 kfree(adapter);
2445
2446out_disable_device:
2447 pci_disable_device(pdev);
2448out_release_regions:
2449 pci_release_regions(pdev);
2450 pci_set_drvdata(pdev, NULL);
2451 return err;
2452}
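/*
 * Illustrative sketch, not part of the driver: init_one() above first tries
 * a 64-bit DMA mask (also requiring 64-bit coherent allocations) and only
 * then falls back to 32-bit addressing.  The same policy expressed as a
 * small helper (hypothetical name) would read:
 */
#if 0	/* example only */
static int set_dma_masks(struct pci_dev *pdev, int *using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		*using_dac = 1;
		/* streaming DMA can reach 64 bits; coherent must as well */
		return pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}
	*using_dac = 0;
	return pci_set_dma_mask(pdev, DMA_32BIT_MASK);
}
#endif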
2453
2454static void __devexit remove_one(struct pci_dev *pdev)
2455{
2456 struct net_device *dev = pci_get_drvdata(pdev);
2457
2458 if (dev) {
2459 int i;
2460 struct adapter *adapter = dev->priv;
2461
2462 t3_sge_stop(adapter);
0ee8d33c 2463 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2464 &cxgb3_attr_group);
2465
2466 for_each_port(adapter, i)
2467 if (test_bit(i, &adapter->registered_device_map))
2468 unregister_netdev(adapter->port[i]);
2469
2470 if (is_offload(adapter)) {
2471 cxgb3_adapter_unofld(adapter);
2472 if (test_bit(OFFLOAD_DEVMAP_BIT,
2473 &adapter->open_device_map))
2474 offload_close(&adapter->tdev);
2475 }
2476
2477 t3_free_sge_resources(adapter);
2478 cxgb_disable_msi(adapter);
2479
2480 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2481 if (adapter->dummy_netdev[i]) {
2482 free_netdev(adapter->dummy_netdev[i]);
2483 adapter->dummy_netdev[i] = NULL;
2484 }
2485
2486 for_each_port(adapter, i)
2487 if (adapter->port[i])
2488 free_netdev(adapter->port[i]);
2489
2490 iounmap(adapter->regs);
2491 kfree(adapter);
2492 pci_release_regions(pdev);
2493 pci_disable_device(pdev);
2494 pci_set_drvdata(pdev, NULL);
2495 }
2496}
2497
2498static struct pci_driver driver = {
2499 .name = DRV_NAME,
2500 .id_table = cxgb3_pci_tbl,
2501 .probe = init_one,
2502 .remove = __devexit_p(remove_one),
2503};
2504
2505static int __init cxgb3_init_module(void)
2506{
2507 int ret;
2508
2509 cxgb3_offload_init();
2510
2511 ret = pci_register_driver(&driver);
2512 return ret;
2513}
2514
2515static void __exit cxgb3_cleanup_module(void)
2516{
2517 pci_unregister_driver(&driver);
2518 if (cxgb3_wq)
2519 destroy_workqueue(cxgb3_wq);
2520}
2521
2522module_init(cxgb3_init_module);
2523module_exit(cxgb3_cleanup_module);