/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1, 1),	/* T302E */
	CH_DEVICE(0x22, 1, 2),	/* T310E */
	CH_DEVICE(0x23, 1, 3),	/* T320X */
	CH_DEVICE(0x24, 1, 1),	/* T302X */
	CH_DEVICE(0x25, 1, 3),	/* T320E */
	CH_DEVICE(0x26, 1, 2),	/* T310X */
	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
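
/*
 * Illustrative usage (assuming the module is built as cxgb3.ko):
 * "modprobe cxgb3 msi=0" forces legacy pin interrupts, "msi=1" allows
 * MSI but not MSI-X.
 */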
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
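
/*
 * Sketch of the hazard a private queue avoids (hypothetical schedule):
 *
 *   keventd queue:  [linkwatch (needs rtnl)] [our work element]
 *   close path:     holds rtnl, waits for our work element to finish
 *
 * linkwatch blocks keventd on the rtnl lock, our element never runs, and
 * close never returns.  Running our elements on cxgb3_wq breaks the cycle.
 */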
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
		link_report(dev);
	}
}
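
/*
 * Propagate a net device's receive mode (promiscuity, multicast list) to
 * the port's MAC.
 */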
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
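
/*
 * Request the per-queue-set data IRQs (MSI-X vectors 1..n), unwinding any
 * vectors already requested if a later request fails.
 */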
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
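
/*
 * Worked example (hypothetical values): with nq0 == nq1 == 2, the first
 * half of rspq_map alternates 0,1,0,1,... and the second half alternates
 * 2,3,2,3,..., so each port's traffic is spread over that port's own pair
 * of response queues.
 */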
/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
	int i, j, dummy_idx = 0;
	struct net_device *nd;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets - 1; j++) {
			if (!adap->dummy_netdev[dummy_idx]) {
				nd = alloc_netdev(0, "", ether_setup);
				if (!nd)
					goto free_all;

				nd->priv = adap;
				nd->weight = 64;
				set_bit(__LINK_STATE_START, &nd->state);
				adap->dummy_netdev[dummy_idx] = nd;
			}
			strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
			dummy_idx++;
		}
	}
	return 0;

free_all:
	while (--dummy_idx >= 0) {
		free_netdev(adap->dummy_netdev[dummy_idx]);
		adap->dummy_netdev[dummy_idx] = NULL;
	}
	return -ENOMEM;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;
	struct net_device *dev;

	for_each_port(adap, i) {
		dev = adap->port[i];
		while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
			msleep(1);
	}

	for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
		dev = adap->dummy_netdev[i];
		if (dev)
			while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
				msleep(1);
	}
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
	unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
					irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq,
				j == 0 ? dev :
					 adap->dummy_netdev[dummy_dev_idx++]);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
			 char *buf,
			 ssize_t (*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t len,
			  ssize_t (*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct adapter *adap = dev->priv; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, attr, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
			    char *buf, int sched)
{
	ssize_t len;
	unsigned int v, addr, bpt, cpt;
	struct adapter *adap = to_net_dev(d)->priv;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
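	/*
	 * bpt is bytes sent per scheduler tick and cpt is core clocks per
	 * tick; with cclk in kHz (hence the * 1000 below), ticks/sec =
	 * cclk * 1000 / cpt, the rate is bpt * ticks/sec bytes/sec, and
	 * dividing by 125 (* 8 / 1000) converts bytes/sec to Kbps.
	 */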
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len, int sched)
{
	char *endp;
	ssize_t ret;
	unsigned int val;
	struct adapter *adap = to_net_dev(d)->priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
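
/*
 * Write a port's source MAC address into the hardware SMT entry for that
 * port, via a control message to the firmware.
 */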
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}
#define FW_FNAME "t3fw-%d.%d.bin"
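/*
 * Illustrative only: a driver built with FW_VERSION_MAJOR 3 and
 * FW_VERSION_MINOR 2 (hypothetical numbers) would request "t3fw-3.2.bin".
 */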
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	return ret;
}
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL)
			err = upgrade_fw(adap);
		if (err)
			goto out;

		err = init_dummy_netdevs(adap);
		if (err)
			goto out;

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		if (request_msix_data_irqs(adap)) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
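
/*
 * Schedule the periodic adapter check task; linkpoll_period is in tenths
 * of a second, stats_update_period in seconds.
 */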
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
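
/*
 * Bring up the offload side of the adapter: switch TP into offload mode,
 * activate the offload module, program the MTU and SMT tables, and notify
 * registered clients.
 */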
static int offload_open(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct t3cdev *tdev = T3CDEV(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	int other_ports = adapter->open_device_map & PORT_MASK;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);

	t3_port_intr_disable(adapter, p->port_id);
	netif_stop_queue(dev);
	p->phy.ops->power_down(&p->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(p->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);
	struct net_device_stats *ns = &p->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&p->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->priv;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            "
};
static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	u32 fw_vers = 0;
	struct adapter *adapter = dev->priv;

	t3_get_fw_version(adapter, &fw_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	int i;
	struct adapter *adapter = dev->priv;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	struct qset_params *q;
	struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = dev->priv;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = dev->priv;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_stats_count = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_perm_addr = ethtool_op_get_perm_addr
};
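
/*
 * In the extension ioctl below, a negative parameter value means "leave
 * unchanged"; in_range() therefore accepts negatives as well as [lo, hi].
 */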
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	int ret;
	u32 cmd;
	struct adapter *adapter = dev->priv;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS)
		    || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
				 MAX_RX_JUMBO_BUFFERS)
		    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
				 MAX_RSPQ_ENTRIES))
			return -EINVAL;
		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
			    &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
					    qset[i];
					q->polling = t.polling;
				}
			}
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
		    !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret =
			    t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
					   buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	int ret, mmd;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
			    phy->mdio_read(adapter, data->phy_id & 0x1f,
					   mmd, data->reg_num, &val);
		} else
			ret =
			    phy->mdio_read(adapter, data->phy_id & 0x1f,
					   0, data->reg_num & 0x1f,
					   &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
			    phy->mdio_write(adapter,
					    data->phy_id & 0x1f, mmd,
					    data->reg_num,
					    data->val_in);
		} else
			ret =
			    phy->mdio_write(adapter,
					    data->phy_id & 0x1f, 0,
					    data->reg_num & 0x1f,
					    data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = 0; i < p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
		    have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing */
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct sge_qset *qs = dev2qset(dev);

	t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
						    dev);
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	/* Schedule the next check update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock(&adapter->work_lock);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_intr_disable(adapter);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
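
/*
 * Try to enable MSI-X: one vector for async events plus one per queue set.
 * Returns 0 on success; a positive return from pci_enable_msix reports how
 * many vectors were actually available, in which case MSI-X is not used.
 */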
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20);
	}
}
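
/*
 * PCI probe routine: map the register BAR, allocate the adapter and one
 * net device per port, register the ports, and choose the interrupt
 * scheme (MSI-X, MSI, or legacy pin interrupts).
 */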
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;
		pi->first_qset = i;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;
		netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif
		netdev->weight = 64;

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter->port[0]);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
			if (adapter->dummy_netdev[i]) {
				free_netdev(adapter->dummy_netdev[i]);
				adapter->dummy_netdev[i] = NULL;
			}

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);