1 /*****************************************************************************
2  *                                                                           *
3  * File: cxgb2.c                                                             *
4  * $Revision: 1.25 $                                                         *
5  * $Date: 2005/06/22 00:43:25 $                                              *
6  * Description:                                                              *
7  *  Chelsio 10Gb Ethernet Driver.                                            *
8  *                                                                           *
9  * This program is free software; you can redistribute it and/or modify      *
10  * it under the terms of the GNU General Public License, version 2, as       *
11  * published by the Free Software Foundation.                                *
12  *                                                                           *
13  * You should have received a copy of the GNU General Public License along   *
14  * with this program; if not, see <http://www.gnu.org/licenses/>.            *
15  *                                                                           *
16  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
17  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
18  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
19  *                                                                           *
20  * http://www.chelsio.com                                                    *
21  *                                                                           *
22  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
23  * All rights reserved.                                                      *
24  *                                                                           *
25  * Maintainers: maintainers@chelsio.com                                      *
26  *                                                                           *
27  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
28  *          Tina Yang               <tainay@chelsio.com>                     *
29  *          Felix Marti             <felix@chelsio.com>                      *
30  *          Scott Bardone           <sbardone@chelsio.com>                   *
31  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
32  *          Frank DiMambro          <frank@chelsio.com>                      *
33  *                                                                           *
34  * History:                                                                  *
35  *                                                                           *
36  ****************************************************************************/
37
38 #include "common.h"
39 #include <linux/module.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/if_vlan.h>
44 #include <linux/mii.h>
45 #include <linux/sockios.h>
46 #include <linux/dma-mapping.h>
47 #include <asm/uaccess.h>
48
49 #include "cpl5_cmd.h"
50 #include "regs.h"
51 #include "gmac.h"
52 #include "cphy.h"
53 #include "sge.h"
54 #include "tp.h"
55 #include "espi.h"
56 #include "elmer0.h"
57
58 #include <linux/workqueue.h>
59
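/* Helpers to (re)arm and cancel the periodic MAC statistics refresh work. */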
60 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
61 {
62         schedule_delayed_work(&ap->stats_update_task, secs * HZ);
63 }
64
65 static inline void cancel_mac_stats_update(struct adapter *ap)
66 {
67         cancel_delayed_work(&ap->stats_update_task);
68 }
69
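/*
 * SGE queue sizing limits.  The CMDQ and RX values bound the ring sizes
 * configurable through ethtool (see get_sge_param()/set_sge_param()).
 */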
70 #define MAX_CMDQ_ENTRIES        16384
71 #define MAX_CMDQ1_ENTRIES       1024
72 #define MAX_RX_BUFFERS          16384
73 #define MAX_RX_JUMBO_BUFFERS    16384
74 #define MAX_TX_BUFFERS_HIGH     16384U
75 #define MAX_TX_BUFFERS_LOW      1536U
76 #define MAX_TX_BUFFERS          1460U
77 #define MIN_FL_ENTRIES          32
78
79 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
80                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
81                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
82
83 /*
84  * The EEPROM is actually bigger, but only the first few bytes are used, so
85  * we only report those.
86  */
87 #define EEPROM_SIZE 32
88
89 MODULE_DESCRIPTION(DRV_DESCRIPTION);
90 MODULE_AUTHOR("Chelsio Communications");
91 MODULE_LICENSE("GPL");
92
93 static int dflt_msg_enable = DFLT_MSG_ENABLE;
94
95 module_param(dflt_msg_enable, int, 0);
96 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
97
98 #define HCLOCK 0x0
99 #define LCLOCK 0x1
100
101 /* T1 card powersave mode */
102 static int t1_clock(struct adapter *adapter, int mode);
103 static int t1powersave = 1;     /* HW default is powersave mode. */
104
105 module_param(t1powersave, int, 0);
106 MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
107
108 static int disable_msi = 0;
109 module_param(disable_msi, int, 0);
110 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
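/*
 * Example module load (module name assumed from DRV_NAME, i.e. "cxgb"):
 *   modprobe cxgb disable_msi=1 t1powersave=0
 */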
111
112 static const char pci_speed[][4] = {
113         "33", "66", "100", "133"
114 };
115
116 /*
117  * Set up the MAC to receive the types of packets we want.
118  */
119 static void t1_set_rxmode(struct net_device *dev)
120 {
121         struct adapter *adapter = dev->ml_priv;
122         struct cmac *mac = adapter->port[dev->if_port].mac;
123         struct t1_rx_mode rm;
124
125         rm.dev = dev;
126         mac->ops->set_rx_mode(mac, &rm);
127 }
128
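/* Log a port's link state: down, or up with the negotiated speed and duplex. */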
129 static void link_report(struct port_info *p)
130 {
131         if (!netif_carrier_ok(p->dev))
132                 netdev_info(p->dev, "link down\n");
133         else {
134                 const char *s = "10Mbps";
135
136                 switch (p->link_config.speed) {
137                 case SPEED_10000: s = "10Gbps"; break;
138                 case SPEED_1000:  s = "1000Mbps"; break;
139                 case SPEED_100:   s = "100Mbps"; break;
140                 }
141
142                 netdev_info(p->dev, "link up, %s, %s-duplex\n",
143                             s, p->link_config.duplex == DUPLEX_FULL
144                             ? "full" : "half");
145         }
146 }
147
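/*
 * Link state change callback: update and report the carrier state and, on
 * multi-port adapters, retune the SGE Tx scheduler to the negotiated speed.
 */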
148 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
149                         int speed, int duplex, int pause)
150 {
151         struct port_info *p = &adapter->port[port_id];
152
153         if (link_stat != netif_carrier_ok(p->dev)) {
154                 if (link_stat)
155                         netif_carrier_on(p->dev);
156                 else
157                         netif_carrier_off(p->dev);
158                 link_report(p);
159
160                 /* multi-port: inform the TOE */
161                 if ((speed > 0) && (adapter->params.nports > 1)) {
162                         unsigned int sched_speed = 10;
163                         switch (speed) {
164                         case SPEED_1000:
165                                 sched_speed = 1000;
166                                 break;
167                         case SPEED_100:
168                                 sched_speed = 100;
169                                 break;
170                         case SPEED_10:
171                                 sched_speed = 10;
172                                 break;
173                         }
174                         t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
175                 }
176         }
177 }
178
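/*
 * Bring up a port's MAC: reset it, program the MAC address and Rx mode,
 * (re)start link negotiation and enable the MAC in both directions.
 */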
179 static void link_start(struct port_info *p)
180 {
181         struct cmac *mac = p->mac;
182
183         mac->ops->reset(mac);
184         if (mac->ops->macaddress_set)
185                 mac->ops->macaddress_set(mac, p->dev->dev_addr);
186         t1_set_rxmode(p->dev);
187         t1_link_start(p->phy, mac, &p->link_config);
188         mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
189 }
190
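/*
 * Enable TP checksum offload: TCP always, IP header checksums only when TSO
 * is available.
 */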
191 static void enable_hw_csum(struct adapter *adapter)
192 {
193         if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
194                 t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
195         t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
196 }
197
198 /*
199  * Things to do upon first use of a card.
200  * This must run with the rtnl lock held.
201  */
202 static int cxgb_up(struct adapter *adapter)
203 {
204         int err = 0;
205
206         if (!(adapter->flags & FULL_INIT_DONE)) {
207                 err = t1_init_hw_modules(adapter);
208                 if (err)
209                         goto out_err;
210
211                 enable_hw_csum(adapter);
212                 adapter->flags |= FULL_INIT_DONE;
213         }
214
215         t1_interrupts_clear(adapter);
216
217         adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
218         err = request_irq(adapter->pdev->irq, t1_interrupt,
219                           adapter->params.has_msi ? 0 : IRQF_SHARED,
220                           adapter->name, adapter);
221         if (err) {
222                 if (adapter->params.has_msi)
223                         pci_disable_msi(adapter->pdev);
224
225                 goto out_err;
226         }
227
228         t1_sge_start(adapter->sge);
229         t1_interrupts_enable(adapter);
230 out_err:
231         return err;
232 }
233
234 /*
235  * Release resources when all the ports have been stopped.
236  */
237 static void cxgb_down(struct adapter *adapter)
238 {
239         t1_sge_stop(adapter->sge);
240         t1_interrupts_disable(adapter);
241         free_irq(adapter->pdev->irq, adapter);
242         if (adapter->params.has_msi)
243                 pci_disable_msi(adapter->pdev);
244 }
245
246 static int cxgb_open(struct net_device *dev)
247 {
248         int err;
249         struct adapter *adapter = dev->ml_priv;
250         int other_ports = adapter->open_device_map & PORT_MASK;
251
252         napi_enable(&adapter->napi);
253         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
254                 napi_disable(&adapter->napi);
255                 return err;
256         }
257
258         __set_bit(dev->if_port, &adapter->open_device_map);
259         link_start(&adapter->port[dev->if_port]);
260         netif_start_queue(dev);
261         if (!other_ports && adapter->params.stats_update_period)
262                 schedule_mac_stats_update(adapter,
263                                           adapter->params.stats_update_period);
264
265         t1_vlan_mode(adapter, dev->features);
266         return 0;
267 }
268
269 static int cxgb_close(struct net_device *dev)
270 {
271         struct adapter *adapter = dev->ml_priv;
272         struct port_info *p = &adapter->port[dev->if_port];
273         struct cmac *mac = p->mac;
274
275         netif_stop_queue(dev);
276         napi_disable(&adapter->napi);
277         mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
278         netif_carrier_off(dev);
279
280         clear_bit(dev->if_port, &adapter->open_device_map);
281         if (adapter->params.stats_update_period &&
282             !(adapter->open_device_map & PORT_MASK)) {
283                 /* Stop statistics accumulation. */
284                 smp_mb__after_atomic();
285                 spin_lock(&adapter->work_lock);   /* sync with update task */
286                 spin_unlock(&adapter->work_lock);
287                 cancel_mac_stats_update(adapter);
288         }
289
290         if (!adapter->open_device_map)
291                 cxgb_down(adapter);
292         return 0;
293 }
294
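/* Fold the full MAC counter set into the standard net_device_stats. */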
295 static struct net_device_stats *t1_get_stats(struct net_device *dev)
296 {
297         struct adapter *adapter = dev->ml_priv;
298         struct port_info *p = &adapter->port[dev->if_port];
299         struct net_device_stats *ns = &p->netstats;
300         const struct cmac_statistics *pstats;
301
302         /* Do a full update of the MAC stats */
303         pstats = p->mac->ops->statistics_update(p->mac,
304                                                 MAC_STATS_UPDATE_FULL);
305
306         ns->tx_packets = pstats->TxUnicastFramesOK +
307                 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
308
309         ns->rx_packets = pstats->RxUnicastFramesOK +
310                 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
311
312         ns->tx_bytes = pstats->TxOctetsOK;
313         ns->rx_bytes = pstats->RxOctetsOK;
314
315         ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
316                 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
317         ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
318                 pstats->RxFCSErrors + pstats->RxAlignErrors +
319                 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
320                 pstats->RxSymbolErrors + pstats->RxRuntErrors;
321
322         ns->multicast  = pstats->RxMulticastFramesOK;
323         ns->collisions = pstats->TxTotalCollisions;
324
325         /* detailed rx_errors */
326         ns->rx_length_errors = pstats->RxFrameTooLongErrors +
327                 pstats->RxJabberErrors;
328         ns->rx_over_errors   = 0;
329         ns->rx_crc_errors    = pstats->RxFCSErrors;
330         ns->rx_frame_errors  = pstats->RxAlignErrors;
331         ns->rx_fifo_errors   = 0;
332         ns->rx_missed_errors = 0;
333
334         /* detailed tx_errors */
335         ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
336         ns->tx_carrier_errors   = 0;
337         ns->tx_fifo_errors      = pstats->TxUnderrun;
338         ns->tx_heartbeat_errors = 0;
339         ns->tx_window_errors    = pstats->TxLateCollisions;
340         return ns;
341 }
342
343 static u32 get_msglevel(struct net_device *dev)
344 {
345         struct adapter *adapter = dev->ml_priv;
346
347         return adapter->msg_enable;
348 }
349
350 static void set_msglevel(struct net_device *dev, u32 val)
351 {
352         struct adapter *adapter = dev->ml_priv;
353
354         adapter->msg_enable = val;
355 }
356
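/*
 * ethtool -S statistic names; the order must match the values written out
 * by get_stats().
 */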
357 static const char stats_strings[][ETH_GSTRING_LEN] = {
358         "TxOctetsOK",
359         "TxOctetsBad",
360         "TxUnicastFramesOK",
361         "TxMulticastFramesOK",
362         "TxBroadcastFramesOK",
363         "TxPauseFrames",
364         "TxFramesWithDeferredXmissions",
365         "TxLateCollisions",
366         "TxTotalCollisions",
367         "TxFramesAbortedDueToXSCollisions",
368         "TxUnderrun",
369         "TxLengthErrors",
370         "TxInternalMACXmitError",
371         "TxFramesWithExcessiveDeferral",
372         "TxFCSErrors",
373         "TxJumboFramesOk",
374         "TxJumboOctetsOk",
375
376         "RxOctetsOK",
377         "RxOctetsBad",
378         "RxUnicastFramesOK",
379         "RxMulticastFramesOK",
380         "RxBroadcastFramesOK",
381         "RxPauseFrames",
382         "RxFCSErrors",
383         "RxAlignErrors",
384         "RxSymbolErrors",
385         "RxDataErrors",
386         "RxSequenceErrors",
387         "RxRuntErrors",
388         "RxJabberErrors",
389         "RxInternalMACRcvError",
390         "RxInRangeLengthErrors",
391         "RxOutOfRangeLengthField",
392         "RxFrameTooLongErrors",
393         "RxJumboFramesOk",
394         "RxJumboOctetsOk",
395
396         /* Port stats */
397         "RxCsumGood",
398         "TxCsumOffload",
399         "TxTso",
400         "RxVlan",
401         "TxVlan",
402         "TxNeedHeadroom",
403
404         /* Interrupt stats */
405         "rx drops",
406         "pure_rsps",
407         "unhandled irqs",
408         "respQ_empty",
409         "respQ_overflow",
410         "freelistQ_empty",
411         "pkt_too_big",
412         "pkt_mismatch",
413         "cmdQ_full0",
414         "cmdQ_full1",
415
416         "espi_DIP2ParityErr",
417         "espi_DIP4Err",
418         "espi_RxDrops",
419         "espi_TxDrops",
420         "espi_RxOvfl",
421         "espi_ParityErr"
422 };
423
424 #define T2_REGMAP_SIZE (3 * 1024)
425
426 static int get_regs_len(struct net_device *dev)
427 {
428         return T2_REGMAP_SIZE;
429 }
430
431 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
432 {
433         struct adapter *adapter = dev->ml_priv;
434
435         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
436         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
437         strlcpy(info->bus_info, pci_name(adapter->pdev),
438                 sizeof(info->bus_info));
439 }
440
441 static int get_sset_count(struct net_device *dev, int sset)
442 {
443         switch (sset) {
444         case ETH_SS_STATS:
445                 return ARRAY_SIZE(stats_strings);
446         default:
447                 return -EOPNOTSUPP;
448         }
449 }
450
451 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
452 {
453         if (stringset == ETH_SS_STATS)
454                 memcpy(data, stats_strings, sizeof(stats_strings));
455 }
456
457 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
458                       u64 *data)
459 {
460         struct adapter *adapter = dev->ml_priv;
461         struct cmac *mac = adapter->port[dev->if_port].mac;
462         const struct cmac_statistics *s;
463         const struct sge_intr_counts *t;
464         struct sge_port_stats ss;
465
466         s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
467         t = t1_sge_get_intr_counts(adapter->sge);
468         t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
469
470         *data++ = s->TxOctetsOK;
471         *data++ = s->TxOctetsBad;
472         *data++ = s->TxUnicastFramesOK;
473         *data++ = s->TxMulticastFramesOK;
474         *data++ = s->TxBroadcastFramesOK;
475         *data++ = s->TxPauseFrames;
476         *data++ = s->TxFramesWithDeferredXmissions;
477         *data++ = s->TxLateCollisions;
478         *data++ = s->TxTotalCollisions;
479         *data++ = s->TxFramesAbortedDueToXSCollisions;
480         *data++ = s->TxUnderrun;
481         *data++ = s->TxLengthErrors;
482         *data++ = s->TxInternalMACXmitError;
483         *data++ = s->TxFramesWithExcessiveDeferral;
484         *data++ = s->TxFCSErrors;
485         *data++ = s->TxJumboFramesOK;
486         *data++ = s->TxJumboOctetsOK;
487
488         *data++ = s->RxOctetsOK;
489         *data++ = s->RxOctetsBad;
490         *data++ = s->RxUnicastFramesOK;
491         *data++ = s->RxMulticastFramesOK;
492         *data++ = s->RxBroadcastFramesOK;
493         *data++ = s->RxPauseFrames;
494         *data++ = s->RxFCSErrors;
495         *data++ = s->RxAlignErrors;
496         *data++ = s->RxSymbolErrors;
497         *data++ = s->RxDataErrors;
498         *data++ = s->RxSequenceErrors;
499         *data++ = s->RxRuntErrors;
500         *data++ = s->RxJabberErrors;
501         *data++ = s->RxInternalMACRcvError;
502         *data++ = s->RxInRangeLengthErrors;
503         *data++ = s->RxOutOfRangeLengthField;
504         *data++ = s->RxFrameTooLongErrors;
505         *data++ = s->RxJumboFramesOK;
506         *data++ = s->RxJumboOctetsOK;
507
508         *data++ = ss.rx_cso_good;
509         *data++ = ss.tx_cso;
510         *data++ = ss.tx_tso;
511         *data++ = ss.vlan_xtract;
512         *data++ = ss.vlan_insert;
513         *data++ = ss.tx_need_hdrroom;
514
515         *data++ = t->rx_drops;
516         *data++ = t->pure_rsps;
517         *data++ = t->unhandled_irqs;
518         *data++ = t->respQ_empty;
519         *data++ = t->respQ_overflow;
520         *data++ = t->freelistQ_empty;
521         *data++ = t->pkt_too_big;
522         *data++ = t->pkt_mismatch;
523         *data++ = t->cmdQ_full[0];
524         *data++ = t->cmdQ_full[1];
525
526         if (adapter->espi) {
527                 const struct espi_intr_counts *e;
528
529                 e = t1_espi_get_intr_counts(adapter->espi);
530                 *data++ = e->DIP2_parity_err;
531                 *data++ = e->DIP4_err;
532                 *data++ = e->rx_drops;
533                 *data++ = e->tx_drops;
534                 *data++ = e->rx_ovflw;
535                 *data++ = e->parity_err;
536         }
537 }
538
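/* Copy a contiguous register range into the ethtool register dump buffer. */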
539 static inline void reg_block_dump(struct adapter *ap, void *buf,
540                                   unsigned int start, unsigned int end)
541 {
542         u32 *p = buf + start;
543
544         for ( ; start <= end; start += sizeof(u32))
545                 *p++ = readl(ap->regs + start);
546 }
547
548 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
549                      void *buf)
550 {
551         struct adapter *ap = dev->ml_priv;
552
553         /*
554          * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
555          */
556         regs->version = 2;
557
558         memset(buf, 0, T2_REGMAP_SIZE);
559         reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
560         reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
561         reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
562         reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
563         reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
564         reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
565         reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
566         reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
567         reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
568         reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
569 }
570
571 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
572 {
573         struct adapter *adapter = dev->ml_priv;
574         struct port_info *p = &adapter->port[dev->if_port];
575
576         cmd->supported = p->link_config.supported;
577         cmd->advertising = p->link_config.advertising;
578
579         if (netif_carrier_ok(dev)) {
580                 ethtool_cmd_speed_set(cmd, p->link_config.speed);
581                 cmd->duplex = p->link_config.duplex;
582         } else {
583                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
584                 cmd->duplex = DUPLEX_UNKNOWN;
585         }
586
587         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
588         cmd->phy_address = p->phy->mdio.prtad;
589         cmd->transceiver = XCVR_EXTERNAL;
590         cmd->autoneg = p->link_config.autoneg;
591         cmd->maxtxpkt = 0;
592         cmd->maxrxpkt = 0;
593         return 0;
594 }
595
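/* Map a speed/duplex pair to the corresponding SUPPORTED_* capability bit. */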
596 static int speed_duplex_to_caps(int speed, int duplex)
597 {
598         int cap = 0;
599
600         switch (speed) {
601         case SPEED_10:
602                 if (duplex == DUPLEX_FULL)
603                         cap = SUPPORTED_10baseT_Full;
604                 else
605                         cap = SUPPORTED_10baseT_Half;
606                 break;
607         case SPEED_100:
608                 if (duplex == DUPLEX_FULL)
609                         cap = SUPPORTED_100baseT_Full;
610                 else
611                         cap = SUPPORTED_100baseT_Half;
612                 break;
613         case SPEED_1000:
614                 if (duplex == DUPLEX_FULL)
615                         cap = SUPPORTED_1000baseT_Full;
616                 else
617                         cap = SUPPORTED_1000baseT_Half;
618                 break;
619         case SPEED_10000:
620                 if (duplex == DUPLEX_FULL)
621                         cap = SUPPORTED_10000baseT_Full;
622         }
623         return cap;
624 }
625
626 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
627                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
628                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
629                       ADVERTISED_10000baseT_Full)
630
631 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
632 {
633         struct adapter *adapter = dev->ml_priv;
634         struct port_info *p = &adapter->port[dev->if_port];
635         struct link_config *lc = &p->link_config;
636
637         if (!(lc->supported & SUPPORTED_Autoneg))
638                 return -EOPNOTSUPP;             /* can't change speed/duplex */
639
640         if (cmd->autoneg == AUTONEG_DISABLE) {
641                 u32 speed = ethtool_cmd_speed(cmd);
642                 int cap = speed_duplex_to_caps(speed, cmd->duplex);
643
644                 if (!(lc->supported & cap) || (speed == SPEED_1000))
645                         return -EINVAL;
646                 lc->requested_speed = speed;
647                 lc->requested_duplex = cmd->duplex;
648                 lc->advertising = 0;
649         } else {
650                 cmd->advertising &= ADVERTISED_MASK;
651                 if (cmd->advertising & (cmd->advertising - 1))
652                         cmd->advertising = lc->supported;
653                 cmd->advertising &= lc->supported;
654                 if (!cmd->advertising)
655                         return -EINVAL;
656                 lc->requested_speed = SPEED_INVALID;
657                 lc->requested_duplex = DUPLEX_INVALID;
658                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
659         }
660         lc->autoneg = cmd->autoneg;
661         if (netif_running(dev))
662                 t1_link_start(p->phy, p->mac, lc);
663         return 0;
664 }
665
666 static void get_pauseparam(struct net_device *dev,
667                            struct ethtool_pauseparam *epause)
668 {
669         struct adapter *adapter = dev->ml_priv;
670         struct port_info *p = &adapter->port[dev->if_port];
671
672         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
673         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
674         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
675 }
676
677 static int set_pauseparam(struct net_device *dev,
678                           struct ethtool_pauseparam *epause)
679 {
680         struct adapter *adapter = dev->ml_priv;
681         struct port_info *p = &adapter->port[dev->if_port];
682         struct link_config *lc = &p->link_config;
683
684         if (epause->autoneg == AUTONEG_DISABLE)
685                 lc->requested_fc = 0;
686         else if (lc->supported & SUPPORTED_Autoneg)
687                 lc->requested_fc = PAUSE_AUTONEG;
688         else
689                 return -EINVAL;
690
691         if (epause->rx_pause)
692                 lc->requested_fc |= PAUSE_RX;
693         if (epause->tx_pause)
694                 lc->requested_fc |= PAUSE_TX;
695         if (lc->autoneg == AUTONEG_ENABLE) {
696                 if (netif_running(dev))
697                         t1_link_start(p->phy, p->mac, lc);
698         } else {
699                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
700                 if (netif_running(dev))
701                         p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
702                                                          lc->fc);
703         }
704         return 0;
705 }
706
707 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
708 {
709         struct adapter *adapter = dev->ml_priv;
710         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
711
712         e->rx_max_pending = MAX_RX_BUFFERS;
713         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
714         e->tx_max_pending = MAX_CMDQ_ENTRIES;
715
716         e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
717         e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
718         e->tx_pending = adapter->params.sge.cmdQ_size[0];
719 }
720
721 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
722 {
723         struct adapter *adapter = dev->ml_priv;
724         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
725
726         if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
727             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
728             e->tx_pending > MAX_CMDQ_ENTRIES ||
729             e->rx_pending < MIN_FL_ENTRIES ||
730             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
731             e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
732                 return -EINVAL;
733
734         if (adapter->flags & FULL_INIT_DONE)
735                 return -EBUSY;
736
737         adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
738         adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
739         adapter->params.sge.cmdQ_size[0] = e->tx_pending;
740         adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
741                 MAX_CMDQ1_ENTRIES : e->tx_pending;
742         return 0;
743 }
744
745 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
746 {
747         struct adapter *adapter = dev->ml_priv;
748
749         adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
750         adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
751         adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
752         t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
753         return 0;
754 }
755
756 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
757 {
758         struct adapter *adapter = dev->ml_priv;
759
760         c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
761         c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
762         c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
763         return 0;
764 }
765
766 static int get_eeprom_len(struct net_device *dev)
767 {
768         struct adapter *adapter = dev->ml_priv;
769
770         return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
771 }
772
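/* EEPROM dump magic: Chelsio's PCI vendor ID plus the chip version. */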
773 #define EEPROM_MAGIC(ap) \
774         (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
775
776 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
777                       u8 *data)
778 {
779         int i;
780         u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
781         struct adapter *adapter = dev->ml_priv;
782
783         e->magic = EEPROM_MAGIC(adapter);
784         for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
785                 t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
786         memcpy(data, buf + e->offset, e->len);
787         return 0;
788 }
789
790 static const struct ethtool_ops t1_ethtool_ops = {
791         .get_settings      = get_settings,
792         .set_settings      = set_settings,
793         .get_drvinfo       = get_drvinfo,
794         .get_msglevel      = get_msglevel,
795         .set_msglevel      = set_msglevel,
796         .get_ringparam     = get_sge_param,
797         .set_ringparam     = set_sge_param,
798         .get_coalesce      = get_coalesce,
799         .set_coalesce      = set_coalesce,
800         .get_eeprom_len    = get_eeprom_len,
801         .get_eeprom        = get_eeprom,
802         .get_pauseparam    = get_pauseparam,
803         .set_pauseparam    = set_pauseparam,
804         .get_link          = ethtool_op_get_link,
805         .get_strings       = get_strings,
806         .get_sset_count    = get_sset_count,
807         .get_ethtool_stats = get_stats,
808         .get_regs_len      = get_regs_len,
809         .get_regs          = get_regs,
810 };
811
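/* MII ioctls are passed straight through to the PHY's MDIO interface. */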
812 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
813 {
814         struct adapter *adapter = dev->ml_priv;
815         struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
816
817         return mdio_mii_ioctl(mdio, if_mii(req), cmd);
818 }
819
820 static int t1_change_mtu(struct net_device *dev, int new_mtu)
821 {
822         int ret;
823         struct adapter *adapter = dev->ml_priv;
824         struct cmac *mac = adapter->port[dev->if_port].mac;
825
826         if (!mac->ops->set_mtu)
827                 return -EOPNOTSUPP;
828         if ((ret = mac->ops->set_mtu(mac, new_mtu)))
829                 return ret;
830         dev->mtu = new_mtu;
831         return 0;
832 }
833
834 static int t1_set_mac_addr(struct net_device *dev, void *p)
835 {
836         struct adapter *adapter = dev->ml_priv;
837         struct cmac *mac = adapter->port[dev->if_port].mac;
838         struct sockaddr *addr = p;
839
840         if (!mac->ops->macaddress_set)
841                 return -EOPNOTSUPP;
842
843         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
844         mac->ops->macaddress_set(mac, dev->dev_addr);
845         return 0;
846 }
847
848 static netdev_features_t t1_fix_features(struct net_device *dev,
849         netdev_features_t features)
850 {
851         /*
852          * Since there is no support for separate rx/tx vlan acceleration
853          * enable/disable, make sure the tx flag always matches the rx flag.
854          */
855         if (features & NETIF_F_HW_VLAN_CTAG_RX)
856                 features |= NETIF_F_HW_VLAN_CTAG_TX;
857         else
858                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
859
860         return features;
861 }
862
863 static int t1_set_features(struct net_device *dev, netdev_features_t features)
864 {
865         netdev_features_t changed = dev->features ^ features;
866         struct adapter *adapter = dev->ml_priv;
867
868         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
869                 t1_vlan_mode(adapter, features);
870
871         return 0;
872 }
873 #ifdef CONFIG_NET_POLL_CONTROLLER
874 static void t1_netpoll(struct net_device *dev)
875 {
876         unsigned long flags;
877         struct adapter *adapter = dev->ml_priv;
878
879         local_irq_save(flags);
880         t1_interrupt(adapter->pdev->irq, adapter);
881         local_irq_restore(flags);
882 }
883 #endif
884
885 /*
886  * Periodic accumulation of MAC statistics.  This is used only if the MAC
887  * does not have any other way to prevent stats counter overflow.
888  */
889 static void mac_stats_task(struct work_struct *work)
890 {
891         int i;
892         struct adapter *adapter =
893                 container_of(work, struct adapter, stats_update_task.work);
894
895         for_each_port(adapter, i) {
896                 struct port_info *p = &adapter->port[i];
897
898                 if (netif_running(p->dev))
899                         p->mac->ops->statistics_update(p->mac,
900                                                        MAC_STATS_UPDATE_FAST);
901         }
902
903         /* Schedule the next statistics update if any port is active. */
904         spin_lock(&adapter->work_lock);
905         if (adapter->open_device_map & PORT_MASK)
906                 schedule_mac_stats_update(adapter,
907                                           adapter->params.stats_update_period);
908         spin_unlock(&adapter->work_lock);
909 }
910
911 /*
912  * Processes elmer0 external interrupts in process context.
913  */
914 static void ext_intr_task(struct work_struct *work)
915 {
916         struct adapter *adapter =
917                 container_of(work, struct adapter, ext_intr_handler_task);
918
919         t1_elmer0_ext_intr_handler(adapter);
920
921         /* Now reenable external interrupts */
922         spin_lock_irq(&adapter->async_lock);
923         adapter->slow_intr_mask |= F_PL_INTR_EXT;
924         writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
925         writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
926                    adapter->regs + A_PL_ENABLE);
927         spin_unlock_irq(&adapter->async_lock);
928 }
929
930 /*
931  * Interrupt-context handler for elmer0 external interrupts.
932  */
933 void t1_elmer0_ext_intr(struct adapter *adapter)
934 {
935         /*
936          * Schedule a task to handle external interrupts as we require
937          * a process context.  We disable EXT interrupts in the interim
938          * and let the task reenable them when it's done.
939          */
940         adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
941         writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
942                    adapter->regs + A_PL_ENABLE);
943         schedule_work(&adapter->ext_intr_handler_task);
944 }
945
946 void t1_fatal_err(struct adapter *adapter)
947 {
948         if (adapter->flags & FULL_INIT_DONE) {
949                 t1_sge_stop(adapter->sge);
950                 t1_interrupts_disable(adapter);
951         }
952         pr_alert("%s: encountered fatal error, operation suspended\n",
953                  adapter->name);
954 }
955
956 static const struct net_device_ops cxgb_netdev_ops = {
957         .ndo_open               = cxgb_open,
958         .ndo_stop               = cxgb_close,
959         .ndo_start_xmit         = t1_start_xmit,
960         .ndo_get_stats          = t1_get_stats,
961         .ndo_validate_addr      = eth_validate_addr,
962         .ndo_set_rx_mode        = t1_set_rxmode,
963         .ndo_do_ioctl           = t1_ioctl,
964         .ndo_change_mtu         = t1_change_mtu,
965         .ndo_set_mac_address    = t1_set_mac_addr,
966         .ndo_fix_features       = t1_fix_features,
967         .ndo_set_features       = t1_set_features,
968 #ifdef CONFIG_NET_POLL_CONTROLLER
969         .ndo_poll_controller    = t1_netpoll,
970 #endif
971 };
972
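/*
 * PCI probe: map the device, allocate and initialize the adapter and one
 * net_device per port, then register the ports that come up cleanly.
 */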
973 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
974 {
975         int i, err, pci_using_dac = 0;
976         unsigned long mmio_start, mmio_len;
977         const struct board_info *bi;
978         struct adapter *adapter = NULL;
979         struct port_info *pi;
980
981         pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);
982
983         err = pci_enable_device(pdev);
984         if (err)
985                 return err;
986
987         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
988                 pr_err("%s: cannot find PCI device memory base address\n",
989                        pci_name(pdev));
990                 err = -ENODEV;
991                 goto out_disable_pdev;
992         }
993
994         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
995                 pci_using_dac = 1;
996
997                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
998                         pr_err("%s: unable to obtain 64-bit DMA for "
999                                "consistent allocations\n", pci_name(pdev));
1000                         err = -ENODEV;
1001                         goto out_disable_pdev;
1002                 }
1003
1004         } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
1005                 pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
1006                 goto out_disable_pdev;
1007         }
1008
1009         err = pci_request_regions(pdev, DRV_NAME);
1010         if (err) {
1011                 pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
1012                 goto out_disable_pdev;
1013         }
1014
1015         pci_set_master(pdev);
1016
1017         mmio_start = pci_resource_start(pdev, 0);
1018         mmio_len = pci_resource_len(pdev, 0);
1019         bi = t1_get_board_info(ent->driver_data);
1020
1021         for (i = 0; i < bi->port_number; ++i) {
1022                 struct net_device *netdev;
1023
1024                 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1025                 if (!netdev) {
1026                         err = -ENOMEM;
1027                         goto out_free_dev;
1028                 }
1029
1030                 SET_NETDEV_DEV(netdev, &pdev->dev);
1031
1032                 if (!adapter) {
1033                         adapter = netdev_priv(netdev);
1034                         adapter->pdev = pdev;
1035                         adapter->port[0].dev = netdev;  /* so we don't leak it */
1036
1037                         adapter->regs = ioremap(mmio_start, mmio_len);
1038                         if (!adapter->regs) {
1039                                 pr_err("%s: cannot map device registers\n",
1040                                        pci_name(pdev));
1041                                 err = -ENOMEM;
1042                                 goto out_free_dev;
1043                         }
1044
1045                         if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1046                                 err = -ENODEV;    /* Can't handle this chip rev */
1047                                 goto out_free_dev;
1048                         }
1049
1050                         adapter->name = pci_name(pdev);
1051                         adapter->msg_enable = dflt_msg_enable;
1052                         adapter->mmio_len = mmio_len;
1053
1054                         spin_lock_init(&adapter->tpi_lock);
1055                         spin_lock_init(&adapter->work_lock);
1056                         spin_lock_init(&adapter->async_lock);
1057                         spin_lock_init(&adapter->mac_lock);
1058
1059                         INIT_WORK(&adapter->ext_intr_handler_task,
1060                                   ext_intr_task);
1061                         INIT_DELAYED_WORK(&adapter->stats_update_task,
1062                                           mac_stats_task);
1063
1064                         pci_set_drvdata(pdev, netdev);
1065                 }
1066
1067                 pi = &adapter->port[i];
1068                 pi->dev = netdev;
1069                 netif_carrier_off(netdev);
1070                 netdev->irq = pdev->irq;
1071                 netdev->if_port = i;
1072                 netdev->mem_start = mmio_start;
1073                 netdev->mem_end = mmio_start + mmio_len - 1;
1074                 netdev->ml_priv = adapter;
1075                 netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
1076                         NETIF_F_RXCSUM;
1077                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
1078                         NETIF_F_RXCSUM | NETIF_F_LLTX;
1079
1080                 if (pci_using_dac)
1081                         netdev->features |= NETIF_F_HIGHDMA;
1082                 if (vlan_tso_capable(adapter)) {
1083                         netdev->features |=
1084                                 NETIF_F_HW_VLAN_CTAG_TX |
1085                                 NETIF_F_HW_VLAN_CTAG_RX;
1086                         netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1087
1088                         /* T204: disable TSO */
1089                         if (!(is_T2(adapter)) || bi->port_number != 4) {
1090                                 netdev->hw_features |= NETIF_F_TSO;
1091                                 netdev->features |= NETIF_F_TSO;
1092                         }
1093                 }
1094
1095                 netdev->netdev_ops = &cxgb_netdev_ops;
1096                 netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
1097                         sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1098
1099                 netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
1100
1101                 netdev->ethtool_ops = &t1_ethtool_ops;
1102
1103                 switch (bi->board) {
1104                 case CHBT_BOARD_CHT110:
1105                 case CHBT_BOARD_N110:
1106                 case CHBT_BOARD_N210:
1107                 case CHBT_BOARD_CHT210:
1108                         netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
1109                                           (ETH_HLEN + ETH_FCS_LEN);
1110                         break;
1111                 case CHBT_BOARD_CHN204:
1112                         netdev->max_mtu = VSC7326_MAX_MTU;
1113                         break;
1114                 default:
1115                         netdev->max_mtu = ETH_DATA_LEN;
1116                         break;
1117                 }
1118         }
1119
1120         if (t1_init_sw_modules(adapter, bi) < 0) {
1121                 err = -ENODEV;
1122                 goto out_free_dev;
1123         }
1124
1125         /*
1126          * The card is now ready to go.  If any errors occur during device
1127          * registration, we do not fail the whole card but rather proceed only
1128          * with the ports we manage to register successfully.  However, we must
1129          * register at least one net device.
1130          */
1131         for (i = 0; i < bi->port_number; ++i) {
1132                 err = register_netdev(adapter->port[i].dev);
1133                 if (err)
1134                         pr_warn("%s: cannot register net device %s, skipping\n",
1135                                 pci_name(pdev), adapter->port[i].dev->name);
1136                 else {
1137                         /*
1138                          * Change the name we use for messages to the name of
1139                          * the first successfully registered interface.
1140                          */
1141                         if (!adapter->registered_device_map)
1142                                 adapter->name = adapter->port[i].dev->name;
1143
1144                         __set_bit(i, &adapter->registered_device_map);
1145                 }
1146         }
1147         if (!adapter->registered_device_map) {
1148                 pr_err("%s: could not register any net devices\n",
1149                        pci_name(pdev));
1150                 goto out_release_adapter_res;
1151         }
1152
1153         pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
1154                 adapter->name, bi->desc, adapter->params.chip_revision,
1155                 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1156                 adapter->params.pci.speed, adapter->params.pci.width);
1157
1158         /*
1159          * Set the T1B ASIC and memory clocks.
1160          */
1161         if (t1powersave)
1162                 adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
1163         else
1164                 adapter->t1powersave = HCLOCK;
1165         if (t1_is_T1B(adapter))
1166                 t1_clock(adapter, t1powersave);
1167
1168         return 0;
1169
1170 out_release_adapter_res:
1171         t1_free_sw_modules(adapter);
1172 out_free_dev:
1173         if (adapter) {
1174                 if (adapter->regs)
1175                         iounmap(adapter->regs);
1176                 for (i = bi->port_number - 1; i >= 0; --i)
1177                         if (adapter->port[i].dev)
1178                                 free_netdev(adapter->port[i].dev);
1179         }
1180         pci_release_regions(pdev);
1181 out_disable_pdev:
1182         pci_disable_device(pdev);
1183         return err;
1184 }
1185
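/*
 * Shift nbits of bitdata out to the clock synthesizer, MSB first, by
 * toggling the data and clock lines in the ELMER0 GPO register.
 */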
1186 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1187 {
1188         int data;
1189         int i;
1190         u32 val;
1191
1192         enum {
1193                 S_CLOCK = 1 << 3,
1194                 S_DATA = 1 << 4
1195         };
1196
1197         for (i = (nbits - 1); i > -1; i--) {
1198
1199                 udelay(50);
1200
1201                 data = ((bitdata >> i) & 0x1);
1202                 __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1203
1204                 if (data)
1205                         val |= S_DATA;
1206                 else
1207                         val &= ~S_DATA;
1208
1209                 udelay(50);
1210
1211                 /* Set SCLOCK low */
1212                 val &= ~S_CLOCK;
1213                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1214
1215                 udelay(50);
1216
1217                 /* Write SCLOCK high */
1218                 val |= S_CLOCK;
1219                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1220
1221         }
1222 }
1223
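/*
 * Reprogram the T1B core and memory clock synthesizers for full-speed
 * (HCLOCK) or powersave (LCLOCK) operation.
 */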
1224 static int t1_clock(struct adapter *adapter, int mode)
1225 {
1226         u32 val;
1227         int M_CORE_VAL;
1228         int M_MEM_VAL;
1229
1230         enum {
1231                 M_CORE_BITS     = 9,
1232                 T_CORE_VAL      = 0,
1233                 T_CORE_BITS     = 2,
1234                 N_CORE_VAL      = 0,
1235                 N_CORE_BITS     = 2,
1236                 M_MEM_BITS      = 9,
1237                 T_MEM_VAL       = 0,
1238                 T_MEM_BITS      = 2,
1239                 N_MEM_VAL       = 0,
1240                 N_MEM_BITS      = 2,
1241                 NP_LOAD         = 1 << 17,
1242                 S_LOAD_MEM      = 1 << 5,
1243                 S_LOAD_CORE     = 1 << 6,
1244                 S_CLOCK         = 1 << 3
1245         };
1246
1247         if (!t1_is_T1B(adapter))
1248                 return -ENODEV; /* Can't re-clock this chip. */
1249
1250         if (mode & 2)
1251                 return 0;       /* show current mode. */
1252
1253         if ((adapter->t1powersave & 1) == (mode & 1))
1254                 return -EALREADY;       /* ASIC already running in mode. */
1255
1256         if ((mode & 1) == HCLOCK) {
1257                 M_CORE_VAL = 0x14;
1258                 M_MEM_VAL = 0x18;
1259                 adapter->t1powersave = HCLOCK;  /* overclock */
1260         } else {
1261                 M_CORE_VAL = 0xe;
1262                 M_MEM_VAL = 0x10;
1263                 adapter->t1powersave = LCLOCK;  /* underclock */
1264         }
1265
1266         /* Don't interrupt this serial stream! */
1267         spin_lock(&adapter->tpi_lock);
1268
1269         /* Initialize for ASIC core */
1270         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1271         val |= NP_LOAD;
1272         udelay(50);
1273         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1274         udelay(50);
1275         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1276         val &= ~S_LOAD_CORE;
1277         val &= ~S_CLOCK;
1278         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1279         udelay(50);
1280
1281         /* Serial program the ASIC clock synthesizer */
1282         bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
1283         bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
1284         bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
1285         udelay(50);
1286
1287         /* Finish ASIC core */
1288         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1289         val |= S_LOAD_CORE;
1290         udelay(50);
1291         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1292         udelay(50);
1293         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1294         val &= ~S_LOAD_CORE;
1295         udelay(50);
1296         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1297         udelay(50);
1298
1299         /* Initialize for memory */
1300         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1301         val |= NP_LOAD;
1302         udelay(50);
1303         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1304         udelay(50);
1305         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1306         val &= ~S_LOAD_MEM;
1307         val &= ~S_CLOCK;
1308         udelay(50);
1309         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1310         udelay(50);
1311
1312         /* Serial program the memory clock synthesizer */
1313         bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
1314         bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
1315         bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
1316         udelay(50);
1317
1318         /* Finish memory */
1319         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1320         val |= S_LOAD_MEM;
1321         udelay(50);
1322         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1323         udelay(50);
1324         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1325         val &= ~S_LOAD_MEM;
1326         udelay(50);
1327         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1328
1329         spin_unlock(&adapter->tpi_lock);
1330
1331         return 0;
1332 }
1333
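/* Reset the chip by cycling its PCI power-management state (D3hot, then D0). */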
1334 static inline void t1_sw_reset(struct pci_dev *pdev)
1335 {
1336         pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1337         pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1338 }
1339
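/* PCI remove: unregister the ports, free software state and reset the chip. */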
1340 static void remove_one(struct pci_dev *pdev)
1341 {
1342         struct net_device *dev = pci_get_drvdata(pdev);
1343         struct adapter *adapter = dev->ml_priv;
1344         int i;
1345
1346         for_each_port(adapter, i) {
1347                 if (test_bit(i, &adapter->registered_device_map))
1348                         unregister_netdev(adapter->port[i].dev);
1349         }
1350
1351         t1_free_sw_modules(adapter);
1352         iounmap(adapter->regs);
1353
1354         while (--i >= 0) {
1355                 if (adapter->port[i].dev)
1356                         free_netdev(adapter->port[i].dev);
1357         }
1358
1359         pci_release_regions(pdev);
1360         pci_disable_device(pdev);
1361         t1_sw_reset(pdev);
1362 }
1363
1364 static struct pci_driver cxgb_pci_driver = {
1365         .name     = DRV_NAME,
1366         .id_table = t1_pci_tbl,
1367         .probe    = init_one,
1368         .remove   = remove_one,
1369 };
1370
1371 module_pci_driver(cxgb_pci_driver);