2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
8 * See MAINTAINERS file for support contact information.
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
53 #define assert(expr) \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 #define R8169_REGS_SIZE 256
84 #define R8169_NAPI_WEIGHT 64
85 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
86 #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
87 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
88 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
90 #define RTL8169_TX_TIMEOUT (6*HZ)
91 #define RTL8169_PHY_TIMEOUT (10*HZ)
93 /* write/read MMIO register */
94 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97 #define RTL_R8(reg) readb (ioaddr + (reg))
98 #define RTL_R16(reg) readw (ioaddr + (reg))
99 #define RTL_R32(reg) readl (ioaddr + (reg))
102 RTL_GIGA_MAC_VER_01 = 0,
143 RTL_GIGA_MAC_NONE = 0xff,
146 enum rtl_tx_desc_version {
151 #define JUMBO_1K ETH_DATA_LEN
152 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
153 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
154 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
155 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
157 #define _R(NAME,TD,FW,SZ,B) { \
165 static const struct {
167 enum rtl_tx_desc_version txd_version;
171 } rtl_chip_infos[] = {
173 [RTL_GIGA_MAC_VER_01] =
174 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
175 [RTL_GIGA_MAC_VER_02] =
176 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
177 [RTL_GIGA_MAC_VER_03] =
178 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
179 [RTL_GIGA_MAC_VER_04] =
180 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_05] =
182 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_06] =
184 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
186 [RTL_GIGA_MAC_VER_07] =
187 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
188 [RTL_GIGA_MAC_VER_08] =
189 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
190 [RTL_GIGA_MAC_VER_09] =
191 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
192 [RTL_GIGA_MAC_VER_10] =
193 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_11] =
195 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
196 [RTL_GIGA_MAC_VER_12] =
197 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
198 [RTL_GIGA_MAC_VER_13] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_14] =
201 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
202 [RTL_GIGA_MAC_VER_15] =
203 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
204 [RTL_GIGA_MAC_VER_16] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_17] =
207 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
208 [RTL_GIGA_MAC_VER_18] =
209 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
210 [RTL_GIGA_MAC_VER_19] =
211 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
212 [RTL_GIGA_MAC_VER_20] =
213 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
214 [RTL_GIGA_MAC_VER_21] =
215 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_22] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_23] =
219 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_24] =
221 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_25] =
223 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
225 [RTL_GIGA_MAC_VER_26] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
228 [RTL_GIGA_MAC_VER_27] =
229 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
230 [RTL_GIGA_MAC_VER_28] =
231 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
232 [RTL_GIGA_MAC_VER_29] =
233 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
235 [RTL_GIGA_MAC_VER_30] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
238 [RTL_GIGA_MAC_VER_31] =
239 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
240 [RTL_GIGA_MAC_VER_32] =
241 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
243 [RTL_GIGA_MAC_VER_33] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
246 [RTL_GIGA_MAC_VER_34] =
247 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
249 [RTL_GIGA_MAC_VER_35] =
250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
252 [RTL_GIGA_MAC_VER_36] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
255 [RTL_GIGA_MAC_VER_37] =
256 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
258 [RTL_GIGA_MAC_VER_38] =
259 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 [RTL_GIGA_MAC_VER_39] =
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
264 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
267 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
278 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
279 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
280 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
281 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
282 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
283 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
284 { PCI_VENDOR_ID_DLINK, 0x4300,
285 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
286 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
287 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
289 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_LINKSYS, 0x1032,
291 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
293 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
297 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
299 static int rx_buf_sz = 16383;
306 MAC0 = 0, /* Ethernet hardware address. */
308 MAR0 = 8, /* Multicast filter. */
309 CounterAddrLow = 0x10,
310 CounterAddrHigh = 0x14,
311 TxDescStartAddrLow = 0x20,
312 TxDescStartAddrHigh = 0x24,
313 TxHDescStartAddrLow = 0x28,
314 TxHDescStartAddrHigh = 0x2c,
323 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
324 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
327 #define RX128_INT_EN (1 << 15) /* 8111c and later */
328 #define RX_MULTI_EN (1 << 14) /* 8111c only */
329 #define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */
331 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
332 #define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */
334 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
341 #define PME_SIGNAL (1 << 5) /* 8168c and later */
352 RxDescAddrLow = 0xe4,
353 RxDescAddrHigh = 0xe8,
354 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
356 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
358 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
360 #define TxPacketMax (8064 >> 7)
361 #define EarlySize 0x27
364 FuncEventMask = 0xf4,
365 FuncPresetState = 0xf8,
366 FuncForceEvent = 0xfc,
369 enum rtl8110_registers {
375 enum rtl8168_8101_registers {
378 #define CSIAR_FLAG 0x80000000
379 #define CSIAR_WRITE_CMD 0x80000000
380 #define CSIAR_BYTE_ENABLE 0x0f
381 #define CSIAR_BYTE_ENABLE_SHIFT 12
382 #define CSIAR_ADDR_MASK 0x0fff
383 #define CSIAR_FUNC_CARD 0x00000000
384 #define CSIAR_FUNC_SDIO 0x00010000
385 #define CSIAR_FUNC_NIC 0x00020000
388 #define EPHYAR_FLAG 0x80000000
389 #define EPHYAR_WRITE_CMD 0x80000000
390 #define EPHYAR_REG_MASK 0x1f
391 #define EPHYAR_REG_SHIFT 16
392 #define EPHYAR_DATA_MASK 0xffff
394 #define PFM_EN (1 << 6)
396 #define FIX_NAK_1 (1 << 4)
397 #define FIX_NAK_2 (1 << 3)
400 #define NOW_IS_OOB (1 << 7)
401 #define TX_EMPTY (1 << 5)
402 #define RX_EMPTY (1 << 4)
403 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
404 #define EN_NDP (1 << 3)
405 #define EN_OOB_RESET (1 << 2)
406 #define LINK_LIST_RDY (1 << 1)
408 #define EFUSEAR_FLAG 0x80000000
409 #define EFUSEAR_WRITE_CMD 0x80000000
410 #define EFUSEAR_READ_CMD 0x00000000
411 #define EFUSEAR_REG_MASK 0x03ff
412 #define EFUSEAR_REG_SHIFT 8
413 #define EFUSEAR_DATA_MASK 0xff
416 enum rtl8168_registers {
421 #define ERIAR_FLAG 0x80000000
422 #define ERIAR_WRITE_CMD 0x80000000
423 #define ERIAR_READ_CMD 0x00000000
424 #define ERIAR_ADDR_BYTE_ALIGN 4
425 #define ERIAR_TYPE_SHIFT 16
426 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
427 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
428 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
429 #define ERIAR_MASK_SHIFT 12
430 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
431 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
432 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
433 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
434 EPHY_RXER_NUM = 0x7c,
435 OCPDR = 0xb0, /* OCP GPHY access */
436 #define OCPDR_WRITE_CMD 0x80000000
437 #define OCPDR_READ_CMD 0x00000000
438 #define OCPDR_REG_MASK 0x7f
439 #define OCPDR_GPHY_REG_SHIFT 16
440 #define OCPDR_DATA_MASK 0xffff
442 #define OCPAR_FLAG 0x80000000
443 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
444 #define OCPAR_GPHY_READ_CMD 0x0000f060
446 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
447 MISC = 0xf0, /* 8168e only. */
448 #define TXPLA_RST (1 << 29)
449 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
450 #define PWM_EN (1 << 22)
451 #define RXDV_GATED_EN (1 << 19)
452 #define EARLY_TALLY_EN (1 << 16)
453 #define FORCE_CLK (1 << 15) /* force clock request */
456 enum rtl_register_content {
457 /* InterruptStatusBits */
461 TxDescUnavail = 0x0080,
485 /* TXPoll register p.5 */
486 HPQ = 0x80, /* Poll cmd on the high prio queue */
487 NPQ = 0x40, /* Poll cmd on the low prio queue */
488 FSWInt = 0x01, /* Forced software interrupt */
492 Cfg9346_Unlock = 0xc0,
497 AcceptBroadcast = 0x08,
498 AcceptMulticast = 0x04,
500 AcceptAllPhys = 0x01,
501 #define RX_CONFIG_ACCEPT_MASK 0x3f
504 TxInterFrameGapShift = 24,
505 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
507 /* Config1 register p.24 */
510 Speed_down = (1 << 4),
514 PMEnable = (1 << 0), /* Power Management Enable */
516 /* Config2 register p. 25 */
517 ClkReqEn = (1 << 7), /* Clock Request Enable */
518 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
519 PCI_Clock_66MHz = 0x01,
520 PCI_Clock_33MHz = 0x00,
522 /* Config3 register p.25 */
523 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
524 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
525 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
526 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
528 /* Config4 register */
529 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
531 /* Config5 register p.27 */
532 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
533 MWF = (1 << 5), /* Accept Multicast wakeup frame */
534 UWF = (1 << 4), /* Accept Unicast wakeup frame */
536 LanWake = (1 << 1), /* LanWake enable/disable */
537 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 ASPM_en = (1 << 0), /* ASPM enable */
541 TBIReset = 0x80000000,
542 TBILoopback = 0x40000000,
543 TBINwEnable = 0x20000000,
544 TBINwRestart = 0x10000000,
545 TBILinkOk = 0x02000000,
546 TBINwComplete = 0x01000000,
549 EnableBist = (1 << 15), // 8168 8101
550 Mac_dbgo_oe = (1 << 14), // 8168 8101
551 Normal_mode = (1 << 13), // unused
552 Force_half_dup = (1 << 12), // 8168 8101
553 Force_rxflow_en = (1 << 11), // 8168 8101
554 Force_txflow_en = (1 << 10), // 8168 8101
555 Cxpl_dbg_sel = (1 << 9), // 8168 8101
556 ASF = (1 << 8), // 8168 8101
557 PktCntrDisable = (1 << 7), // 8168 8101
558 Mac_dbgo_sel = 0x001c, // 8168
563 INTT_0 = 0x0000, // 8168
564 INTT_1 = 0x0001, // 8168
565 INTT_2 = 0x0002, // 8168
566 INTT_3 = 0x0003, // 8168
568 /* rtl8169_PHYstatus */
579 TBILinkOK = 0x02000000,
581 /* DumpCounterCommand */
586 /* First doubleword. */
587 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
588 RingEnd = (1 << 30), /* End of descriptor ring */
589 FirstFrag = (1 << 29), /* First segment of a packet */
590 LastFrag = (1 << 28), /* Final segment of a packet */
594 enum rtl_tx_desc_bit {
595 /* First doubleword. */
596 TD_LSO = (1 << 27), /* Large Send Offload */
597 #define TD_MSS_MAX 0x07ffu /* MSS value */
599 /* Second doubleword. */
600 TxVlanTag = (1 << 17), /* Add VLAN tag */
603 /* 8169, 8168b and 810x except 8102e. */
604 enum rtl_tx_desc_bit_0 {
605 /* First doubleword. */
606 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
607 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
608 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
609 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
612 /* 8102e, 8168c and beyond. */
613 enum rtl_tx_desc_bit_1 {
614 /* Second doubleword. */
615 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
616 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
617 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
618 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
621 static const struct rtl_tx_desc_info {
628 } tx_desc_info [] = {
631 .udp = TD0_IP_CS | TD0_UDP_CS,
632 .tcp = TD0_IP_CS | TD0_TCP_CS
634 .mss_shift = TD0_MSS_SHIFT,
639 .udp = TD1_IP_CS | TD1_UDP_CS,
640 .tcp = TD1_IP_CS | TD1_TCP_CS
642 .mss_shift = TD1_MSS_SHIFT,
647 enum rtl_rx_desc_bit {
649 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
650 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
652 #define RxProtoUDP (PID1)
653 #define RxProtoTCP (PID0)
654 #define RxProtoIP (PID1 | PID0)
655 #define RxProtoMask RxProtoIP
657 IPFail = (1 << 16), /* IP checksum failed */
658 UDPFail = (1 << 15), /* UDP/IP checksum failed */
659 TCPFail = (1 << 14), /* TCP/IP checksum failed */
660 RxVlanTag = (1 << 16), /* VLAN tag available */
663 #define RsvdMask 0x3fffc000
680 u8 __pad[sizeof(void *) - sizeof(u32)];
684 RTL_FEATURE_WOL = (1 << 0),
685 RTL_FEATURE_MSI = (1 << 1),
686 RTL_FEATURE_GMII = (1 << 2),
687 RTL_FEATURE_FW_LOADED = (1 << 3),
690 struct rtl8169_counters {
697 __le32 tx_one_collision;
698 __le32 tx_multi_collision;
707 RTL_FLAG_TASK_ENABLED,
708 RTL_FLAG_TASK_SLOW_PENDING,
709 RTL_FLAG_TASK_RESET_PENDING,
710 RTL_FLAG_TASK_PHY_PENDING,
714 struct rtl8169_stats {
717 struct u64_stats_sync syncp;
720 struct rtl8169_private {
721 void __iomem *mmio_addr; /* memory map physical address */
722 struct pci_dev *pci_dev;
723 struct net_device *dev;
724 struct napi_struct napi;
728 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
729 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
731 struct rtl8169_stats rx_stats;
732 struct rtl8169_stats tx_stats;
733 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
734 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
735 dma_addr_t TxPhyAddr;
736 dma_addr_t RxPhyAddr;
737 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
738 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
739 struct timer_list timer;
745 void (*write)(struct rtl8169_private *, int, int);
746 int (*read)(struct rtl8169_private *, int);
749 struct pll_power_ops {
750 void (*down)(struct rtl8169_private *);
751 void (*up)(struct rtl8169_private *);
755 void (*enable)(struct rtl8169_private *);
756 void (*disable)(struct rtl8169_private *);
760 void (*write)(struct rtl8169_private *, int, int);
761 u32 (*read)(struct rtl8169_private *, int);
764 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
765 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
766 void (*phy_reset_enable)(struct rtl8169_private *tp);
767 void (*hw_start)(struct net_device *);
768 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
769 unsigned int (*link_ok)(void __iomem *);
770 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
773 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
775 struct work_struct work;
780 struct mii_if_info mii;
781 struct rtl8169_counters counters;
786 const struct firmware *fw;
788 #define RTL_VER_SIZE 32
790 char version[RTL_VER_SIZE];
792 struct rtl_fw_phy_action {
797 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
802 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
803 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
804 module_param(use_dac, int, 0);
805 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
806 module_param_named(debug, debug.msg_enable, int, 0);
807 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
808 MODULE_LICENSE("GPL");
809 MODULE_VERSION(RTL8169_VERSION);
810 MODULE_FIRMWARE(FIRMWARE_8168D_1);
811 MODULE_FIRMWARE(FIRMWARE_8168D_2);
812 MODULE_FIRMWARE(FIRMWARE_8168E_1);
813 MODULE_FIRMWARE(FIRMWARE_8168E_2);
814 MODULE_FIRMWARE(FIRMWARE_8168E_3);
815 MODULE_FIRMWARE(FIRMWARE_8105E_1);
816 MODULE_FIRMWARE(FIRMWARE_8168F_1);
817 MODULE_FIRMWARE(FIRMWARE_8168F_2);
818 MODULE_FIRMWARE(FIRMWARE_8402_1);
819 MODULE_FIRMWARE(FIRMWARE_8411_1);
820 MODULE_FIRMWARE(FIRMWARE_8106E_1);
821 MODULE_FIRMWARE(FIRMWARE_8168G_1);
823 static void rtl_lock_work(struct rtl8169_private *tp)
825 mutex_lock(&tp->wk.mutex);
828 static void rtl_unlock_work(struct rtl8169_private *tp)
830 mutex_unlock(&tp->wk.mutex);
833 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
835 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
836 PCI_EXP_DEVCTL_READRQ, force);
840 bool (*check)(struct rtl8169_private *);
844 static void rtl_udelay(unsigned int d)
849 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
850 void (*delay)(unsigned int), unsigned int d, int n,
855 for (i = 0; i < n; i++) {
857 if (c->check(tp) == high)
860 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
861 c->msg, !high, n, d);
865 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
866 const struct rtl_cond *c,
867 unsigned int d, int n)
869 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
872 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
873 const struct rtl_cond *c,
874 unsigned int d, int n)
876 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
879 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
880 const struct rtl_cond *c,
881 unsigned int d, int n)
883 return rtl_loop_wait(tp, c, msleep, d, n, true);
886 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
887 const struct rtl_cond *c,
888 unsigned int d, int n)
890 return rtl_loop_wait(tp, c, msleep, d, n, false);
893 #define DECLARE_RTL_COND(name) \
894 static bool name ## _check(struct rtl8169_private *); \
896 static const struct rtl_cond name = { \
897 .check = name ## _check, \
901 static bool name ## _check(struct rtl8169_private *tp)
903 DECLARE_RTL_COND(rtl_ocpar_cond)
905 void __iomem *ioaddr = tp->mmio_addr;
907 return RTL_R32(OCPAR) & OCPAR_FLAG;
910 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
912 void __iomem *ioaddr = tp->mmio_addr;
914 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
916 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
920 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
922 void __iomem *ioaddr = tp->mmio_addr;
924 RTL_W32(OCPDR, data);
925 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
927 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
930 DECLARE_RTL_COND(rtl_eriar_cond)
932 void __iomem *ioaddr = tp->mmio_addr;
934 return RTL_R32(ERIAR) & ERIAR_FLAG;
937 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
939 void __iomem *ioaddr = tp->mmio_addr;
942 RTL_W32(ERIAR, 0x800010e8);
945 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
948 ocp_write(tp, 0x1, 0x30, 0x00000001);
951 #define OOB_CMD_RESET 0x00
952 #define OOB_CMD_DRIVER_START 0x05
953 #define OOB_CMD_DRIVER_STOP 0x06
955 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
957 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
960 DECLARE_RTL_COND(rtl_ocp_read_cond)
964 reg = rtl8168_get_ocp_reg(tp);
966 return ocp_read(tp, 0x0f, reg) & 0x00000800;
969 static void rtl8168_driver_start(struct rtl8169_private *tp)
971 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
973 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
976 static void rtl8168_driver_stop(struct rtl8169_private *tp)
978 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
980 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
983 static int r8168dp_check_dash(struct rtl8169_private *tp)
985 u16 reg = rtl8168_get_ocp_reg(tp);
987 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
990 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
992 if (reg & 0xffff0001) {
993 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
999 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
1001 void __iomem *ioaddr = tp->mmio_addr;
1003 return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
1006 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1008 void __iomem *ioaddr = tp->mmio_addr;
1010 if (rtl_ocp_reg_failure(tp, reg))
1013 RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1015 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
1018 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1020 void __iomem *ioaddr = tp->mmio_addr;
1022 if (rtl_ocp_reg_failure(tp, reg))
1025 RTL_W32(GPHY_OCP, reg << 15);
1027 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1028 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
1031 static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1035 val = r8168_phy_ocp_read(tp, reg);
1036 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
1039 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1041 void __iomem *ioaddr = tp->mmio_addr;
1043 if (rtl_ocp_reg_failure(tp, reg))
1046 RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
1049 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1051 void __iomem *ioaddr = tp->mmio_addr;
1053 if (rtl_ocp_reg_failure(tp, reg))
1056 RTL_W32(OCPDR, reg << 15);
1058 return RTL_R32(OCPDR);
1061 #define OCP_STD_PHY_BASE 0xa400
1063 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1066 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1070 if (tp->ocp_base != OCP_STD_PHY_BASE)
1073 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1076 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1078 if (tp->ocp_base != OCP_STD_PHY_BASE)
1081 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1084 DECLARE_RTL_COND(rtl_phyar_cond)
1086 void __iomem *ioaddr = tp->mmio_addr;
1088 return RTL_R32(PHYAR) & 0x80000000;
1091 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1093 void __iomem *ioaddr = tp->mmio_addr;
1095 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1097 rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1099 * According to hardware specs a 20us delay is required after write
1100 * complete indication, but before sending next command.
1105 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1107 void __iomem *ioaddr = tp->mmio_addr;
1110 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1112 value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1113 RTL_R32(PHYAR) & 0xffff : ~0;
1116 * According to hardware specs a 20us delay is required after read
1117 * complete indication, but before sending next command.
1124 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1126 void __iomem *ioaddr = tp->mmio_addr;
1128 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1129 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
1130 RTL_W32(EPHY_RXER_NUM, 0);
1132 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
1135 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1137 r8168dp_1_mdio_access(tp, reg,
1138 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1141 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1143 void __iomem *ioaddr = tp->mmio_addr;
1145 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1148 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1149 RTL_W32(EPHY_RXER_NUM, 0);
1151 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1152 RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
1155 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1157 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1159 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1162 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1164 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1167 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1169 void __iomem *ioaddr = tp->mmio_addr;
1171 r8168dp_2_mdio_start(ioaddr);
1173 r8169_mdio_write(tp, reg, value);
1175 r8168dp_2_mdio_stop(ioaddr);
1178 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1180 void __iomem *ioaddr = tp->mmio_addr;
1183 r8168dp_2_mdio_start(ioaddr);
1185 value = r8169_mdio_read(tp, reg);
1187 r8168dp_2_mdio_stop(ioaddr);
1192 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1194 tp->mdio_ops.write(tp, location, val);
1197 static int rtl_readphy(struct rtl8169_private *tp, int location)
1199 return tp->mdio_ops.read(tp, location);
1202 static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
1204 rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
1207 static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
1211 val = rtl_readphy(tp, reg_addr);
1212 rtl_writephy(tp, reg_addr, (val | p) & ~m);
1215 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
1218 struct rtl8169_private *tp = netdev_priv(dev);
1220 rtl_writephy(tp, location, val);
1223 static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
1225 struct rtl8169_private *tp = netdev_priv(dev);
1227 return rtl_readphy(tp, location);
1230 DECLARE_RTL_COND(rtl_ephyar_cond)
1232 void __iomem *ioaddr = tp->mmio_addr;
1234 return RTL_R32(EPHYAR) & EPHYAR_FLAG;
1237 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1239 void __iomem *ioaddr = tp->mmio_addr;
1241 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1242 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1244 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1249 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1251 void __iomem *ioaddr = tp->mmio_addr;
1253 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1255 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1256 RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
1259 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1262 void __iomem *ioaddr = tp->mmio_addr;
1264 BUG_ON((addr & 3) || (mask == 0));
1265 RTL_W32(ERIDR, val);
1266 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1268 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1271 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1273 void __iomem *ioaddr = tp->mmio_addr;
1275 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1277 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1278 RTL_R32(ERIDR) : ~0;
1281 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1286 val = rtl_eri_read(tp, addr, type);
1287 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1296 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1297 const struct exgmac_reg *r, int len)
1300 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1305 DECLARE_RTL_COND(rtl_efusear_cond)
1307 void __iomem *ioaddr = tp->mmio_addr;
1309 return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
1312 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1314 void __iomem *ioaddr = tp->mmio_addr;
1316 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1318 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1319 RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1322 static u16 rtl_get_events(struct rtl8169_private *tp)
1324 void __iomem *ioaddr = tp->mmio_addr;
1326 return RTL_R16(IntrStatus);
1329 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1331 void __iomem *ioaddr = tp->mmio_addr;
1333 RTL_W16(IntrStatus, bits);
1337 static void rtl_irq_disable(struct rtl8169_private *tp)
1339 void __iomem *ioaddr = tp->mmio_addr;
1341 RTL_W16(IntrMask, 0);
1345 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1347 void __iomem *ioaddr = tp->mmio_addr;
1349 RTL_W16(IntrMask, bits);
1352 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1353 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1354 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1356 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1358 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
1361 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1363 void __iomem *ioaddr = tp->mmio_addr;
1365 rtl_irq_disable(tp);
1366 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
1370 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1372 void __iomem *ioaddr = tp->mmio_addr;
1374 return RTL_R32(TBICSR) & TBIReset;
1377 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1379 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1382 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1384 return RTL_R32(TBICSR) & TBILinkOk;
1387 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1389 return RTL_R8(PHYstatus) & LinkStatus;
1392 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1394 void __iomem *ioaddr = tp->mmio_addr;
1396 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1399 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1403 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1404 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1407 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1409 void __iomem *ioaddr = tp->mmio_addr;
1410 struct net_device *dev = tp->dev;
1412 if (!netif_running(dev))
1415 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1416 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1417 if (RTL_R8(PHYstatus) & _1000bpsF) {
1418 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1420 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1422 } else if (RTL_R8(PHYstatus) & _100bps) {
1423 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1425 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1428 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1430 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1433 /* Reset packet filter */
1434 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1436 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1438 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1439 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1440 if (RTL_R8(PHYstatus) & _1000bpsF) {
1441 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1443 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1446 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1448 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1451 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1452 if (RTL_R8(PHYstatus) & _10bps) {
1453 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1455 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1458 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
1464 static void __rtl8169_check_link_status(struct net_device *dev,
1465 struct rtl8169_private *tp,
1466 void __iomem *ioaddr, bool pm)
1468 if (tp->link_ok(ioaddr)) {
1469 rtl_link_chg_patch(tp);
1470 /* This is to cancel a scheduled suspend if there's one. */
1472 pm_request_resume(&tp->pci_dev->dev);
1473 netif_carrier_on(dev);
1474 if (net_ratelimit())
1475 netif_info(tp, ifup, dev, "link up\n");
1477 netif_carrier_off(dev);
1478 netif_info(tp, ifdown, dev, "link down\n");
1480 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1484 static void rtl8169_check_link_status(struct net_device *dev,
1485 struct rtl8169_private *tp,
1486 void __iomem *ioaddr)
1488 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1491 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1493 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1495 void __iomem *ioaddr = tp->mmio_addr;
1499 options = RTL_R8(Config1);
1500 if (!(options & PMEnable))
1503 options = RTL_R8(Config3);
1504 if (options & LinkUp)
1505 wolopts |= WAKE_PHY;
1506 if (options & MagicPacket)
1507 wolopts |= WAKE_MAGIC;
1509 options = RTL_R8(Config5);
1511 wolopts |= WAKE_UCAST;
1513 wolopts |= WAKE_BCAST;
1515 wolopts |= WAKE_MCAST;
1520 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1522 struct rtl8169_private *tp = netdev_priv(dev);
1526 wol->supported = WAKE_ANY;
1527 wol->wolopts = __rtl8169_get_wol(tp);
1529 rtl_unlock_work(tp);
1532 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1534 void __iomem *ioaddr = tp->mmio_addr;
1536 static const struct {
1541 { WAKE_PHY, Config3, LinkUp },
1542 { WAKE_MAGIC, Config3, MagicPacket },
1543 { WAKE_UCAST, Config5, UWF },
1544 { WAKE_BCAST, Config5, BWF },
1545 { WAKE_MCAST, Config5, MWF },
1546 { WAKE_ANY, Config5, LanWake }
1550 RTL_W8(Cfg9346, Cfg9346_Unlock);
1552 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1553 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1554 if (wolopts & cfg[i].opt)
1555 options |= cfg[i].mask;
1556 RTL_W8(cfg[i].reg, options);
1559 switch (tp->mac_version) {
1560 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1561 options = RTL_R8(Config1) & ~PMEnable;
1563 options |= PMEnable;
1564 RTL_W8(Config1, options);
1567 options = RTL_R8(Config2) & ~PME_SIGNAL;
1569 options |= PME_SIGNAL;
1570 RTL_W8(Config2, options);
1574 RTL_W8(Cfg9346, Cfg9346_Lock);
1577 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1579 struct rtl8169_private *tp = netdev_priv(dev);
1584 tp->features |= RTL_FEATURE_WOL;
1586 tp->features &= ~RTL_FEATURE_WOL;
1587 __rtl8169_set_wol(tp, wol->wolopts);
1589 rtl_unlock_work(tp);
1591 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1596 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1598 return rtl_chip_infos[tp->mac_version].fw_name;
1601 static void rtl8169_get_drvinfo(struct net_device *dev,
1602 struct ethtool_drvinfo *info)
1604 struct rtl8169_private *tp = netdev_priv(dev);
1605 struct rtl_fw *rtl_fw = tp->rtl_fw;
1607 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1608 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1609 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1610 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1611 if (!IS_ERR_OR_NULL(rtl_fw))
1612 strlcpy(info->fw_version, rtl_fw->version,
1613 sizeof(info->fw_version));
1616 static int rtl8169_get_regs_len(struct net_device *dev)
1618 return R8169_REGS_SIZE;
1621 static int rtl8169_set_speed_tbi(struct net_device *dev,
1622 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1624 struct rtl8169_private *tp = netdev_priv(dev);
1625 void __iomem *ioaddr = tp->mmio_addr;
1629 reg = RTL_R32(TBICSR);
1630 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1631 (duplex == DUPLEX_FULL)) {
1632 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1633 } else if (autoneg == AUTONEG_ENABLE)
1634 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1636 netif_warn(tp, link, dev,
1637 "incorrect speed setting refused in TBI mode\n");
1644 static int rtl8169_set_speed_xmii(struct net_device *dev,
1645 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1647 struct rtl8169_private *tp = netdev_priv(dev);
1648 int giga_ctrl, bmcr;
1651 rtl_writephy(tp, 0x1f, 0x0000);
1653 if (autoneg == AUTONEG_ENABLE) {
1656 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1657 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1658 ADVERTISE_100HALF | ADVERTISE_100FULL);
1660 if (adv & ADVERTISED_10baseT_Half)
1661 auto_nego |= ADVERTISE_10HALF;
1662 if (adv & ADVERTISED_10baseT_Full)
1663 auto_nego |= ADVERTISE_10FULL;
1664 if (adv & ADVERTISED_100baseT_Half)
1665 auto_nego |= ADVERTISE_100HALF;
1666 if (adv & ADVERTISED_100baseT_Full)
1667 auto_nego |= ADVERTISE_100FULL;
1669 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1671 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1672 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1674 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1675 if (tp->mii.supports_gmii) {
1676 if (adv & ADVERTISED_1000baseT_Half)
1677 giga_ctrl |= ADVERTISE_1000HALF;
1678 if (adv & ADVERTISED_1000baseT_Full)
1679 giga_ctrl |= ADVERTISE_1000FULL;
1680 } else if (adv & (ADVERTISED_1000baseT_Half |
1681 ADVERTISED_1000baseT_Full)) {
1682 netif_info(tp, link, dev,
1683 "PHY does not support 1000Mbps\n");
1687 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1689 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1690 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1694 if (speed == SPEED_10)
1696 else if (speed == SPEED_100)
1697 bmcr = BMCR_SPEED100;
1701 if (duplex == DUPLEX_FULL)
1702 bmcr |= BMCR_FULLDPLX;
1705 rtl_writephy(tp, MII_BMCR, bmcr);
1707 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1708 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1709 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1710 rtl_writephy(tp, 0x17, 0x2138);
1711 rtl_writephy(tp, 0x0e, 0x0260);
1713 rtl_writephy(tp, 0x17, 0x2108);
1714 rtl_writephy(tp, 0x0e, 0x0000);
1723 static int rtl8169_set_speed(struct net_device *dev,
1724 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1726 struct rtl8169_private *tp = netdev_priv(dev);
1729 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1733 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1734 (advertising & ADVERTISED_1000baseT_Full)) {
1735 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1741 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1743 struct rtl8169_private *tp = netdev_priv(dev);
1746 del_timer_sync(&tp->timer);
1749 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1750 cmd->duplex, cmd->advertising);
1751 rtl_unlock_work(tp);
1756 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1757 netdev_features_t features)
1759 struct rtl8169_private *tp = netdev_priv(dev);
1761 if (dev->mtu > TD_MSS_MAX)
1762 features &= ~NETIF_F_ALL_TSO;
1764 if (dev->mtu > JUMBO_1K &&
1765 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1766 features &= ~NETIF_F_IP_CSUM;
1771 static void __rtl8169_set_features(struct net_device *dev,
1772 netdev_features_t features)
1774 struct rtl8169_private *tp = netdev_priv(dev);
1775 netdev_features_t changed = features ^ dev->features;
1776 void __iomem *ioaddr = tp->mmio_addr;
1778 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1781 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1782 if (features & NETIF_F_RXCSUM)
1783 tp->cp_cmd |= RxChkSum;
1785 tp->cp_cmd &= ~RxChkSum;
1787 if (dev->features & NETIF_F_HW_VLAN_RX)
1788 tp->cp_cmd |= RxVlan;
1790 tp->cp_cmd &= ~RxVlan;
1792 RTL_W16(CPlusCmd, tp->cp_cmd);
1795 if (changed & NETIF_F_RXALL) {
1796 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1797 if (features & NETIF_F_RXALL)
1798 tmp |= (AcceptErr | AcceptRunt);
1799 RTL_W32(RxConfig, tmp);
1803 static int rtl8169_set_features(struct net_device *dev,
1804 netdev_features_t features)
1806 struct rtl8169_private *tp = netdev_priv(dev);
1809 __rtl8169_set_features(dev, features);
1810 rtl_unlock_work(tp);
1816 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1818 return (vlan_tx_tag_present(skb)) ?
1819 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1822 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1824 u32 opts2 = le32_to_cpu(desc->opts2);
1826 if (opts2 & RxVlanTag)
1827 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
1830 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1832 struct rtl8169_private *tp = netdev_priv(dev);
1833 void __iomem *ioaddr = tp->mmio_addr;
1837 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1838 cmd->port = PORT_FIBRE;
1839 cmd->transceiver = XCVR_INTERNAL;
1841 status = RTL_R32(TBICSR);
1842 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1843 cmd->autoneg = !!(status & TBINwEnable);
1845 ethtool_cmd_speed_set(cmd, SPEED_1000);
1846 cmd->duplex = DUPLEX_FULL; /* Always set */
1851 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1853 struct rtl8169_private *tp = netdev_priv(dev);
1855 return mii_ethtool_gset(&tp->mii, cmd);
1858 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1860 struct rtl8169_private *tp = netdev_priv(dev);
1864 rc = tp->get_settings(dev, cmd);
1865 rtl_unlock_work(tp);
1870 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1873 struct rtl8169_private *tp = netdev_priv(dev);
1875 if (regs->len > R8169_REGS_SIZE)
1876 regs->len = R8169_REGS_SIZE;
1879 memcpy_fromio(p, tp->mmio_addr, regs->len);
1880 rtl_unlock_work(tp);
1883 static u32 rtl8169_get_msglevel(struct net_device *dev)
1885 struct rtl8169_private *tp = netdev_priv(dev);
1887 return tp->msg_enable;
1890 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1892 struct rtl8169_private *tp = netdev_priv(dev);
1894 tp->msg_enable = value;
1897 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1904 "tx_single_collisions",
1905 "tx_multi_collisions",
1913 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1917 return ARRAY_SIZE(rtl8169_gstrings);
1923 DECLARE_RTL_COND(rtl_counters_cond)
1925 void __iomem *ioaddr = tp->mmio_addr;
1927 return RTL_R32(CounterAddrLow) & CounterDump;
1930 static void rtl8169_update_counters(struct net_device *dev)
1932 struct rtl8169_private *tp = netdev_priv(dev);
1933 void __iomem *ioaddr = tp->mmio_addr;
1934 struct device *d = &tp->pci_dev->dev;
1935 struct rtl8169_counters *counters;
1940 * Some chips are unable to dump tally counters when the receiver
1943 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1946 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
1950 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1951 cmd = (u64)paddr & DMA_BIT_MASK(32);
1952 RTL_W32(CounterAddrLow, cmd);
1953 RTL_W32(CounterAddrLow, cmd | CounterDump);
1955 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1956 memcpy(&tp->counters, counters, sizeof(*counters));
1958 RTL_W32(CounterAddrLow, 0);
1959 RTL_W32(CounterAddrHigh, 0);
1961 dma_free_coherent(d, sizeof(*counters), counters, paddr);
1964 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1965 struct ethtool_stats *stats, u64 *data)
1967 struct rtl8169_private *tp = netdev_priv(dev);
1971 rtl8169_update_counters(dev);
1973 data[0] = le64_to_cpu(tp->counters.tx_packets);
1974 data[1] = le64_to_cpu(tp->counters.rx_packets);
1975 data[2] = le64_to_cpu(tp->counters.tx_errors);
1976 data[3] = le32_to_cpu(tp->counters.rx_errors);
1977 data[4] = le16_to_cpu(tp->counters.rx_missed);
1978 data[5] = le16_to_cpu(tp->counters.align_errors);
1979 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1980 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1981 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1982 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1983 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1984 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1985 data[12] = le16_to_cpu(tp->counters.tx_underun);
1988 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1992 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1997 static const struct ethtool_ops rtl8169_ethtool_ops = {
1998 .get_drvinfo = rtl8169_get_drvinfo,
1999 .get_regs_len = rtl8169_get_regs_len,
2000 .get_link = ethtool_op_get_link,
2001 .get_settings = rtl8169_get_settings,
2002 .set_settings = rtl8169_set_settings,
2003 .get_msglevel = rtl8169_get_msglevel,
2004 .set_msglevel = rtl8169_set_msglevel,
2005 .get_regs = rtl8169_get_regs,
2006 .get_wol = rtl8169_get_wol,
2007 .set_wol = rtl8169_set_wol,
2008 .get_strings = rtl8169_get_strings,
2009 .get_sset_count = rtl8169_get_sset_count,
2010 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2011 .get_ts_info = ethtool_op_get_ts_info,
2014 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2015 struct net_device *dev, u8 default_version)
2017 void __iomem *ioaddr = tp->mmio_addr;
2019 * The driver currently handles the 8168Bf and the 8168Be identically
2020 * but they can be identified more specifically through the test below
2023 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2025 * Same thing for the 8101Eb and the 8101Ec:
2027 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2029 static const struct rtl_mac_info {
2035 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2036 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2039 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
2040 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
2041 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
2044 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
2045 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
2046 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
2047 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
2050 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
2051 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
2052 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
2054 /* 8168DP family. */
2055 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
2056 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
2057 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
2060 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
2061 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
2062 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
2063 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
2064 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
2065 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
2066 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
2067 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
2068 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
2071 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
2072 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
2073 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
2074 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
2077 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2078 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
2079 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
2080 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
2081 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
2082 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
2083 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2084 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
2085 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
2086 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
2087 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
2088 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
2089 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
2090 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2091 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
2092 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2093 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
2094 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
2095 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
2096 /* FIXME: where did these entries come from ? -- FR */
2097 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
2098 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
2101 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
2102 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
2103 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
2104 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
2105 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
2106 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
2109 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
2111 const struct rtl_mac_info *p = mac_info;
2114 reg = RTL_R32(TxConfig);
2115 while ((reg & p->mask) != p->val)
2117 tp->mac_version = p->mac_version;
2119 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2120 netif_notice(tp, probe, dev,
2121 "unknown MAC, using family default\n");
2122 tp->mac_version = default_version;
2126 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2128 dprintk("mac_version = 0x%02x\n", tp->mac_version);
2136 static void rtl_writephy_batch(struct rtl8169_private *tp,
2137 const struct phy_reg *regs, int len)
2140 rtl_writephy(tp, regs->reg, regs->val);
2145 #define PHY_READ 0x00000000
2146 #define PHY_DATA_OR 0x10000000
2147 #define PHY_DATA_AND 0x20000000
2148 #define PHY_BJMPN 0x30000000
2149 #define PHY_READ_EFUSE 0x40000000
2150 #define PHY_READ_MAC_BYTE 0x50000000
2151 #define PHY_WRITE_MAC_BYTE 0x60000000
2152 #define PHY_CLEAR_READCOUNT 0x70000000
2153 #define PHY_WRITE 0x80000000
2154 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2155 #define PHY_COMP_EQ_SKIPN 0xa0000000
2156 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2157 #define PHY_WRITE_PREVIOUS 0xc0000000
2158 #define PHY_SKIPN 0xd0000000
2159 #define PHY_DELAY_MS 0xe0000000
2160 #define PHY_WRITE_ERI_WORD 0xf0000000
2164 char version[RTL_VER_SIZE];
2170 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2172 static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2174 const struct firmware *fw = rtl_fw->fw;
2175 struct fw_info *fw_info = (struct fw_info *)fw->data;
2176 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2177 char *version = rtl_fw->version;
2180 if (fw->size < FW_OPCODE_SIZE)
2183 if (!fw_info->magic) {
2184 size_t i, size, start;
2187 if (fw->size < sizeof(*fw_info))
2190 for (i = 0; i < fw->size; i++)
2191 checksum += fw->data[i];
2195 start = le32_to_cpu(fw_info->fw_start);
2196 if (start > fw->size)
2199 size = le32_to_cpu(fw_info->fw_len);
2200 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2203 memcpy(version, fw_info->version, RTL_VER_SIZE);
2205 pa->code = (__le32 *)(fw->data + start);
2208 if (fw->size % FW_OPCODE_SIZE)
2211 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2213 pa->code = (__le32 *)fw->data;
2214 pa->size = fw->size / FW_OPCODE_SIZE;
2216 version[RTL_VER_SIZE - 1] = 0;
2223 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2224 struct rtl_fw_phy_action *pa)
2229 for (index = 0; index < pa->size; index++) {
2230 u32 action = le32_to_cpu(pa->code[index]);
2231 u32 regno = (action & 0x0fff0000) >> 16;
2233 switch(action & 0xf0000000) {
2237 case PHY_READ_EFUSE:
2238 case PHY_CLEAR_READCOUNT:
2240 case PHY_WRITE_PREVIOUS:
2245 if (regno > index) {
2246 netif_err(tp, ifup, tp->dev,
2247 "Out of range of firmware\n");
2251 case PHY_READCOUNT_EQ_SKIP:
2252 if (index + 2 >= pa->size) {
2253 netif_err(tp, ifup, tp->dev,
2254 "Out of range of firmware\n");
2258 case PHY_COMP_EQ_SKIPN:
2259 case PHY_COMP_NEQ_SKIPN:
2261 if (index + 1 + regno >= pa->size) {
2262 netif_err(tp, ifup, tp->dev,
2263 "Out of range of firmware\n");
2268 case PHY_READ_MAC_BYTE:
2269 case PHY_WRITE_MAC_BYTE:
2270 case PHY_WRITE_ERI_WORD:
2272 netif_err(tp, ifup, tp->dev,
2273 "Invalid action 0x%08x\n", action);
2282 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2284 struct net_device *dev = tp->dev;
2287 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2288 netif_err(tp, ifup, dev, "invalid firwmare\n");
2292 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2298 static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2300 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2304 predata = count = 0;
2306 for (index = 0; index < pa->size; ) {
2307 u32 action = le32_to_cpu(pa->code[index]);
2308 u32 data = action & 0x0000ffff;
2309 u32 regno = (action & 0x0fff0000) >> 16;
2314 switch(action & 0xf0000000) {
2316 predata = rtl_readphy(tp, regno);
2331 case PHY_READ_EFUSE:
2332 predata = rtl8168d_efuse_read(tp, regno);
2335 case PHY_CLEAR_READCOUNT:
2340 rtl_writephy(tp, regno, data);
2343 case PHY_READCOUNT_EQ_SKIP:
2344 index += (count == data) ? 2 : 1;
2346 case PHY_COMP_EQ_SKIPN:
2347 if (predata == data)
2351 case PHY_COMP_NEQ_SKIPN:
2352 if (predata != data)
2356 case PHY_WRITE_PREVIOUS:
2357 rtl_writephy(tp, regno, predata);
2368 case PHY_READ_MAC_BYTE:
2369 case PHY_WRITE_MAC_BYTE:
2370 case PHY_WRITE_ERI_WORD:
2377 static void rtl_release_firmware(struct rtl8169_private *tp)
2379 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2380 release_firmware(tp->rtl_fw->fw);
2383 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2386 static void rtl_apply_firmware(struct rtl8169_private *tp)
2388 struct rtl_fw *rtl_fw = tp->rtl_fw;
2390 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2391 if (!IS_ERR_OR_NULL(rtl_fw)) {
2392 rtl_phy_write_fw(tp, rtl_fw);
2393 tp->features |= RTL_FEATURE_FW_LOADED;
2397 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2399 if (rtl_readphy(tp, reg) != val)
2400 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2402 rtl_apply_firmware(tp);
2405 static void r810x_aldps_disable(struct rtl8169_private *tp)
2407 rtl_writephy(tp, 0x1f, 0x0000);
2408 rtl_writephy(tp, 0x18, 0x0310);
2412 static void r810x_aldps_enable(struct rtl8169_private *tp)
2414 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2417 rtl_writephy(tp, 0x1f, 0x0000);
2418 rtl_writephy(tp, 0x18, 0x8310);
2421 static void r8168_aldps_enable_1(struct rtl8169_private *tp)
2423 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2426 rtl_writephy(tp, 0x1f, 0x0000);
2427 rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
2430 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2432 static const struct phy_reg phy_reg_init[] = {
2494 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2497 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2499 static const struct phy_reg phy_reg_init[] = {
2505 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2508 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2510 struct pci_dev *pdev = tp->pci_dev;
2512 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2513 (pdev->subsystem_device != 0xe000))
2516 rtl_writephy(tp, 0x1f, 0x0001);
2517 rtl_writephy(tp, 0x10, 0xf01b);
2518 rtl_writephy(tp, 0x1f, 0x0000);
2521 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2523 static const struct phy_reg phy_reg_init[] = {
2563 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2565 rtl8169scd_hw_phy_config_quirk(tp);
2568 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2570 static const struct phy_reg phy_reg_init[] = {
2618 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2621 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2623 static const struct phy_reg phy_reg_init[] = {
2628 rtl_writephy(tp, 0x1f, 0x0001);
2629 rtl_patchphy(tp, 0x16, 1 << 0);
2631 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2634 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2636 static const struct phy_reg phy_reg_init[] = {
2642 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2645 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2647 static const struct phy_reg phy_reg_init[] = {
2655 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2658 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2660 static const struct phy_reg phy_reg_init[] = {
2666 rtl_writephy(tp, 0x1f, 0x0000);
2667 rtl_patchphy(tp, 0x14, 1 << 5);
2668 rtl_patchphy(tp, 0x0d, 1 << 5);
2670 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2673 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2675 static const struct phy_reg phy_reg_init[] = {
2695 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2697 rtl_patchphy(tp, 0x14, 1 << 5);
2698 rtl_patchphy(tp, 0x0d, 1 << 5);
2699 rtl_writephy(tp, 0x1f, 0x0000);
2702 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2704 static const struct phy_reg phy_reg_init[] = {
2722 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2724 rtl_patchphy(tp, 0x16, 1 << 0);
2725 rtl_patchphy(tp, 0x14, 1 << 5);
2726 rtl_patchphy(tp, 0x0d, 1 << 5);
2727 rtl_writephy(tp, 0x1f, 0x0000);
2730 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2732 static const struct phy_reg phy_reg_init[] = {
2744 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2746 rtl_patchphy(tp, 0x16, 1 << 0);
2747 rtl_patchphy(tp, 0x14, 1 << 5);
2748 rtl_patchphy(tp, 0x0d, 1 << 5);
2749 rtl_writephy(tp, 0x1f, 0x0000);
2752 static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
2754 rtl8168c_3_hw_phy_config(tp);
2757 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2759 static const struct phy_reg phy_reg_init_0[] = {
2760 /* Channel Estimation */
2781 * Enhance line driver power
2790 * Can not link to 1Gbps with bad cable
2791 * Decrease SNR threshold form 21.07dB to 19.04dB
2800 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2804 * Fine Tune Switching regulator parameter
2806 rtl_writephy(tp, 0x1f, 0x0002);
2807 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2808 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
2810 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2811 static const struct phy_reg phy_reg_init[] = {
2821 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2823 val = rtl_readphy(tp, 0x0d);
2825 if ((val & 0x00ff) != 0x006c) {
2826 static const u32 set[] = {
2827 0x0065, 0x0066, 0x0067, 0x0068,
2828 0x0069, 0x006a, 0x006b, 0x006c
2832 rtl_writephy(tp, 0x1f, 0x0002);
2835 for (i = 0; i < ARRAY_SIZE(set); i++)
2836 rtl_writephy(tp, 0x0d, val | set[i]);
2839 static const struct phy_reg phy_reg_init[] = {
2847 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2850 /* RSET couple improve */
2851 rtl_writephy(tp, 0x1f, 0x0002);
2852 rtl_patchphy(tp, 0x0d, 0x0300);
2853 rtl_patchphy(tp, 0x0f, 0x0010);
2855 /* Fine tune PLL performance */
2856 rtl_writephy(tp, 0x1f, 0x0002);
2857 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2858 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2860 rtl_writephy(tp, 0x1f, 0x0005);
2861 rtl_writephy(tp, 0x05, 0x001b);
2863 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2865 rtl_writephy(tp, 0x1f, 0x0000);
2868 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2870 static const struct phy_reg phy_reg_init_0[] = {
2871 /* Channel Estimation */
2892 * Enhance line driver power
2901 * Can not link to 1Gbps with bad cable
2902 * Decrease SNR threshold form 21.07dB to 19.04dB
2911 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2913 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2914 static const struct phy_reg phy_reg_init[] = {
2925 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2927 val = rtl_readphy(tp, 0x0d);
2928 if ((val & 0x00ff) != 0x006c) {
2929 static const u32 set[] = {
2930 0x0065, 0x0066, 0x0067, 0x0068,
2931 0x0069, 0x006a, 0x006b, 0x006c
2935 rtl_writephy(tp, 0x1f, 0x0002);
2938 for (i = 0; i < ARRAY_SIZE(set); i++)
2939 rtl_writephy(tp, 0x0d, val | set[i]);
2942 static const struct phy_reg phy_reg_init[] = {
2950 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2953 /* Fine tune PLL performance */
2954 rtl_writephy(tp, 0x1f, 0x0002);
2955 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2956 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2958 /* Switching regulator Slew rate */
2959 rtl_writephy(tp, 0x1f, 0x0002);
2960 rtl_patchphy(tp, 0x0f, 0x0017);
2962 rtl_writephy(tp, 0x1f, 0x0005);
2963 rtl_writephy(tp, 0x05, 0x001b);
2965 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2967 rtl_writephy(tp, 0x1f, 0x0000);
2970 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2972 static const struct phy_reg phy_reg_init[] = {
3028 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3031 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3033 static const struct phy_reg phy_reg_init[] = {
3043 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3044 rtl_patchphy(tp, 0x0d, 1 << 5);
3047 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3049 static const struct phy_reg phy_reg_init[] = {
3050 /* Enable Delay cap */
3056 /* Channel estimation fine tune */
3065 /* Update PFM & 10M TX idle timer */
3077 rtl_apply_firmware(tp);
3079 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3081 /* DCO enable for 10M IDLE Power */
3082 rtl_writephy(tp, 0x1f, 0x0007);
3083 rtl_writephy(tp, 0x1e, 0x0023);
3084 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3085 rtl_writephy(tp, 0x1f, 0x0000);
3087 /* For impedance matching */
3088 rtl_writephy(tp, 0x1f, 0x0002);
3089 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
3090 rtl_writephy(tp, 0x1f, 0x0000);
3092 /* PHY auto speed down */
3093 rtl_writephy(tp, 0x1f, 0x0007);
3094 rtl_writephy(tp, 0x1e, 0x002d);
3095 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
3096 rtl_writephy(tp, 0x1f, 0x0000);
3097 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3099 rtl_writephy(tp, 0x1f, 0x0005);
3100 rtl_writephy(tp, 0x05, 0x8b86);
3101 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3102 rtl_writephy(tp, 0x1f, 0x0000);
3104 rtl_writephy(tp, 0x1f, 0x0005);
3105 rtl_writephy(tp, 0x05, 0x8b85);
3106 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3107 rtl_writephy(tp, 0x1f, 0x0007);
3108 rtl_writephy(tp, 0x1e, 0x0020);
3109 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
3110 rtl_writephy(tp, 0x1f, 0x0006);
3111 rtl_writephy(tp, 0x00, 0x5a00);
3112 rtl_writephy(tp, 0x1f, 0x0000);
3113 rtl_writephy(tp, 0x0d, 0x0007);
3114 rtl_writephy(tp, 0x0e, 0x003c);
3115 rtl_writephy(tp, 0x0d, 0x4007);
3116 rtl_writephy(tp, 0x0e, 0x0000);
3117 rtl_writephy(tp, 0x0d, 0x0000);
3120 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3123 addr[0] | (addr[1] << 8),
3124 addr[2] | (addr[3] << 8),
3125 addr[4] | (addr[5] << 8)
3127 const struct exgmac_reg e[] = {
3128 { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3129 { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3130 { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3131 { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3134 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
3137 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3139 static const struct phy_reg phy_reg_init[] = {
3140 /* Enable Delay cap */
3149 /* Channel estimation fine tune */
3166 rtl_apply_firmware(tp);
3168 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3170 /* For 4-corner performance improve */
3171 rtl_writephy(tp, 0x1f, 0x0005);
3172 rtl_writephy(tp, 0x05, 0x8b80);
3173 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3174 rtl_writephy(tp, 0x1f, 0x0000);
3176 /* PHY auto speed down */
3177 rtl_writephy(tp, 0x1f, 0x0004);
3178 rtl_writephy(tp, 0x1f, 0x0007);
3179 rtl_writephy(tp, 0x1e, 0x002d);
3180 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3181 rtl_writephy(tp, 0x1f, 0x0002);
3182 rtl_writephy(tp, 0x1f, 0x0000);
3183 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3185 /* improve 10M EEE waveform */
3186 rtl_writephy(tp, 0x1f, 0x0005);
3187 rtl_writephy(tp, 0x05, 0x8b86);
3188 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3189 rtl_writephy(tp, 0x1f, 0x0000);
3191 /* Improve 2-pair detection performance */
3192 rtl_writephy(tp, 0x1f, 0x0005);
3193 rtl_writephy(tp, 0x05, 0x8b85);
3194 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3195 rtl_writephy(tp, 0x1f, 0x0000);
3198 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3199 rtl_writephy(tp, 0x1f, 0x0005);
3200 rtl_writephy(tp, 0x05, 0x8b85);
3201 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3202 rtl_writephy(tp, 0x1f, 0x0004);
3203 rtl_writephy(tp, 0x1f, 0x0007);
3204 rtl_writephy(tp, 0x1e, 0x0020);
3205 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3206 rtl_writephy(tp, 0x1f, 0x0002);
3207 rtl_writephy(tp, 0x1f, 0x0000);
3208 rtl_writephy(tp, 0x0d, 0x0007);
3209 rtl_writephy(tp, 0x0e, 0x003c);
3210 rtl_writephy(tp, 0x0d, 0x4007);
3211 rtl_writephy(tp, 0x0e, 0x0000);
3212 rtl_writephy(tp, 0x0d, 0x0000);
3215 rtl_writephy(tp, 0x1f, 0x0003);
3216 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3217 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3218 rtl_writephy(tp, 0x1f, 0x0000);
3220 r8168_aldps_enable_1(tp);
3222 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3223 rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
3226 static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3228 /* For 4-corner performance improve */
3229 rtl_writephy(tp, 0x1f, 0x0005);
3230 rtl_writephy(tp, 0x05, 0x8b80);
3231 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3232 rtl_writephy(tp, 0x1f, 0x0000);
3234 /* PHY auto speed down */
3235 rtl_writephy(tp, 0x1f, 0x0007);
3236 rtl_writephy(tp, 0x1e, 0x002d);
3237 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3238 rtl_writephy(tp, 0x1f, 0x0000);
3239 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3241 /* Improve 10M EEE waveform */
3242 rtl_writephy(tp, 0x1f, 0x0005);
3243 rtl_writephy(tp, 0x05, 0x8b86);
3244 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3245 rtl_writephy(tp, 0x1f, 0x0000);
3248 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3250 static const struct phy_reg phy_reg_init[] = {
3251 /* Channel estimation fine tune */
3256 /* Modify green table for giga & fnet */
3273 /* Modify green table for 10M */
3279 /* Disable hiimpedance detection (RTCT) */
3285 rtl_apply_firmware(tp);
3287 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3289 rtl8168f_hw_phy_config(tp);
3291 /* Improve 2-pair detection performance */
3292 rtl_writephy(tp, 0x1f, 0x0005);
3293 rtl_writephy(tp, 0x05, 0x8b85);
3294 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3295 rtl_writephy(tp, 0x1f, 0x0000);
3297 r8168_aldps_enable_1(tp);
3300 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3302 rtl_apply_firmware(tp);
3304 rtl8168f_hw_phy_config(tp);
3306 r8168_aldps_enable_1(tp);
3309 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3311 static const struct phy_reg phy_reg_init[] = {
3312 /* Channel estimation fine tune */
3317 /* Modify green table for giga & fnet */
3334 /* Modify green table for 10M */
3340 /* Disable hiimpedance detection (RTCT) */
3347 rtl_apply_firmware(tp);
3349 rtl8168f_hw_phy_config(tp);
3351 /* Improve 2-pair detection performance */
3352 rtl_writephy(tp, 0x1f, 0x0005);
3353 rtl_writephy(tp, 0x05, 0x8b85);
3354 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3355 rtl_writephy(tp, 0x1f, 0x0000);
3357 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3359 /* Modify green table for giga */
3360 rtl_writephy(tp, 0x1f, 0x0005);
3361 rtl_writephy(tp, 0x05, 0x8b54);
3362 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3363 rtl_writephy(tp, 0x05, 0x8b5d);
3364 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3365 rtl_writephy(tp, 0x05, 0x8a7c);
3366 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3367 rtl_writephy(tp, 0x05, 0x8a7f);
3368 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3369 rtl_writephy(tp, 0x05, 0x8a82);
3370 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3371 rtl_writephy(tp, 0x05, 0x8a85);
3372 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3373 rtl_writephy(tp, 0x05, 0x8a88);
3374 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3375 rtl_writephy(tp, 0x1f, 0x0000);
3377 /* uc same-seed solution */
3378 rtl_writephy(tp, 0x1f, 0x0005);
3379 rtl_writephy(tp, 0x05, 0x8b85);
3380 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3381 rtl_writephy(tp, 0x1f, 0x0000);
3384 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3385 rtl_writephy(tp, 0x1f, 0x0005);
3386 rtl_writephy(tp, 0x05, 0x8b85);
3387 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3388 rtl_writephy(tp, 0x1f, 0x0004);
3389 rtl_writephy(tp, 0x1f, 0x0007);
3390 rtl_writephy(tp, 0x1e, 0x0020);
3391 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3392 rtl_writephy(tp, 0x1f, 0x0000);
3393 rtl_writephy(tp, 0x0d, 0x0007);
3394 rtl_writephy(tp, 0x0e, 0x003c);
3395 rtl_writephy(tp, 0x0d, 0x4007);
3396 rtl_writephy(tp, 0x0e, 0x0000);
3397 rtl_writephy(tp, 0x0d, 0x0000);
3400 rtl_writephy(tp, 0x1f, 0x0003);
3401 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3402 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3403 rtl_writephy(tp, 0x1f, 0x0000);
3405 r8168_aldps_enable_1(tp);
3408 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3410 static const u16 mac_ocp_patch[] = {
3411 0xe008, 0xe01b, 0xe01d, 0xe01f,
3412 0xe021, 0xe023, 0xe025, 0xe027,
3413 0x49d2, 0xf10d, 0x766c, 0x49e2,
3414 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3416 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3417 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3418 0xbe00, 0xb416, 0x0076, 0xe86c,
3419 0xc602, 0xbe00, 0x0000, 0xc602,
3421 0xbe00, 0x0000, 0xc602, 0xbe00,
3422 0x0000, 0xc602, 0xbe00, 0x0000,
3423 0xc602, 0xbe00, 0x0000, 0xc602,
3424 0xbe00, 0x0000, 0xc602, 0xbe00,
3426 0x0000, 0x0000, 0x0000, 0x0000
3430 /* Patch code for GPHY reset */
3431 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
3432 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
3433 r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
3434 r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
3436 rtl_apply_firmware(tp);
3438 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
3439 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
3441 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
3443 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
3444 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
3446 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3448 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
3449 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
3451 r8168_phy_ocp_write(tp, 0xa436, 0x8012);
3452 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
3454 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
3457 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3459 static const struct phy_reg phy_reg_init[] = {
3466 rtl_writephy(tp, 0x1f, 0x0000);
3467 rtl_patchphy(tp, 0x11, 1 << 12);
3468 rtl_patchphy(tp, 0x19, 1 << 13);
3469 rtl_patchphy(tp, 0x10, 1 << 15);
3471 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3474 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3476 static const struct phy_reg phy_reg_init[] = {
3490 /* Disable ALDPS before ram code */
3491 r810x_aldps_disable(tp);
3493 rtl_apply_firmware(tp);
3495 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3497 r810x_aldps_enable(tp);
3500 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3502 /* Disable ALDPS before setting firmware */
3503 r810x_aldps_disable(tp);
3505 rtl_apply_firmware(tp);
3508 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3509 rtl_writephy(tp, 0x1f, 0x0004);
3510 rtl_writephy(tp, 0x10, 0x401f);
3511 rtl_writephy(tp, 0x19, 0x7030);
3512 rtl_writephy(tp, 0x1f, 0x0000);
3514 r810x_aldps_enable(tp);
3517 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3519 static const struct phy_reg phy_reg_init[] = {
3526 /* Disable ALDPS before ram code */
3527 r810x_aldps_disable(tp);
3529 rtl_apply_firmware(tp);
3531 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3532 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3534 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3536 r810x_aldps_enable(tp);
3539 static void rtl_hw_phy_config(struct net_device *dev)
3541 struct rtl8169_private *tp = netdev_priv(dev);
3543 rtl8169_print_mac_version(tp);
3545 switch (tp->mac_version) {
3546 case RTL_GIGA_MAC_VER_01:
3548 case RTL_GIGA_MAC_VER_02:
3549 case RTL_GIGA_MAC_VER_03:
3550 rtl8169s_hw_phy_config(tp);
3552 case RTL_GIGA_MAC_VER_04:
3553 rtl8169sb_hw_phy_config(tp);
3555 case RTL_GIGA_MAC_VER_05:
3556 rtl8169scd_hw_phy_config(tp);
3558 case RTL_GIGA_MAC_VER_06:
3559 rtl8169sce_hw_phy_config(tp);
3561 case RTL_GIGA_MAC_VER_07:
3562 case RTL_GIGA_MAC_VER_08:
3563 case RTL_GIGA_MAC_VER_09:
3564 rtl8102e_hw_phy_config(tp);
3566 case RTL_GIGA_MAC_VER_11:
3567 rtl8168bb_hw_phy_config(tp);
3569 case RTL_GIGA_MAC_VER_12:
3570 rtl8168bef_hw_phy_config(tp);
3572 case RTL_GIGA_MAC_VER_17:
3573 rtl8168bef_hw_phy_config(tp);
3575 case RTL_GIGA_MAC_VER_18:
3576 rtl8168cp_1_hw_phy_config(tp);
3578 case RTL_GIGA_MAC_VER_19:
3579 rtl8168c_1_hw_phy_config(tp);
3581 case RTL_GIGA_MAC_VER_20:
3582 rtl8168c_2_hw_phy_config(tp);
3584 case RTL_GIGA_MAC_VER_21:
3585 rtl8168c_3_hw_phy_config(tp);
3587 case RTL_GIGA_MAC_VER_22:
3588 rtl8168c_4_hw_phy_config(tp);
3590 case RTL_GIGA_MAC_VER_23:
3591 case RTL_GIGA_MAC_VER_24:
3592 rtl8168cp_2_hw_phy_config(tp);
3594 case RTL_GIGA_MAC_VER_25:
3595 rtl8168d_1_hw_phy_config(tp);
3597 case RTL_GIGA_MAC_VER_26:
3598 rtl8168d_2_hw_phy_config(tp);
3600 case RTL_GIGA_MAC_VER_27:
3601 rtl8168d_3_hw_phy_config(tp);
3603 case RTL_GIGA_MAC_VER_28:
3604 rtl8168d_4_hw_phy_config(tp);
3606 case RTL_GIGA_MAC_VER_29:
3607 case RTL_GIGA_MAC_VER_30:
3608 rtl8105e_hw_phy_config(tp);
3610 case RTL_GIGA_MAC_VER_31:
3613 case RTL_GIGA_MAC_VER_32:
3614 case RTL_GIGA_MAC_VER_33:
3615 rtl8168e_1_hw_phy_config(tp);
3617 case RTL_GIGA_MAC_VER_34:
3618 rtl8168e_2_hw_phy_config(tp);
3620 case RTL_GIGA_MAC_VER_35:
3621 rtl8168f_1_hw_phy_config(tp);
3623 case RTL_GIGA_MAC_VER_36:
3624 rtl8168f_2_hw_phy_config(tp);
3627 case RTL_GIGA_MAC_VER_37:
3628 rtl8402_hw_phy_config(tp);
3631 case RTL_GIGA_MAC_VER_38:
3632 rtl8411_hw_phy_config(tp);
3635 case RTL_GIGA_MAC_VER_39:
3636 rtl8106e_hw_phy_config(tp);
3639 case RTL_GIGA_MAC_VER_40:
3640 rtl8168g_1_hw_phy_config(tp);
3643 case RTL_GIGA_MAC_VER_41:
3649 static void rtl_phy_work(struct rtl8169_private *tp)
3651 struct timer_list *timer = &tp->timer;
3652 void __iomem *ioaddr = tp->mmio_addr;
3653 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3655 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
3657 if (tp->phy_reset_pending(tp)) {
3659 * A busy loop could burn quite a few cycles on nowadays CPU.
3660 * Let's delay the execution of the timer for a few ticks.
3666 if (tp->link_ok(ioaddr))
3669 netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
3671 tp->phy_reset_enable(tp);
3674 mod_timer(timer, jiffies + timeout);
3677 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3679 if (!test_and_set_bit(flag, tp->wk.flags))
3680 schedule_work(&tp->wk.work);
3683 static void rtl8169_phy_timer(unsigned long __opaque)
3685 struct net_device *dev = (struct net_device *)__opaque;
3686 struct rtl8169_private *tp = netdev_priv(dev);
3688 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3691 static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3692 void __iomem *ioaddr)
3695 pci_release_regions(pdev);
3696 pci_clear_mwi(pdev);
3697 pci_disable_device(pdev);
3701 DECLARE_RTL_COND(rtl_phy_reset_cond)
3703 return tp->phy_reset_pending(tp);
3706 static void rtl8169_phy_reset(struct net_device *dev,
3707 struct rtl8169_private *tp)
3709 tp->phy_reset_enable(tp);
3710 rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
3713 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3715 void __iomem *ioaddr = tp->mmio_addr;
3717 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3718 (RTL_R8(PHYstatus) & TBI_Enable);
3721 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3723 void __iomem *ioaddr = tp->mmio_addr;
3725 rtl_hw_phy_config(dev);
3727 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3728 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3732 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3734 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3735 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
3737 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
3738 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3740 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
3741 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
3744 rtl8169_phy_reset(dev, tp);
3746 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
3747 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3748 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3749 (tp->mii.supports_gmii ?
3750 ADVERTISED_1000baseT_Half |
3751 ADVERTISED_1000baseT_Full : 0));
3753 if (rtl_tbi_enabled(tp))
3754 netif_info(tp, link, dev, "TBI auto-negotiating\n");
3757 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3759 void __iomem *ioaddr = tp->mmio_addr;
3763 RTL_W8(Cfg9346, Cfg9346_Unlock);
3765 RTL_W32(MAC4, addr[4] | addr[5] << 8);
3768 RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
3771 if (tp->mac_version == RTL_GIGA_MAC_VER_34)
3772 rtl_rar_exgmac_set(tp, addr);
3774 RTL_W8(Cfg9346, Cfg9346_Lock);
3776 rtl_unlock_work(tp);
3779 static int rtl_set_mac_address(struct net_device *dev, void *p)
3781 struct rtl8169_private *tp = netdev_priv(dev);
3782 struct sockaddr *addr = p;
3784 if (!is_valid_ether_addr(addr->sa_data))
3785 return -EADDRNOTAVAIL;
3787 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3789 rtl_rar_set(tp, dev->dev_addr);
3794 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3796 struct rtl8169_private *tp = netdev_priv(dev);
3797 struct mii_ioctl_data *data = if_mii(ifr);
3799 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3802 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3803 struct mii_ioctl_data *data, int cmd)
3807 data->phy_id = 32; /* Internal PHY */
3811 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3815 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3821 static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
3826 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3828 if (tp->features & RTL_FEATURE_MSI) {
3829 pci_disable_msi(pdev);
3830 tp->features &= ~RTL_FEATURE_MSI;
3834 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3836 struct mdio_ops *ops = &tp->mdio_ops;
3838 switch (tp->mac_version) {
3839 case RTL_GIGA_MAC_VER_27:
3840 ops->write = r8168dp_1_mdio_write;
3841 ops->read = r8168dp_1_mdio_read;
3843 case RTL_GIGA_MAC_VER_28:
3844 case RTL_GIGA_MAC_VER_31:
3845 ops->write = r8168dp_2_mdio_write;
3846 ops->read = r8168dp_2_mdio_read;
3848 case RTL_GIGA_MAC_VER_40:
3849 case RTL_GIGA_MAC_VER_41:
3850 ops->write = r8168g_mdio_write;
3851 ops->read = r8168g_mdio_read;
3854 ops->write = r8169_mdio_write;
3855 ops->read = r8169_mdio_read;
3860 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3862 void __iomem *ioaddr = tp->mmio_addr;
3864 switch (tp->mac_version) {
3865 case RTL_GIGA_MAC_VER_25:
3866 case RTL_GIGA_MAC_VER_26:
3867 case RTL_GIGA_MAC_VER_29:
3868 case RTL_GIGA_MAC_VER_30:
3869 case RTL_GIGA_MAC_VER_32:
3870 case RTL_GIGA_MAC_VER_33:
3871 case RTL_GIGA_MAC_VER_34:
3872 case RTL_GIGA_MAC_VER_37:
3873 case RTL_GIGA_MAC_VER_38:
3874 case RTL_GIGA_MAC_VER_39:
3875 case RTL_GIGA_MAC_VER_40:
3876 case RTL_GIGA_MAC_VER_41:
3877 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3878 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3885 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3887 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3890 rtl_writephy(tp, 0x1f, 0x0000);
3891 rtl_writephy(tp, MII_BMCR, 0x0000);
3893 rtl_wol_suspend_quirk(tp);
3898 static void r810x_phy_power_down(struct rtl8169_private *tp)
3900 rtl_writephy(tp, 0x1f, 0x0000);
3901 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3904 static void r810x_phy_power_up(struct rtl8169_private *tp)
3906 rtl_writephy(tp, 0x1f, 0x0000);
3907 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3910 static void r810x_pll_power_down(struct rtl8169_private *tp)
3912 void __iomem *ioaddr = tp->mmio_addr;
3914 if (rtl_wol_pll_power_down(tp))
3917 r810x_phy_power_down(tp);
3919 switch (tp->mac_version) {
3920 case RTL_GIGA_MAC_VER_07:
3921 case RTL_GIGA_MAC_VER_08:
3922 case RTL_GIGA_MAC_VER_09:
3923 case RTL_GIGA_MAC_VER_10:
3924 case RTL_GIGA_MAC_VER_13:
3925 case RTL_GIGA_MAC_VER_16:
3928 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3933 static void r810x_pll_power_up(struct rtl8169_private *tp)
3935 void __iomem *ioaddr = tp->mmio_addr;
3937 r810x_phy_power_up(tp);
3939 switch (tp->mac_version) {
3940 case RTL_GIGA_MAC_VER_07:
3941 case RTL_GIGA_MAC_VER_08:
3942 case RTL_GIGA_MAC_VER_09:
3943 case RTL_GIGA_MAC_VER_10:
3944 case RTL_GIGA_MAC_VER_13:
3945 case RTL_GIGA_MAC_VER_16:
3948 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3953 static void r8168_phy_power_up(struct rtl8169_private *tp)
3955 rtl_writephy(tp, 0x1f, 0x0000);
3956 switch (tp->mac_version) {
3957 case RTL_GIGA_MAC_VER_11:
3958 case RTL_GIGA_MAC_VER_12:
3959 case RTL_GIGA_MAC_VER_17:
3960 case RTL_GIGA_MAC_VER_18:
3961 case RTL_GIGA_MAC_VER_19:
3962 case RTL_GIGA_MAC_VER_20:
3963 case RTL_GIGA_MAC_VER_21:
3964 case RTL_GIGA_MAC_VER_22:
3965 case RTL_GIGA_MAC_VER_23:
3966 case RTL_GIGA_MAC_VER_24:
3967 case RTL_GIGA_MAC_VER_25:
3968 case RTL_GIGA_MAC_VER_26:
3969 case RTL_GIGA_MAC_VER_27:
3970 case RTL_GIGA_MAC_VER_28:
3971 case RTL_GIGA_MAC_VER_31:
3972 rtl_writephy(tp, 0x0e, 0x0000);
3977 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3980 static void r8168_phy_power_down(struct rtl8169_private *tp)
3982 rtl_writephy(tp, 0x1f, 0x0000);
3983 switch (tp->mac_version) {
3984 case RTL_GIGA_MAC_VER_32:
3985 case RTL_GIGA_MAC_VER_33:
3986 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3989 case RTL_GIGA_MAC_VER_11:
3990 case RTL_GIGA_MAC_VER_12:
3991 case RTL_GIGA_MAC_VER_17:
3992 case RTL_GIGA_MAC_VER_18:
3993 case RTL_GIGA_MAC_VER_19:
3994 case RTL_GIGA_MAC_VER_20:
3995 case RTL_GIGA_MAC_VER_21:
3996 case RTL_GIGA_MAC_VER_22:
3997 case RTL_GIGA_MAC_VER_23:
3998 case RTL_GIGA_MAC_VER_24:
3999 case RTL_GIGA_MAC_VER_25:
4000 case RTL_GIGA_MAC_VER_26:
4001 case RTL_GIGA_MAC_VER_27:
4002 case RTL_GIGA_MAC_VER_28:
4003 case RTL_GIGA_MAC_VER_31:
4004 rtl_writephy(tp, 0x0e, 0x0200);
4006 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
4011 static void r8168_pll_power_down(struct rtl8169_private *tp)
4013 void __iomem *ioaddr = tp->mmio_addr;
4015 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4016 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4017 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
4018 r8168dp_check_dash(tp)) {
4022 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
4023 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
4024 (RTL_R16(CPlusCmd) & ASF)) {
4028 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
4029 tp->mac_version == RTL_GIGA_MAC_VER_33)
4030 rtl_ephy_write(tp, 0x19, 0xff64);
4032 if (rtl_wol_pll_power_down(tp))
4035 r8168_phy_power_down(tp);
4037 switch (tp->mac_version) {
4038 case RTL_GIGA_MAC_VER_25:
4039 case RTL_GIGA_MAC_VER_26:
4040 case RTL_GIGA_MAC_VER_27:
4041 case RTL_GIGA_MAC_VER_28:
4042 case RTL_GIGA_MAC_VER_31:
4043 case RTL_GIGA_MAC_VER_32:
4044 case RTL_GIGA_MAC_VER_33:
4045 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
4050 static void r8168_pll_power_up(struct rtl8169_private *tp)
4052 void __iomem *ioaddr = tp->mmio_addr;
4054 switch (tp->mac_version) {
4055 case RTL_GIGA_MAC_VER_25:
4056 case RTL_GIGA_MAC_VER_26:
4057 case RTL_GIGA_MAC_VER_27:
4058 case RTL_GIGA_MAC_VER_28:
4059 case RTL_GIGA_MAC_VER_31:
4060 case RTL_GIGA_MAC_VER_32:
4061 case RTL_GIGA_MAC_VER_33:
4062 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4066 r8168_phy_power_up(tp);
4069 static void rtl_generic_op(struct rtl8169_private *tp,
4070 void (*op)(struct rtl8169_private *))
4076 static void rtl_pll_power_down(struct rtl8169_private *tp)
4078 rtl_generic_op(tp, tp->pll_power_ops.down);
4081 static void rtl_pll_power_up(struct rtl8169_private *tp)
4083 rtl_generic_op(tp, tp->pll_power_ops.up);
4086 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4088 struct pll_power_ops *ops = &tp->pll_power_ops;
4090 switch (tp->mac_version) {
4091 case RTL_GIGA_MAC_VER_07:
4092 case RTL_GIGA_MAC_VER_08:
4093 case RTL_GIGA_MAC_VER_09:
4094 case RTL_GIGA_MAC_VER_10:
4095 case RTL_GIGA_MAC_VER_16:
4096 case RTL_GIGA_MAC_VER_29:
4097 case RTL_GIGA_MAC_VER_30:
4098 case RTL_GIGA_MAC_VER_37:
4099 case RTL_GIGA_MAC_VER_39:
4100 ops->down = r810x_pll_power_down;
4101 ops->up = r810x_pll_power_up;
4104 case RTL_GIGA_MAC_VER_11:
4105 case RTL_GIGA_MAC_VER_12:
4106 case RTL_GIGA_MAC_VER_17:
4107 case RTL_GIGA_MAC_VER_18:
4108 case RTL_GIGA_MAC_VER_19:
4109 case RTL_GIGA_MAC_VER_20:
4110 case RTL_GIGA_MAC_VER_21:
4111 case RTL_GIGA_MAC_VER_22:
4112 case RTL_GIGA_MAC_VER_23:
4113 case RTL_GIGA_MAC_VER_24:
4114 case RTL_GIGA_MAC_VER_25:
4115 case RTL_GIGA_MAC_VER_26:
4116 case RTL_GIGA_MAC_VER_27:
4117 case RTL_GIGA_MAC_VER_28:
4118 case RTL_GIGA_MAC_VER_31:
4119 case RTL_GIGA_MAC_VER_32:
4120 case RTL_GIGA_MAC_VER_33:
4121 case RTL_GIGA_MAC_VER_34:
4122 case RTL_GIGA_MAC_VER_35:
4123 case RTL_GIGA_MAC_VER_36:
4124 case RTL_GIGA_MAC_VER_38:
4125 case RTL_GIGA_MAC_VER_40:
4126 case RTL_GIGA_MAC_VER_41:
4127 ops->down = r8168_pll_power_down;
4128 ops->up = r8168_pll_power_up;
4138 static void rtl_init_rxcfg(struct rtl8169_private *tp)
4140 void __iomem *ioaddr = tp->mmio_addr;
4142 switch (tp->mac_version) {
4143 case RTL_GIGA_MAC_VER_01:
4144 case RTL_GIGA_MAC_VER_02:
4145 case RTL_GIGA_MAC_VER_03:
4146 case RTL_GIGA_MAC_VER_04:
4147 case RTL_GIGA_MAC_VER_05:
4148 case RTL_GIGA_MAC_VER_06:
4149 case RTL_GIGA_MAC_VER_10:
4150 case RTL_GIGA_MAC_VER_11:
4151 case RTL_GIGA_MAC_VER_12:
4152 case RTL_GIGA_MAC_VER_13:
4153 case RTL_GIGA_MAC_VER_14:
4154 case RTL_GIGA_MAC_VER_15:
4155 case RTL_GIGA_MAC_VER_16:
4156 case RTL_GIGA_MAC_VER_17:
4157 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
4159 case RTL_GIGA_MAC_VER_18:
4160 case RTL_GIGA_MAC_VER_19:
4161 case RTL_GIGA_MAC_VER_20:
4162 case RTL_GIGA_MAC_VER_21:
4163 case RTL_GIGA_MAC_VER_22:
4164 case RTL_GIGA_MAC_VER_23:
4165 case RTL_GIGA_MAC_VER_24:
4166 case RTL_GIGA_MAC_VER_34:
4167 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4170 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
4175 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4177 tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
4180 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
4182 void __iomem *ioaddr = tp->mmio_addr;
4184 RTL_W8(Cfg9346, Cfg9346_Unlock);
4185 rtl_generic_op(tp, tp->jumbo_ops.enable);
4186 RTL_W8(Cfg9346, Cfg9346_Lock);
4189 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
4191 void __iomem *ioaddr = tp->mmio_addr;
4193 RTL_W8(Cfg9346, Cfg9346_Unlock);
4194 rtl_generic_op(tp, tp->jumbo_ops.disable);
4195 RTL_W8(Cfg9346, Cfg9346_Lock);
4198 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4200 void __iomem *ioaddr = tp->mmio_addr;
4202 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4203 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4204 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
4207 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
4209 void __iomem *ioaddr = tp->mmio_addr;
4211 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4212 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
4213 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
4216 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
4218 void __iomem *ioaddr = tp->mmio_addr;
4220 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4223 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
4225 void __iomem *ioaddr = tp->mmio_addr;
4227 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4230 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4232 void __iomem *ioaddr = tp->mmio_addr;
4234 RTL_W8(MaxTxPacketSize, 0x3f);
4235 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4236 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4237 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
4240 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4242 void __iomem *ioaddr = tp->mmio_addr;
4244 RTL_W8(MaxTxPacketSize, 0x0c);
4245 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4246 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4247 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
4250 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4252 rtl_tx_performance_tweak(tp->pci_dev,
4253 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4256 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
4258 rtl_tx_performance_tweak(tp->pci_dev,
4259 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4262 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4264 void __iomem *ioaddr = tp->mmio_addr;
4266 r8168b_0_hw_jumbo_enable(tp);
4268 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
4271 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4273 void __iomem *ioaddr = tp->mmio_addr;
4275 r8168b_0_hw_jumbo_disable(tp);
4277 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
4280 static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4282 struct jumbo_ops *ops = &tp->jumbo_ops;
4284 switch (tp->mac_version) {
4285 case RTL_GIGA_MAC_VER_11:
4286 ops->disable = r8168b_0_hw_jumbo_disable;
4287 ops->enable = r8168b_0_hw_jumbo_enable;
4289 case RTL_GIGA_MAC_VER_12:
4290 case RTL_GIGA_MAC_VER_17:
4291 ops->disable = r8168b_1_hw_jumbo_disable;
4292 ops->enable = r8168b_1_hw_jumbo_enable;
4294 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
4295 case RTL_GIGA_MAC_VER_19:
4296 case RTL_GIGA_MAC_VER_20:
4297 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
4298 case RTL_GIGA_MAC_VER_22:
4299 case RTL_GIGA_MAC_VER_23:
4300 case RTL_GIGA_MAC_VER_24:
4301 case RTL_GIGA_MAC_VER_25:
4302 case RTL_GIGA_MAC_VER_26:
4303 ops->disable = r8168c_hw_jumbo_disable;
4304 ops->enable = r8168c_hw_jumbo_enable;
4306 case RTL_GIGA_MAC_VER_27:
4307 case RTL_GIGA_MAC_VER_28:
4308 ops->disable = r8168dp_hw_jumbo_disable;
4309 ops->enable = r8168dp_hw_jumbo_enable;
4311 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
4312 case RTL_GIGA_MAC_VER_32:
4313 case RTL_GIGA_MAC_VER_33:
4314 case RTL_GIGA_MAC_VER_34:
4315 ops->disable = r8168e_hw_jumbo_disable;
4316 ops->enable = r8168e_hw_jumbo_enable;
4320 * No action needed for jumbo frames with 8169.
4321 * No jumbo for 810x at all.
4323 case RTL_GIGA_MAC_VER_40:
4324 case RTL_GIGA_MAC_VER_41:
4326 ops->disable = NULL;
4332 DECLARE_RTL_COND(rtl_chipcmd_cond)
4334 void __iomem *ioaddr = tp->mmio_addr;
4336 return RTL_R8(ChipCmd) & CmdReset;
4339 static void rtl_hw_reset(struct rtl8169_private *tp)
4341 void __iomem *ioaddr = tp->mmio_addr;
4343 RTL_W8(ChipCmd, CmdReset);
4345 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
4348 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
4350 struct rtl_fw *rtl_fw;
4354 name = rtl_lookup_firmware_name(tp);
4356 goto out_no_firmware;
4358 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4362 rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
4366 rc = rtl_check_firmware(tp, rtl_fw);
4368 goto err_release_firmware;
4370 tp->rtl_fw = rtl_fw;
4374 err_release_firmware:
4375 release_firmware(rtl_fw->fw);
4379 netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
4386 static void rtl_request_firmware(struct rtl8169_private *tp)
4388 if (IS_ERR(tp->rtl_fw))
4389 rtl_request_uncached_firmware(tp);
4392 static void rtl_rx_close(struct rtl8169_private *tp)
4394 void __iomem *ioaddr = tp->mmio_addr;
4396 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
4399 DECLARE_RTL_COND(rtl_npq_cond)
4401 void __iomem *ioaddr = tp->mmio_addr;
4403 return RTL_R8(TxPoll) & NPQ;
4406 DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4408 void __iomem *ioaddr = tp->mmio_addr;
4410 return RTL_R32(TxConfig) & TXCFG_EMPTY;
4413 static void rtl8169_hw_reset(struct rtl8169_private *tp)
4415 void __iomem *ioaddr = tp->mmio_addr;
4417 /* Disable interrupts */
4418 rtl8169_irq_mask_and_ack(tp);
4422 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4423 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4424 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4425 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4426 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4427 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4428 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4429 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4430 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4431 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4432 tp->mac_version == RTL_GIGA_MAC_VER_38) {
4433 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4434 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4436 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4443 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
4445 void __iomem *ioaddr = tp->mmio_addr;
4447 /* Set DMA burst size and Interframe Gap Time */
4448 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4449 (InterFrameGap << TxInterFrameGapShift));
4452 static void rtl_hw_start(struct net_device *dev)
4454 struct rtl8169_private *tp = netdev_priv(dev);
4458 rtl_irq_enable_all(tp);
4461 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4462 void __iomem *ioaddr)
4465 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4466 * register to be written before TxDescAddrLow to work.
4467 * Switching from MMIO to I/O access fixes the issue as well.
4469 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4470 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4471 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4472 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
4475 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4479 cmd = RTL_R16(CPlusCmd);
4480 RTL_W16(CPlusCmd, cmd);
4484 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
4486 /* Low hurts. Let's disable the filtering. */
4487 RTL_W16(RxMaxSize, rx_buf_sz + 1);
4490 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4492 static const struct rtl_cfg2_info {
4497 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4498 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4499 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4500 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4502 const struct rtl_cfg2_info *p = cfg2_info;
4506 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4507 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4508 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4509 RTL_W32(0x7c, p->val);
4515 static void rtl_set_rx_mode(struct net_device *dev)
4517 struct rtl8169_private *tp = netdev_priv(dev);
4518 void __iomem *ioaddr = tp->mmio_addr;
4519 u32 mc_filter[2]; /* Multicast hash filter */
4523 if (dev->flags & IFF_PROMISC) {
4524 /* Unconditionally log net taps. */
4525 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4527 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4529 mc_filter[1] = mc_filter[0] = 0xffffffff;
4530 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4531 (dev->flags & IFF_ALLMULTI)) {
4532 /* Too many to filter perfectly -- accept all multicasts. */
4533 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4534 mc_filter[1] = mc_filter[0] = 0xffffffff;
4536 struct netdev_hw_addr *ha;
4538 rx_mode = AcceptBroadcast | AcceptMyPhys;
4539 mc_filter[1] = mc_filter[0] = 0;
4540 netdev_for_each_mc_addr(ha, dev) {
4541 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4542 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4543 rx_mode |= AcceptMulticast;
4547 if (dev->features & NETIF_F_RXALL)
4548 rx_mode |= (AcceptErr | AcceptRunt);
4550 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
4552 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4553 u32 data = mc_filter[0];
4555 mc_filter[0] = swab32(mc_filter[1]);
4556 mc_filter[1] = swab32(data);
4559 if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4560 mc_filter[1] = mc_filter[0] = 0xffffffff;
4562 RTL_W32(MAR0 + 4, mc_filter[1]);
4563 RTL_W32(MAR0 + 0, mc_filter[0]);
4565 RTL_W32(RxConfig, tmp);
4568 static void rtl_hw_start_8169(struct net_device *dev)
4570 struct rtl8169_private *tp = netdev_priv(dev);
4571 void __iomem *ioaddr = tp->mmio_addr;
4572 struct pci_dev *pdev = tp->pci_dev;
4574 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4575 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4576 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4579 RTL_W8(Cfg9346, Cfg9346_Unlock);
4580 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4581 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4582 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4583 tp->mac_version == RTL_GIGA_MAC_VER_04)
4584 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4588 RTL_W8(EarlyTxThres, NoEarlyTx);
4590 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
4592 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4593 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4594 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4595 tp->mac_version == RTL_GIGA_MAC_VER_04)
4596 rtl_set_rx_tx_config_registers(tp);
4598 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
4600 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4601 tp->mac_version == RTL_GIGA_MAC_VER_03) {
4602 dprintk("Set MAC Reg C+CR Offset 0xE0. "
4603 "Bit-3 and bit-14 MUST be 1\n");
4604 tp->cp_cmd |= (1 << 14);
4607 RTL_W16(CPlusCmd, tp->cp_cmd);
4609 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4612 * Undocumented corner. Supposedly:
4613 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4615 RTL_W16(IntrMitigate, 0x0000);
4617 rtl_set_rx_tx_desc_registers(tp, ioaddr);
4619 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4620 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4621 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4622 tp->mac_version != RTL_GIGA_MAC_VER_04) {
4623 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4624 rtl_set_rx_tx_config_registers(tp);
4627 RTL_W8(Cfg9346, Cfg9346_Lock);
4629 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4632 RTL_W32(RxMissed, 0);
4634 rtl_set_rx_mode(dev);
4636 /* no early-rx interrupts */
4637 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
4640 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4642 if (tp->csi_ops.write)
4643 tp->csi_ops.write(tp, addr, value);
4646 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4648 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4651 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4655 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4656 rtl_csi_write(tp, 0x070c, csi | bits);
4659 static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4661 rtl_csi_access_enable(tp, 0x17000000);
4664 static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4666 rtl_csi_access_enable(tp, 0x27000000);
4669 DECLARE_RTL_COND(rtl_csiar_cond)
4671 void __iomem *ioaddr = tp->mmio_addr;
4673 return RTL_R32(CSIAR) & CSIAR_FLAG;
4676 static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4678 void __iomem *ioaddr = tp->mmio_addr;
4680 RTL_W32(CSIDR, value);
4681 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4682 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4684 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
4687 static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4689 void __iomem *ioaddr = tp->mmio_addr;
4691 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4692 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4694 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4695 RTL_R32(CSIDR) : ~0;
4698 static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4700 void __iomem *ioaddr = tp->mmio_addr;
4702 RTL_W32(CSIDR, value);
4703 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4704 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4707 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
4710 static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4712 void __iomem *ioaddr = tp->mmio_addr;
4714 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4715 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4717 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4718 RTL_R32(CSIDR) : ~0;
4721 static void rtl_init_csi_ops(struct rtl8169_private *tp)
4723 struct csi_ops *ops = &tp->csi_ops;
4725 switch (tp->mac_version) {
4726 case RTL_GIGA_MAC_VER_01:
4727 case RTL_GIGA_MAC_VER_02:
4728 case RTL_GIGA_MAC_VER_03:
4729 case RTL_GIGA_MAC_VER_04:
4730 case RTL_GIGA_MAC_VER_05:
4731 case RTL_GIGA_MAC_VER_06:
4732 case RTL_GIGA_MAC_VER_10:
4733 case RTL_GIGA_MAC_VER_11:
4734 case RTL_GIGA_MAC_VER_12:
4735 case RTL_GIGA_MAC_VER_13:
4736 case RTL_GIGA_MAC_VER_14:
4737 case RTL_GIGA_MAC_VER_15:
4738 case RTL_GIGA_MAC_VER_16:
4739 case RTL_GIGA_MAC_VER_17:
4744 case RTL_GIGA_MAC_VER_37:
4745 case RTL_GIGA_MAC_VER_38:
4746 ops->write = r8402_csi_write;
4747 ops->read = r8402_csi_read;
4751 ops->write = r8169_csi_write;
4752 ops->read = r8169_csi_read;
4758 unsigned int offset;
4763 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4769 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4770 rtl_ephy_write(tp, e->offset, w);
4775 static void rtl_disable_clock_request(struct pci_dev *pdev)
4777 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
4778 PCI_EXP_LNKCTL_CLKREQ_EN);
4781 static void rtl_enable_clock_request(struct pci_dev *pdev)
4783 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
4784 PCI_EXP_LNKCTL_CLKREQ_EN);
4787 #define R8168_CPCMD_QUIRK_MASK (\
4798 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4800 void __iomem *ioaddr = tp->mmio_addr;
4801 struct pci_dev *pdev = tp->pci_dev;
4803 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4805 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4807 rtl_tx_performance_tweak(pdev,
4808 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4811 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4813 void __iomem *ioaddr = tp->mmio_addr;
4815 rtl_hw_start_8168bb(tp);
4817 RTL_W8(MaxTxPacketSize, TxPacketMax);
4819 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
4822 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4824 void __iomem *ioaddr = tp->mmio_addr;
4825 struct pci_dev *pdev = tp->pci_dev;
4827 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4829 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4831 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4833 rtl_disable_clock_request(pdev);
4835 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4838 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4840 static const struct ephy_info e_info_8168cp[] = {
4841 { 0x01, 0, 0x0001 },
4842 { 0x02, 0x0800, 0x1000 },
4843 { 0x03, 0, 0x0042 },
4844 { 0x06, 0x0080, 0x0000 },
4848 rtl_csi_access_enable_2(tp);
4850 rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4852 __rtl_hw_start_8168cp(tp);
4855 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4857 void __iomem *ioaddr = tp->mmio_addr;
4858 struct pci_dev *pdev = tp->pci_dev;
4860 rtl_csi_access_enable_2(tp);
4862 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4864 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4866 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4869 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4871 void __iomem *ioaddr = tp->mmio_addr;
4872 struct pci_dev *pdev = tp->pci_dev;
4874 rtl_csi_access_enable_2(tp);
4876 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4879 RTL_W8(DBG_REG, 0x20);
4881 RTL_W8(MaxTxPacketSize, TxPacketMax);
4883 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4885 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4888 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4890 void __iomem *ioaddr = tp->mmio_addr;
4891 static const struct ephy_info e_info_8168c_1[] = {
4892 { 0x02, 0x0800, 0x1000 },
4893 { 0x03, 0, 0x0002 },
4894 { 0x06, 0x0080, 0x0000 }
4897 rtl_csi_access_enable_2(tp);
4899 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4901 rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4903 __rtl_hw_start_8168cp(tp);
4906 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4908 static const struct ephy_info e_info_8168c_2[] = {
4909 { 0x01, 0, 0x0001 },
4910 { 0x03, 0x0400, 0x0220 }
4913 rtl_csi_access_enable_2(tp);
4915 rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4917 __rtl_hw_start_8168cp(tp);
4920 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4922 rtl_hw_start_8168c_2(tp);
4925 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4927 rtl_csi_access_enable_2(tp);
4929 __rtl_hw_start_8168cp(tp);
4932 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4934 void __iomem *ioaddr = tp->mmio_addr;
4935 struct pci_dev *pdev = tp->pci_dev;
4937 rtl_csi_access_enable_2(tp);
4939 rtl_disable_clock_request(pdev);
4941 RTL_W8(MaxTxPacketSize, TxPacketMax);
4943 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4945 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4948 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4950 void __iomem *ioaddr = tp->mmio_addr;
4951 struct pci_dev *pdev = tp->pci_dev;
4953 rtl_csi_access_enable_1(tp);
4955 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4957 RTL_W8(MaxTxPacketSize, TxPacketMax);
4959 rtl_disable_clock_request(pdev);
4962 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4964 void __iomem *ioaddr = tp->mmio_addr;
4965 struct pci_dev *pdev = tp->pci_dev;
4966 static const struct ephy_info e_info_8168d_4[] = {
4968 { 0x19, 0x20, 0x50 },
4973 rtl_csi_access_enable_1(tp);
4975 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4977 RTL_W8(MaxTxPacketSize, TxPacketMax);
4979 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4980 const struct ephy_info *e = e_info_8168d_4 + i;
4983 w = rtl_ephy_read(tp, e->offset);
4984 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4987 rtl_enable_clock_request(pdev);
4990 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4992 void __iomem *ioaddr = tp->mmio_addr;
4993 struct pci_dev *pdev = tp->pci_dev;
4994 static const struct ephy_info e_info_8168e_1[] = {
4995 { 0x00, 0x0200, 0x0100 },
4996 { 0x00, 0x0000, 0x0004 },
4997 { 0x06, 0x0002, 0x0001 },
4998 { 0x06, 0x0000, 0x0030 },
4999 { 0x07, 0x0000, 0x2000 },
5000 { 0x00, 0x0000, 0x0020 },
5001 { 0x03, 0x5800, 0x2000 },
5002 { 0x03, 0x0000, 0x0001 },
5003 { 0x01, 0x0800, 0x1000 },
5004 { 0x07, 0x0000, 0x4000 },
5005 { 0x1e, 0x0000, 0x2000 },
5006 { 0x19, 0xffff, 0xfe6c },
5007 { 0x0a, 0x0000, 0x0040 }
5010 rtl_csi_access_enable_2(tp);
5012 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
5014 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5016 RTL_W8(MaxTxPacketSize, TxPacketMax);
5018 rtl_disable_clock_request(pdev);
5020 /* Reset tx FIFO pointer */
5021 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
5022 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
5024 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
5027 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5029 void __iomem *ioaddr = tp->mmio_addr;
5030 struct pci_dev *pdev = tp->pci_dev;
5031 static const struct ephy_info e_info_8168e_2[] = {
5032 { 0x09, 0x0000, 0x0080 },
5033 { 0x19, 0x0000, 0x0224 }
5036 rtl_csi_access_enable_1(tp);
5038 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5040 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5042 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5043 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5044 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5045 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5046 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5047 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
5048 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5049 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5051 RTL_W8(MaxTxPacketSize, EarlySize);
5053 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5054 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5056 /* Adjust EEE LED frequency */
5057 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5059 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5060 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5061 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5062 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5065 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5067 void __iomem *ioaddr = tp->mmio_addr;
5068 struct pci_dev *pdev = tp->pci_dev;
5070 rtl_csi_access_enable_2(tp);
5072 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5074 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5075 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5076 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5077 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5078 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5079 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5080 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5081 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5082 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5083 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
5085 RTL_W8(MaxTxPacketSize, EarlySize);
5087 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5088 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5089 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5090 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
5091 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5092 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5095 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
5097 void __iomem *ioaddr = tp->mmio_addr;
5098 static const struct ephy_info e_info_8168f_1[] = {
5099 { 0x06, 0x00c0, 0x0020 },
5100 { 0x08, 0x0001, 0x0002 },
5101 { 0x09, 0x0000, 0x0080 },
5102 { 0x19, 0x0000, 0x0224 }
5105 rtl_hw_start_8168f(tp);
5107 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5109 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5111 /* Adjust EEE LED frequency */
5112 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5115 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5117 static const struct ephy_info e_info_8168f_1[] = {
5118 { 0x06, 0x00c0, 0x0020 },
5119 { 0x0f, 0xffff, 0x5200 },
5120 { 0x1e, 0x0000, 0x4000 },
5121 { 0x19, 0x0000, 0x0224 }
5124 rtl_hw_start_8168f(tp);
5126 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5128 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
5131 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5133 void __iomem *ioaddr = tp->mmio_addr;
5134 struct pci_dev *pdev = tp->pci_dev;
5136 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5137 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5138 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5139 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5141 rtl_csi_access_enable_1(tp);
5143 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5145 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5146 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5148 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5149 RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
5150 RTL_W8(MaxTxPacketSize, EarlySize);
5151 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5152 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5154 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5155 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5157 /* Adjust EEE LED frequency */
5158 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5160 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
5163 static void rtl_hw_start_8168(struct net_device *dev)
5165 struct rtl8169_private *tp = netdev_priv(dev);
5166 void __iomem *ioaddr = tp->mmio_addr;
5168 RTL_W8(Cfg9346, Cfg9346_Unlock);
5170 RTL_W8(MaxTxPacketSize, TxPacketMax);
5172 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5174 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
5176 RTL_W16(CPlusCmd, tp->cp_cmd);
5178 RTL_W16(IntrMitigate, 0x5151);
5180 /* Work around for RxFIFO overflow. */
5181 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
5182 tp->event_slow |= RxFIFOOver | PCSTimeout;
5183 tp->event_slow &= ~RxOverflow;
5186 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5188 rtl_set_rx_mode(dev);
5190 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
5191 (InterFrameGap << TxInterFrameGapShift));
5195 switch (tp->mac_version) {
5196 case RTL_GIGA_MAC_VER_11:
5197 rtl_hw_start_8168bb(tp);
5200 case RTL_GIGA_MAC_VER_12:
5201 case RTL_GIGA_MAC_VER_17:
5202 rtl_hw_start_8168bef(tp);
5205 case RTL_GIGA_MAC_VER_18:
5206 rtl_hw_start_8168cp_1(tp);
5209 case RTL_GIGA_MAC_VER_19:
5210 rtl_hw_start_8168c_1(tp);
5213 case RTL_GIGA_MAC_VER_20:
5214 rtl_hw_start_8168c_2(tp);
5217 case RTL_GIGA_MAC_VER_21:
5218 rtl_hw_start_8168c_3(tp);
5221 case RTL_GIGA_MAC_VER_22:
5222 rtl_hw_start_8168c_4(tp);
5225 case RTL_GIGA_MAC_VER_23:
5226 rtl_hw_start_8168cp_2(tp);
5229 case RTL_GIGA_MAC_VER_24:
5230 rtl_hw_start_8168cp_3(tp);
5233 case RTL_GIGA_MAC_VER_25:
5234 case RTL_GIGA_MAC_VER_26:
5235 case RTL_GIGA_MAC_VER_27:
5236 rtl_hw_start_8168d(tp);
5239 case RTL_GIGA_MAC_VER_28:
5240 rtl_hw_start_8168d_4(tp);
5243 case RTL_GIGA_MAC_VER_31:
5244 rtl_hw_start_8168dp(tp);
5247 case RTL_GIGA_MAC_VER_32:
5248 case RTL_GIGA_MAC_VER_33:
5249 rtl_hw_start_8168e_1(tp);
5251 case RTL_GIGA_MAC_VER_34:
5252 rtl_hw_start_8168e_2(tp);
5255 case RTL_GIGA_MAC_VER_35:
5256 case RTL_GIGA_MAC_VER_36:
5257 rtl_hw_start_8168f_1(tp);
5260 case RTL_GIGA_MAC_VER_38:
5261 rtl_hw_start_8411(tp);
5264 case RTL_GIGA_MAC_VER_40:
5265 case RTL_GIGA_MAC_VER_41:
5266 rtl_hw_start_8168g_1(tp);
5270 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5271 dev->name, tp->mac_version);
5275 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5277 RTL_W8(Cfg9346, Cfg9346_Lock);
5279 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
5282 #define R810X_CPCMD_QUIRK_MASK (\
5293 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5295 void __iomem *ioaddr = tp->mmio_addr;
5296 struct pci_dev *pdev = tp->pci_dev;
5297 static const struct ephy_info e_info_8102e_1[] = {
5298 { 0x01, 0, 0x6e65 },
5299 { 0x02, 0, 0x091f },
5300 { 0x03, 0, 0xc2f9 },
5301 { 0x06, 0, 0xafb5 },
5302 { 0x07, 0, 0x0e00 },
5303 { 0x19, 0, 0xec80 },
5304 { 0x01, 0, 0x2e65 },
5309 rtl_csi_access_enable_2(tp);
5311 RTL_W8(DBG_REG, FIX_NAK_1);
5313 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5316 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5317 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5319 cfg1 = RTL_R8(Config1);
5320 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5321 RTL_W8(Config1, cfg1 & ~LEDS0);
5323 rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
5326 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
5328 void __iomem *ioaddr = tp->mmio_addr;
5329 struct pci_dev *pdev = tp->pci_dev;
5331 rtl_csi_access_enable_2(tp);
5333 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5335 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
5336 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5339 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5341 rtl_hw_start_8102e_2(tp);
5343 rtl_ephy_write(tp, 0x03, 0xc2f9);
5346 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5348 void __iomem *ioaddr = tp->mmio_addr;
5349 static const struct ephy_info e_info_8105e_1[] = {
5350 { 0x07, 0, 0x4000 },
5351 { 0x19, 0, 0x0200 },
5352 { 0x19, 0, 0x0020 },
5353 { 0x1e, 0, 0x2000 },
5354 { 0x03, 0, 0x0001 },
5355 { 0x19, 0, 0x0100 },
5356 { 0x19, 0, 0x0004 },
5360 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5361 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5363 /* Disable Early Tally Counter */
5364 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
5366 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5367 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5368 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5369 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5370 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5372 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5375 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5377 rtl_hw_start_8105e_1(tp);
5378 rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
5381 static void rtl_hw_start_8402(struct rtl8169_private *tp)
5383 void __iomem *ioaddr = tp->mmio_addr;
5384 static const struct ephy_info e_info_8402[] = {
5385 { 0x19, 0xffff, 0xff64 },
5389 rtl_csi_access_enable_2(tp);
5391 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5392 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5394 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5395 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5396 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5397 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5398 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5400 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5402 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5404 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5405 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5406 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5407 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5408 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5409 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5410 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
5413 static void rtl_hw_start_8106(struct rtl8169_private *tp)
5415 void __iomem *ioaddr = tp->mmio_addr;
5417 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5418 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5421 (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
5422 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5423 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5424 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5425 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5428 static void rtl_hw_start_8101(struct net_device *dev)
5430 struct rtl8169_private *tp = netdev_priv(dev);
5431 void __iomem *ioaddr = tp->mmio_addr;
5432 struct pci_dev *pdev = tp->pci_dev;
5434 if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
5435 tp->event_slow &= ~RxFIFOOver;
5437 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5438 tp->mac_version == RTL_GIGA_MAC_VER_16)
5439 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
5440 PCI_EXP_DEVCTL_NOSNOOP_EN);
5442 RTL_W8(Cfg9346, Cfg9346_Unlock);
5444 switch (tp->mac_version) {
5445 case RTL_GIGA_MAC_VER_07:
5446 rtl_hw_start_8102e_1(tp);
5449 case RTL_GIGA_MAC_VER_08:
5450 rtl_hw_start_8102e_3(tp);
5453 case RTL_GIGA_MAC_VER_09:
5454 rtl_hw_start_8102e_2(tp);
5457 case RTL_GIGA_MAC_VER_29:
5458 rtl_hw_start_8105e_1(tp);
5460 case RTL_GIGA_MAC_VER_30:
5461 rtl_hw_start_8105e_2(tp);
5464 case RTL_GIGA_MAC_VER_37:
5465 rtl_hw_start_8402(tp);
5468 case RTL_GIGA_MAC_VER_39:
5469 rtl_hw_start_8106(tp);
5473 RTL_W8(Cfg9346, Cfg9346_Lock);
5475 RTL_W8(MaxTxPacketSize, TxPacketMax);
5477 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5479 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5480 RTL_W16(CPlusCmd, tp->cp_cmd);
5482 RTL_W16(IntrMitigate, 0x0000);
5484 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5486 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5487 rtl_set_rx_tx_config_registers(tp);
5491 rtl_set_rx_mode(dev);
5493 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
5496 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5498 struct rtl8169_private *tp = netdev_priv(dev);
5500 if (new_mtu < ETH_ZLEN ||
5501 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5504 if (new_mtu > ETH_DATA_LEN)
5505 rtl_hw_jumbo_enable(tp);
5507 rtl_hw_jumbo_disable(tp);
5510 netdev_update_features(dev);
5515 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5517 desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
5518 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
5521 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5522 void **data_buff, struct RxDesc *desc)
5524 dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
5529 rtl8169_make_unusable_by_asic(desc);
5532 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5534 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5536 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
5539 static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
5542 desc->addr = cpu_to_le64(mapping);
5544 rtl8169_mark_to_asic(desc, rx_buf_sz);
5547 static inline void *rtl8169_align(void *data)
5549 return (void *)ALIGN((long)data, 16);
5552 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5553 struct RxDesc *desc)
5557 struct device *d = &tp->pci_dev->dev;
5558 struct net_device *dev = tp->dev;
5559 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
5561 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
5565 if (rtl8169_align(data) != data) {
5567 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
5572 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
5574 if (unlikely(dma_mapping_error(d, mapping))) {
5575 if (net_ratelimit())
5576 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
5580 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
5588 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5592 for (i = 0; i < NUM_RX_DESC; i++) {
5593 if (tp->Rx_databuff[i]) {
5594 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5595 tp->RxDescArray + i);
5600 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
5602 desc->opts1 |= cpu_to_le32(RingEnd);
5605 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5609 for (i = 0; i < NUM_RX_DESC; i++) {
5612 if (tp->Rx_databuff[i])
5615 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5617 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5620 tp->Rx_databuff[i] = data;
5623 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5627 rtl8169_rx_clear(tp);
5631 static int rtl8169_init_ring(struct net_device *dev)
5633 struct rtl8169_private *tp = netdev_priv(dev);
5635 rtl8169_init_ring_indexes(tp);
5637 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5638 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5640 return rtl8169_rx_fill(tp);
5643 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
5644 struct TxDesc *desc)
5646 unsigned int len = tx_skb->len;
5648 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
5656 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5661 for (i = 0; i < n; i++) {
5662 unsigned int entry = (start + i) % NUM_TX_DESC;
5663 struct ring_info *tx_skb = tp->tx_skb + entry;
5664 unsigned int len = tx_skb->len;
5667 struct sk_buff *skb = tx_skb->skb;
5669 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5670 tp->TxDescArray + entry);
5672 tp->dev->stats.tx_dropped++;
5680 static void rtl8169_tx_clear(struct rtl8169_private *tp)
5682 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
5683 tp->cur_tx = tp->dirty_tx = 0;
5686 static void rtl_reset_work(struct rtl8169_private *tp)
5688 struct net_device *dev = tp->dev;
5691 napi_disable(&tp->napi);
5692 netif_stop_queue(dev);
5693 synchronize_sched();
5695 rtl8169_hw_reset(tp);
5697 for (i = 0; i < NUM_RX_DESC; i++)
5698 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5700 rtl8169_tx_clear(tp);
5701 rtl8169_init_ring_indexes(tp);
5703 napi_enable(&tp->napi);
5705 netif_wake_queue(dev);
5706 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
5709 static void rtl8169_tx_timeout(struct net_device *dev)
5711 struct rtl8169_private *tp = netdev_priv(dev);
5713 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
5716 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
5719 struct skb_shared_info *info = skb_shinfo(skb);
5720 unsigned int cur_frag, entry;
5721 struct TxDesc * uninitialized_var(txd);
5722 struct device *d = &tp->pci_dev->dev;
5725 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
5726 const skb_frag_t *frag = info->frags + cur_frag;
5731 entry = (entry + 1) % NUM_TX_DESC;
5733 txd = tp->TxDescArray + entry;
5734 len = skb_frag_size(frag);
5735 addr = skb_frag_address(frag);
5736 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
5737 if (unlikely(dma_mapping_error(d, mapping))) {
5738 if (net_ratelimit())
5739 netif_err(tp, drv, tp->dev,
5740 "Failed to map TX fragments DMA!\n");
5744 /* Anti gcc 2.95.3 bugware (sic) */
5745 status = opts[0] | len |
5746 (RingEnd * !((entry + 1) % NUM_TX_DESC));
5748 txd->opts1 = cpu_to_le32(status);
5749 txd->opts2 = cpu_to_le32(opts[1]);
5750 txd->addr = cpu_to_le64(mapping);
5752 tp->tx_skb[entry].len = len;
5756 tp->tx_skb[entry].skb = skb;
5757 txd->opts1 |= cpu_to_le32(LastFrag);
5763 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
5767 static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5768 struct sk_buff *skb, u32 *opts)
5770 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5771 u32 mss = skb_shinfo(skb)->gso_size;
5772 int offset = info->opts_offset;
5776 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5777 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5778 const struct iphdr *ip = ip_hdr(skb);
5780 if (ip->protocol == IPPROTO_TCP)
5781 opts[offset] |= info->checksum.tcp;
5782 else if (ip->protocol == IPPROTO_UDP)
5783 opts[offset] |= info->checksum.udp;
5789 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5790 struct net_device *dev)
5792 struct rtl8169_private *tp = netdev_priv(dev);
5793 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
5794 struct TxDesc *txd = tp->TxDescArray + entry;
5795 void __iomem *ioaddr = tp->mmio_addr;
5796 struct device *d = &tp->pci_dev->dev;
5802 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5803 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
5807 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5810 len = skb_headlen(skb);
5811 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5812 if (unlikely(dma_mapping_error(d, mapping))) {
5813 if (net_ratelimit())
5814 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
5818 tp->tx_skb[entry].len = len;
5819 txd->addr = cpu_to_le64(mapping);
5821 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5824 rtl8169_tso_csum(tp, skb, opts);
5826 frags = rtl8169_xmit_frags(tp, skb, opts);
5830 opts[0] |= FirstFrag;
5832 opts[0] |= FirstFrag | LastFrag;
5833 tp->tx_skb[entry].skb = skb;
5836 txd->opts2 = cpu_to_le32(opts[1]);
5838 skb_tx_timestamp(skb);
5842 /* Anti gcc 2.95.3 bugware (sic) */
5843 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
5844 txd->opts1 = cpu_to_le32(status);
5846 tp->cur_tx += frags + 1;
5850 RTL_W8(TxPoll, NPQ);
5854 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5855 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5856 * not miss a ring update when it notices a stopped queue.
5859 netif_stop_queue(dev);
5860 /* Sync with rtl_tx:
5861 * - publish queue status and cur_tx ring index (write barrier)
5862 * - refresh dirty_tx ring index (read barrier).
5863 * May the current thread have a pessimistic view of the ring
5864 * status and forget to wake up queue, a racing rtl_tx thread
5868 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5869 netif_wake_queue(dev);
5872 return NETDEV_TX_OK;
5875 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5878 dev->stats.tx_dropped++;
5879 return NETDEV_TX_OK;
5882 netif_stop_queue(dev);
5883 dev->stats.tx_dropped++;
5884 return NETDEV_TX_BUSY;
5887 static void rtl8169_pcierr_interrupt(struct net_device *dev)
5889 struct rtl8169_private *tp = netdev_priv(dev);
5890 struct pci_dev *pdev = tp->pci_dev;
5891 u16 pci_status, pci_cmd;
5893 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5894 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5896 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5897 pci_cmd, pci_status);
5900 * The recovery sequence below admits a very elaborated explanation:
5901 * - it seems to work;
5902 * - I did not see what else could be done;
5903 * - it makes iop3xx happy.
5905 * Feel free to adjust to your needs.
5907 if (pdev->broken_parity_status)
5908 pci_cmd &= ~PCI_COMMAND_PARITY;
5910 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5912 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
5914 pci_write_config_word(pdev, PCI_STATUS,
5915 pci_status & (PCI_STATUS_DETECTED_PARITY |
5916 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5917 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5919 /* The infamous DAC f*ckup only happens at boot time */
5920 if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
5921 void __iomem *ioaddr = tp->mmio_addr;
5923 netif_info(tp, intr, dev, "disabling PCI DAC\n");
5924 tp->cp_cmd &= ~PCIDAC;
5925 RTL_W16(CPlusCmd, tp->cp_cmd);
5926 dev->features &= ~NETIF_F_HIGHDMA;
5929 rtl8169_hw_reset(tp);
5931 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
5934 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5936 unsigned int dirty_tx, tx_left;
5938 dirty_tx = tp->dirty_tx;
5940 tx_left = tp->cur_tx - dirty_tx;
5942 while (tx_left > 0) {
5943 unsigned int entry = dirty_tx % NUM_TX_DESC;
5944 struct ring_info *tx_skb = tp->tx_skb + entry;
5948 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
5949 if (status & DescOwn)
5952 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5953 tp->TxDescArray + entry);
5954 if (status & LastFrag) {
5955 u64_stats_update_begin(&tp->tx_stats.syncp);
5956 tp->tx_stats.packets++;
5957 tp->tx_stats.bytes += tx_skb->skb->len;
5958 u64_stats_update_end(&tp->tx_stats.syncp);
5959 dev_kfree_skb(tx_skb->skb);
5966 if (tp->dirty_tx != dirty_tx) {
5967 tp->dirty_tx = dirty_tx;
5968 /* Sync with rtl8169_start_xmit:
5969 * - publish dirty_tx ring index (write barrier)
5970 * - refresh cur_tx ring index and queue status (read barrier)
5971 * May the current thread miss the stopped queue condition,
5972 * a racing xmit thread can only have a right view of the
5976 if (netif_queue_stopped(dev) &&
5977 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5978 netif_wake_queue(dev);
5981 * 8168 hack: TxPoll requests are lost when the Tx packets are
5982 * too close. Let's kick an extra TxPoll request when a burst
5983 * of start_xmit activity is detected (if it is not detected,
5984 * it is slow enough). -- FR
5986 if (tp->cur_tx != dirty_tx) {
5987 void __iomem *ioaddr = tp->mmio_addr;
5989 RTL_W8(TxPoll, NPQ);
5994 static inline int rtl8169_fragmented_frame(u32 status)
5996 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5999 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
6001 u32 status = opts1 & RxProtoMask;
6003 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
6004 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
6005 skb->ip_summed = CHECKSUM_UNNECESSARY;
6007 skb_checksum_none_assert(skb);
6010 static struct sk_buff *rtl8169_try_rx_copy(void *data,
6011 struct rtl8169_private *tp,
6015 struct sk_buff *skb;
6016 struct device *d = &tp->pci_dev->dev;
6018 data = rtl8169_align(data);
6019 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
6021 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
6023 memcpy(skb->data, data, pkt_size);
6024 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
6029 static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
6031 unsigned int cur_rx, rx_left;
6034 cur_rx = tp->cur_rx;
6036 for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
6037 unsigned int entry = cur_rx % NUM_RX_DESC;
6038 struct RxDesc *desc = tp->RxDescArray + entry;
6042 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
6044 if (status & DescOwn)
6046 if (unlikely(status & RxRES)) {
6047 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
6049 dev->stats.rx_errors++;
6050 if (status & (RxRWT | RxRUNT))
6051 dev->stats.rx_length_errors++;
6053 dev->stats.rx_crc_errors++;
6054 if (status & RxFOVF) {
6055 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6056 dev->stats.rx_fifo_errors++;
6058 if ((status & (RxRUNT | RxCRC)) &&
6059 !(status & (RxRWT | RxFOVF)) &&
6060 (dev->features & NETIF_F_RXALL))
6063 struct sk_buff *skb;
6068 addr = le64_to_cpu(desc->addr);
6069 if (likely(!(dev->features & NETIF_F_RXFCS)))
6070 pkt_size = (status & 0x00003fff) - 4;
6072 pkt_size = status & 0x00003fff;
6075 * The driver does not support incoming fragmented
6076 * frames. They are seen as a symptom of over-mtu
6079 if (unlikely(rtl8169_fragmented_frame(status))) {
6080 dev->stats.rx_dropped++;
6081 dev->stats.rx_length_errors++;
6082 goto release_descriptor;
6085 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
6086 tp, pkt_size, addr);
6088 dev->stats.rx_dropped++;
6089 goto release_descriptor;
6092 rtl8169_rx_csum(skb, status);
6093 skb_put(skb, pkt_size);
6094 skb->protocol = eth_type_trans(skb, dev);
6096 rtl8169_rx_vlan_tag(desc, skb);
6098 napi_gro_receive(&tp->napi, skb);
6100 u64_stats_update_begin(&tp->rx_stats.syncp);
6101 tp->rx_stats.packets++;
6102 tp->rx_stats.bytes += pkt_size;
6103 u64_stats_update_end(&tp->rx_stats.syncp);
6108 rtl8169_mark_to_asic(desc, rx_buf_sz);
6111 count = cur_rx - tp->cur_rx;
6112 tp->cur_rx = cur_rx;
6117 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6119 struct net_device *dev = dev_instance;
6120 struct rtl8169_private *tp = netdev_priv(dev);
6124 status = rtl_get_events(tp);
6125 if (status && status != 0xffff) {
6126 status &= RTL_EVENT_NAPI | tp->event_slow;
6130 rtl_irq_disable(tp);
6131 napi_schedule(&tp->napi);
6134 return IRQ_RETVAL(handled);
6138 * Workqueue context.
6140 static void rtl_slow_event_work(struct rtl8169_private *tp)
6142 struct net_device *dev = tp->dev;
6145 status = rtl_get_events(tp) & tp->event_slow;
6146 rtl_ack_events(tp, status);
6148 if (unlikely(status & RxFIFOOver)) {
6149 switch (tp->mac_version) {
6150 /* Work around for rx fifo overflow */
6151 case RTL_GIGA_MAC_VER_11:
6152 netif_stop_queue(dev);
6153 /* XXX - Hack alert. See rtl_task(). */
6154 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6160 if (unlikely(status & SYSErr))
6161 rtl8169_pcierr_interrupt(dev);
6163 if (status & LinkChg)
6164 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6166 rtl_irq_enable_all(tp);
6169 static void rtl_task(struct work_struct *work)
6171 static const struct {
6173 void (*action)(struct rtl8169_private *);
6175 /* XXX - keep rtl_slow_event_work() as first element. */
6176 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
6177 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
6178 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
6180 struct rtl8169_private *tp =
6181 container_of(work, struct rtl8169_private, wk.work);
6182 struct net_device *dev = tp->dev;
6187 if (!netif_running(dev) ||
6188 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6191 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
6194 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6196 rtl_work[i].action(tp);
6200 rtl_unlock_work(tp);
6203 static int rtl8169_poll(struct napi_struct *napi, int budget)
6205 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6206 struct net_device *dev = tp->dev;
6207 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6211 status = rtl_get_events(tp);
6212 rtl_ack_events(tp, status & ~tp->event_slow);
6214 if (status & RTL_EVENT_NAPI_RX)
6215 work_done = rtl_rx(dev, tp, (u32) budget);
6217 if (status & RTL_EVENT_NAPI_TX)
6220 if (status & tp->event_slow) {
6221 enable_mask &= ~tp->event_slow;
6223 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
6226 if (work_done < budget) {
6227 napi_complete(napi);
6229 rtl_irq_enable(tp, enable_mask);
6236 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6238 struct rtl8169_private *tp = netdev_priv(dev);
6240 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
6243 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6244 RTL_W32(RxMissed, 0);
6247 static void rtl8169_down(struct net_device *dev)
6249 struct rtl8169_private *tp = netdev_priv(dev);
6250 void __iomem *ioaddr = tp->mmio_addr;
6252 del_timer_sync(&tp->timer);
6254 napi_disable(&tp->napi);
6255 netif_stop_queue(dev);
6257 rtl8169_hw_reset(tp);
6259 * At this point device interrupts can not be enabled in any function,
6260 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6261 * and napi is disabled (rtl8169_poll).
6263 rtl8169_rx_missed(dev, ioaddr);
6265 /* Give a racing hard_start_xmit a few cycles to complete. */
6266 synchronize_sched();
6268 rtl8169_tx_clear(tp);
6270 rtl8169_rx_clear(tp);
6272 rtl_pll_power_down(tp);
6275 static int rtl8169_close(struct net_device *dev)
6277 struct rtl8169_private *tp = netdev_priv(dev);
6278 struct pci_dev *pdev = tp->pci_dev;
6280 pm_runtime_get_sync(&pdev->dev);
6282 /* Update counters before going down */
6283 rtl8169_update_counters(dev);
6286 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6289 rtl_unlock_work(tp);
6291 free_irq(pdev->irq, dev);
6293 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6295 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6297 tp->TxDescArray = NULL;
6298 tp->RxDescArray = NULL;
6300 pm_runtime_put_sync(&pdev->dev);
6305 #ifdef CONFIG_NET_POLL_CONTROLLER
6306 static void rtl8169_netpoll(struct net_device *dev)
6308 struct rtl8169_private *tp = netdev_priv(dev);
6310 rtl8169_interrupt(tp->pci_dev->irq, dev);
6314 static int rtl_open(struct net_device *dev)
6316 struct rtl8169_private *tp = netdev_priv(dev);
6317 void __iomem *ioaddr = tp->mmio_addr;
6318 struct pci_dev *pdev = tp->pci_dev;
6319 int retval = -ENOMEM;
6321 pm_runtime_get_sync(&pdev->dev);
6324 * Rx and Tx descriptors needs 256 bytes alignment.
6325 * dma_alloc_coherent provides more.
6327 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
6328 &tp->TxPhyAddr, GFP_KERNEL);
6329 if (!tp->TxDescArray)
6330 goto err_pm_runtime_put;
6332 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
6333 &tp->RxPhyAddr, GFP_KERNEL);
6334 if (!tp->RxDescArray)
6337 retval = rtl8169_init_ring(dev);
6341 INIT_WORK(&tp->wk.work, rtl_task);
6345 rtl_request_firmware(tp);
6347 retval = request_irq(pdev->irq, rtl8169_interrupt,
6348 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
6351 goto err_release_fw_2;
6355 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6357 napi_enable(&tp->napi);
6359 rtl8169_init_phy(dev, tp);
6361 __rtl8169_set_features(dev, dev->features);
6363 rtl_pll_power_up(tp);
6367 netif_start_queue(dev);
6369 rtl_unlock_work(tp);
6371 tp->saved_wolopts = 0;
6372 pm_runtime_put_noidle(&pdev->dev);
6374 rtl8169_check_link_status(dev, tp, ioaddr);
6379 rtl_release_firmware(tp);
6380 rtl8169_rx_clear(tp);
6382 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6384 tp->RxDescArray = NULL;
6386 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6388 tp->TxDescArray = NULL;
6390 pm_runtime_put_noidle(&pdev->dev);
6394 static struct rtnl_link_stats64 *
6395 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6397 struct rtl8169_private *tp = netdev_priv(dev);
6398 void __iomem *ioaddr = tp->mmio_addr;
6401 if (netif_running(dev))
6402 rtl8169_rx_missed(dev, ioaddr);
6405 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6406 stats->rx_packets = tp->rx_stats.packets;
6407 stats->rx_bytes = tp->rx_stats.bytes;
6408 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6412 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6413 stats->tx_packets = tp->tx_stats.packets;
6414 stats->tx_bytes = tp->tx_stats.bytes;
6415 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6417 stats->rx_dropped = dev->stats.rx_dropped;
6418 stats->tx_dropped = dev->stats.tx_dropped;
6419 stats->rx_length_errors = dev->stats.rx_length_errors;
6420 stats->rx_errors = dev->stats.rx_errors;
6421 stats->rx_crc_errors = dev->stats.rx_crc_errors;
6422 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
6423 stats->rx_missed_errors = dev->stats.rx_missed_errors;
6428 static void rtl8169_net_suspend(struct net_device *dev)
6430 struct rtl8169_private *tp = netdev_priv(dev);
6432 if (!netif_running(dev))
6435 netif_device_detach(dev);
6436 netif_stop_queue(dev);
6439 napi_disable(&tp->napi);
6440 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6441 rtl_unlock_work(tp);
6443 rtl_pll_power_down(tp);
6448 static int rtl8169_suspend(struct device *device)
6450 struct pci_dev *pdev = to_pci_dev(device);
6451 struct net_device *dev = pci_get_drvdata(pdev);
6453 rtl8169_net_suspend(dev);
6458 static void __rtl8169_resume(struct net_device *dev)
6460 struct rtl8169_private *tp = netdev_priv(dev);
6462 netif_device_attach(dev);
6464 rtl_pll_power_up(tp);
6467 napi_enable(&tp->napi);
6468 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6469 rtl_unlock_work(tp);
6471 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6474 static int rtl8169_resume(struct device *device)
6476 struct pci_dev *pdev = to_pci_dev(device);
6477 struct net_device *dev = pci_get_drvdata(pdev);
6478 struct rtl8169_private *tp = netdev_priv(dev);
6480 rtl8169_init_phy(dev, tp);
6482 if (netif_running(dev))
6483 __rtl8169_resume(dev);
6488 static int rtl8169_runtime_suspend(struct device *device)
6490 struct pci_dev *pdev = to_pci_dev(device);
6491 struct net_device *dev = pci_get_drvdata(pdev);
6492 struct rtl8169_private *tp = netdev_priv(dev);
6494 if (!tp->TxDescArray)
6498 tp->saved_wolopts = __rtl8169_get_wol(tp);
6499 __rtl8169_set_wol(tp, WAKE_ANY);
6500 rtl_unlock_work(tp);
6502 rtl8169_net_suspend(dev);
6507 static int rtl8169_runtime_resume(struct device *device)
6509 struct pci_dev *pdev = to_pci_dev(device);
6510 struct net_device *dev = pci_get_drvdata(pdev);
6511 struct rtl8169_private *tp = netdev_priv(dev);
6513 if (!tp->TxDescArray)
6517 __rtl8169_set_wol(tp, tp->saved_wolopts);
6518 tp->saved_wolopts = 0;
6519 rtl_unlock_work(tp);
6521 rtl8169_init_phy(dev, tp);
6523 __rtl8169_resume(dev);
6528 static int rtl8169_runtime_idle(struct device *device)
6530 struct pci_dev *pdev = to_pci_dev(device);
6531 struct net_device *dev = pci_get_drvdata(pdev);
6532 struct rtl8169_private *tp = netdev_priv(dev);
6534 return tp->TxDescArray ? -EBUSY : 0;
6537 static const struct dev_pm_ops rtl8169_pm_ops = {
6538 .suspend = rtl8169_suspend,
6539 .resume = rtl8169_resume,
6540 .freeze = rtl8169_suspend,
6541 .thaw = rtl8169_resume,
6542 .poweroff = rtl8169_suspend,
6543 .restore = rtl8169_resume,
6544 .runtime_suspend = rtl8169_runtime_suspend,
6545 .runtime_resume = rtl8169_runtime_resume,
6546 .runtime_idle = rtl8169_runtime_idle,
6549 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6551 #else /* !CONFIG_PM */
6553 #define RTL8169_PM_OPS NULL
6555 #endif /* !CONFIG_PM */
6557 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6559 void __iomem *ioaddr = tp->mmio_addr;
6561 /* WoL fails with 8168b when the receiver is disabled. */
6562 switch (tp->mac_version) {
6563 case RTL_GIGA_MAC_VER_11:
6564 case RTL_GIGA_MAC_VER_12:
6565 case RTL_GIGA_MAC_VER_17:
6566 pci_clear_master(tp->pci_dev);
6568 RTL_W8(ChipCmd, CmdRxEnb);
6577 static void rtl_shutdown(struct pci_dev *pdev)
6579 struct net_device *dev = pci_get_drvdata(pdev);
6580 struct rtl8169_private *tp = netdev_priv(dev);
6581 struct device *d = &pdev->dev;
6583 pm_runtime_get_sync(d);
6585 rtl8169_net_suspend(dev);
6587 /* Restore original MAC address */
6588 rtl_rar_set(tp, dev->perm_addr);
6590 rtl8169_hw_reset(tp);
6592 if (system_state == SYSTEM_POWER_OFF) {
6593 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6594 rtl_wol_suspend_quirk(tp);
6595 rtl_wol_shutdown_quirk(tp);
6598 pci_wake_from_d3(pdev, true);
6599 pci_set_power_state(pdev, PCI_D3hot);
6602 pm_runtime_put_noidle(d);
6605 static void rtl_remove_one(struct pci_dev *pdev)
6607 struct net_device *dev = pci_get_drvdata(pdev);
6608 struct rtl8169_private *tp = netdev_priv(dev);
6610 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6611 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6612 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6613 rtl8168_driver_stop(tp);
6616 cancel_work_sync(&tp->wk.work);
6618 netif_napi_del(&tp->napi);
6620 unregister_netdev(dev);
6622 rtl_release_firmware(tp);
6624 if (pci_dev_run_wake(pdev))
6625 pm_runtime_get_noresume(&pdev->dev);
6627 /* restore original MAC address */
6628 rtl_rar_set(tp, dev->perm_addr);
6630 rtl_disable_msi(pdev, tp);
6631 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6632 pci_set_drvdata(pdev, NULL);
6635 static const struct net_device_ops rtl_netdev_ops = {
6636 .ndo_open = rtl_open,
6637 .ndo_stop = rtl8169_close,
6638 .ndo_get_stats64 = rtl8169_get_stats64,
6639 .ndo_start_xmit = rtl8169_start_xmit,
6640 .ndo_tx_timeout = rtl8169_tx_timeout,
6641 .ndo_validate_addr = eth_validate_addr,
6642 .ndo_change_mtu = rtl8169_change_mtu,
6643 .ndo_fix_features = rtl8169_fix_features,
6644 .ndo_set_features = rtl8169_set_features,
6645 .ndo_set_mac_address = rtl_set_mac_address,
6646 .ndo_do_ioctl = rtl8169_ioctl,
6647 .ndo_set_rx_mode = rtl_set_rx_mode,
6648 #ifdef CONFIG_NET_POLL_CONTROLLER
6649 .ndo_poll_controller = rtl8169_netpoll,
6654 static const struct rtl_cfg_info {
6655 void (*hw_start)(struct net_device *);
6656 unsigned int region;
6661 } rtl_cfg_infos [] = {
6663 .hw_start = rtl_hw_start_8169,
6666 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6667 .features = RTL_FEATURE_GMII,
6668 .default_ver = RTL_GIGA_MAC_VER_01,
6671 .hw_start = rtl_hw_start_8168,
6674 .event_slow = SYSErr | LinkChg | RxOverflow,
6675 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
6676 .default_ver = RTL_GIGA_MAC_VER_11,
6679 .hw_start = rtl_hw_start_8101,
6682 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
6684 .features = RTL_FEATURE_MSI,
6685 .default_ver = RTL_GIGA_MAC_VER_13,
6689 /* Cfg9346_Unlock assumed. */
6690 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6691 const struct rtl_cfg_info *cfg)
6693 void __iomem *ioaddr = tp->mmio_addr;
6697 cfg2 = RTL_R8(Config2) & ~MSIEnable;
6698 if (cfg->features & RTL_FEATURE_MSI) {
6699 if (pci_enable_msi(tp->pci_dev)) {
6700 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6703 msi = RTL_FEATURE_MSI;
6706 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6707 RTL_W8(Config2, cfg2);
6711 DECLARE_RTL_COND(rtl_link_list_ready_cond)
6713 void __iomem *ioaddr = tp->mmio_addr;
6715 return RTL_R8(MCU) & LINK_LIST_RDY;
6718 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6720 void __iomem *ioaddr = tp->mmio_addr;
6722 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
6725 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
6727 void __iomem *ioaddr = tp->mmio_addr;
6730 tp->ocp_base = OCP_STD_PHY_BASE;
6732 RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
6734 if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6737 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6740 RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
6742 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
6744 data = r8168_mac_ocp_read(tp, 0xe8de);
6746 r8168_mac_ocp_write(tp, 0xe8de, data);
6748 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6751 data = r8168_mac_ocp_read(tp, 0xe8de);
6753 r8168_mac_ocp_write(tp, 0xe8de, data);
6755 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6759 static void rtl_hw_initialize(struct rtl8169_private *tp)
6761 switch (tp->mac_version) {
6762 case RTL_GIGA_MAC_VER_40:
6763 case RTL_GIGA_MAC_VER_41:
6764 rtl_hw_init_8168g(tp);
6773 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6775 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6776 const unsigned int region = cfg->region;
6777 struct rtl8169_private *tp;
6778 struct mii_if_info *mii;
6779 struct net_device *dev;
6780 void __iomem *ioaddr;
6784 if (netif_msg_drv(&debug)) {
6785 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6786 MODULENAME, RTL8169_VERSION);
6789 dev = alloc_etherdev(sizeof (*tp));
6795 SET_NETDEV_DEV(dev, &pdev->dev);
6796 dev->netdev_ops = &rtl_netdev_ops;
6797 tp = netdev_priv(dev);
6800 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6804 mii->mdio_read = rtl_mdio_read;
6805 mii->mdio_write = rtl_mdio_write;
6806 mii->phy_id_mask = 0x1f;
6807 mii->reg_num_mask = 0x1f;
6808 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6810 /* disable ASPM completely as that cause random device stop working
6811 * problems as well as full system hangs for some PCIe devices users */
6812 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6813 PCIE_LINK_STATE_CLKPM);
6815 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6816 rc = pci_enable_device(pdev);
6818 netif_err(tp, probe, dev, "enable failure\n");
6819 goto err_out_free_dev_1;
6822 if (pci_set_mwi(pdev) < 0)
6823 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6825 /* make sure PCI base addr 1 is MMIO */
6826 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6827 netif_err(tp, probe, dev,
6828 "region #%d not an MMIO resource, aborting\n",
6834 /* check for weird/broken PCI region reporting */
6835 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6836 netif_err(tp, probe, dev,
6837 "Invalid PCI region size(s), aborting\n");
6842 rc = pci_request_regions(pdev, MODULENAME);
6844 netif_err(tp, probe, dev, "could not request regions\n");
6848 tp->cp_cmd = RxChkSum;
6850 if ((sizeof(dma_addr_t) > 4) &&
6851 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6852 tp->cp_cmd |= PCIDAC;
6853 dev->features |= NETIF_F_HIGHDMA;
6855 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6857 netif_err(tp, probe, dev, "DMA configuration failed\n");
6858 goto err_out_free_res_3;
6862 /* ioremap MMIO region */
6863 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6865 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6867 goto err_out_free_res_3;
6869 tp->mmio_addr = ioaddr;
6871 if (!pci_is_pcie(pdev))
6872 netif_info(tp, probe, dev, "not PCI Express\n");
6874 /* Identify chip attached to board */
6875 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6879 rtl_irq_disable(tp);
6881 rtl_hw_initialize(tp);
6885 rtl_ack_events(tp, 0xffff);
6887 pci_set_master(pdev);
6890 * Pretend we are using VLANs; This bypasses a nasty bug where
6891 * Interrupts stop flowing on high load on 8110SCd controllers.
6893 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6894 tp->cp_cmd |= RxVlan;
6896 rtl_init_mdio_ops(tp);
6897 rtl_init_pll_power_ops(tp);
6898 rtl_init_jumbo_ops(tp);
6899 rtl_init_csi_ops(tp);
6901 rtl8169_print_mac_version(tp);
6903 chipset = tp->mac_version;
6904 tp->txd_version = rtl_chip_infos[chipset].txd_version;
6906 RTL_W8(Cfg9346, Cfg9346_Unlock);
6907 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6908 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6909 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6910 tp->features |= RTL_FEATURE_WOL;
6911 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6912 tp->features |= RTL_FEATURE_WOL;
6913 tp->features |= rtl_try_msi(tp, cfg);
6914 RTL_W8(Cfg9346, Cfg9346_Lock);
6916 if (rtl_tbi_enabled(tp)) {
6917 tp->set_speed = rtl8169_set_speed_tbi;
6918 tp->get_settings = rtl8169_gset_tbi;
6919 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6920 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6921 tp->link_ok = rtl8169_tbi_link_ok;
6922 tp->do_ioctl = rtl_tbi_ioctl;
6924 tp->set_speed = rtl8169_set_speed_xmii;
6925 tp->get_settings = rtl8169_gset_xmii;
6926 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6927 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6928 tp->link_ok = rtl8169_xmii_link_ok;
6929 tp->do_ioctl = rtl_xmii_ioctl;
6932 mutex_init(&tp->wk.mutex);
6934 /* Get MAC address */
6935 for (i = 0; i < ETH_ALEN; i++)
6936 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6938 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6939 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6941 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6943 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6944 * properly for all devices */
6945 dev->features |= NETIF_F_RXCSUM |
6946 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6948 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6949 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6950 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6953 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6954 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6955 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6957 dev->hw_features |= NETIF_F_RXALL;
6958 dev->hw_features |= NETIF_F_RXFCS;
6960 tp->hw_start = cfg->hw_start;
6961 tp->event_slow = cfg->event_slow;
6963 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6964 ~(RxBOVF | RxFOVF) : ~0;
6966 init_timer(&tp->timer);
6967 tp->timer.data = (unsigned long) dev;
6968 tp->timer.function = rtl8169_phy_timer;
6970 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6972 rc = register_netdev(dev);
6976 pci_set_drvdata(pdev, dev);
6978 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6979 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6980 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6981 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6982 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6983 "tx checksumming: %s]\n",
6984 rtl_chip_infos[chipset].jumbo_max,
6985 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6988 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6989 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6990 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6991 rtl8168_driver_start(tp);
6994 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6996 if (pci_dev_run_wake(pdev))
6997 pm_runtime_put_noidle(&pdev->dev);
6999 netif_carrier_off(dev);
7005 netif_napi_del(&tp->napi);
7006 rtl_disable_msi(pdev, tp);
7009 pci_release_regions(pdev);
7011 pci_clear_mwi(pdev);
7012 pci_disable_device(pdev);
7018 static struct pci_driver rtl8169_pci_driver = {
7020 .id_table = rtl8169_pci_tbl,
7021 .probe = rtl_init_one,
7022 .remove = rtl_remove_one,
7023 .shutdown = rtl_shutdown,
7024 .driver.pm = RTL8169_PM_OPS,
7027 module_pci_driver(rtl8169_pci_driver);