1 | /* |
2 | * Copyright 2010-2011 Calxeda, Inc. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program. If not, see <http://www.gnu.org/licenses/>. | |
15 | */ | |
16 | #include <linux/module.h> | |
17 | #include <linux/init.h> | |
18 | #include <linux/kernel.h> | |
19 | #include <linux/circ_buf.h> | |
20 | #include <linux/interrupt.h> | |
21 | #include <linux/etherdevice.h> | |
22 | #include <linux/platform_device.h> | |
23 | #include <linux/skbuff.h> | |
24 | #include <linux/ethtool.h> | |
25 | #include <linux/if.h> | |
26 | #include <linux/crc32.h> | |
27 | #include <linux/dma-mapping.h> | |
28 | #include <linux/slab.h> | |
29 | ||
30 | /* XGMAC Register definitions */ | |
31 | #define XGMAC_CONTROL 0x00000000 /* MAC Configuration */ | |
32 | #define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */ | |
33 | #define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */ | |
34 | #define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */ | |
35 | #define XGMAC_VERSION 0x00000020 /* Version */ | |
36 | #define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */ | |
37 | #define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */ | |
38 | #define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */ | |
39 | #define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */ | |
40 | #define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */ | |
41 | #define XGMAC_DEBUG 0x00000038 /* Debug */ | |
42 | #define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */ | |
43 | #define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8)) | |
44 | #define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8)) | |
45 | #define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */ | |
46 | #define XGMAC_NUM_HASH 16 | |
47 | #define XGMAC_OMR 0x00000400 | |
48 | #define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */ | |
49 | #define XGMAC_PMT 0x00000704 /* PMT Control and Status */ | |
50 | #define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */ | |
51 | #define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */ | |
52 | #define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */ | |
53 | #define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */ | |
54 | #define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */ | |
55 | ||
56 | /* Hardware TX Statistics Counters */ | |
57 | #define XGMAC_MMC_TXOCTET_GB_LO 0x00000814 | |
58 | #define XGMAC_MMC_TXOCTET_GB_HI 0x00000818 | |
59 | #define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C | |
60 | #define XGMAC_MMC_TXFRAME_GB_HI 0x00000820 | |
61 | #define XGMAC_MMC_TXBCFRAME_G 0x00000824 | |
62 | #define XGMAC_MMC_TXMCFRAME_G 0x0000082C | |
63 | #define XGMAC_MMC_TXUCFRAME_GB 0x00000864 | |
64 | #define XGMAC_MMC_TXMCFRAME_GB 0x0000086C | |
65 | #define XGMAC_MMC_TXBCFRAME_GB 0x00000874 | |
66 | #define XGMAC_MMC_TXUNDERFLOW 0x0000087C | |
67 | #define XGMAC_MMC_TXOCTET_G_LO 0x00000884 | |
68 | #define XGMAC_MMC_TXOCTET_G_HI 0x00000888 | |
69 | #define XGMAC_MMC_TXFRAME_G_LO 0x0000088C | |
70 | #define XGMAC_MMC_TXFRAME_G_HI 0x00000890 | |
71 | #define XGMAC_MMC_TXPAUSEFRAME 0x00000894 | |
72 | #define XGMAC_MMC_TXVLANFRAME 0x0000089C | |
73 | ||
74 | /* Hardware RX Statistics Counters */ | |
75 | #define XGMAC_MMC_RXFRAME_GB_LO 0x00000900 | |
76 | #define XGMAC_MMC_RXFRAME_GB_HI 0x00000904 | |
77 | #define XGMAC_MMC_RXOCTET_GB_LO 0x00000908 | |
78 | #define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C | |
79 | #define XGMAC_MMC_RXOCTET_G_LO 0x00000910 | |
80 | #define XGMAC_MMC_RXOCTET_G_HI 0x00000914 | |
81 | #define XGMAC_MMC_RXBCFRAME_G 0x00000918 | |
82 | #define XGMAC_MMC_RXMCFRAME_G 0x00000920 | |
83 | #define XGMAC_MMC_RXCRCERR 0x00000928 | |
84 | #define XGMAC_MMC_RXRUNT 0x00000930 | |
85 | #define XGMAC_MMC_RXJABBER 0x00000934 | |
86 | #define XGMAC_MMC_RXUCFRAME_G 0x00000970 | |
87 | #define XGMAC_MMC_RXLENGTHERR 0x00000978 | |
88 | #define XGMAC_MMC_RXPAUSEFRAME 0x00000988 | |
89 | #define XGMAC_MMC_RXOVERFLOW 0x00000990 | |
90 | #define XGMAC_MMC_RXVLANFRAME 0x00000998 | |
91 | #define XGMAC_MMC_RXWATCHDOG 0x000009a0 | |
92 | ||
93 | /* DMA Control and Status Registers */ | |
94 | #define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */ | |
95 | #define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */ | |
96 | #define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */ | |
97 | #define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */ | |
98 | #define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */ | |
99 | #define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */ | |
100 | #define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */ | |
101 | #define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */ | |
102 | #define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */ | |
103 | #define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */ | |
104 | #define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */ | |
105 | #define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */ | |
106 | #define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ | |
107 | ||
108 | #define XGMAC_ADDR_AE 0x80000000 | |
109 | #define XGMAC_MAX_FILTER_ADDR 31 | |
110 | ||
111 | /* PMT Control and Status */ | |
112 | #define XGMAC_PMT_POINTER_RESET 0x80000000 | |
113 | #define XGMAC_PMT_GLBL_UNICAST 0x00000200 | |
114 | #define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040 | |
115 | #define XGMAC_PMT_MAGIC_PKT 0x00000020 | |
116 | #define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004 | |
117 | #define XGMAC_PMT_MAGIC_PKT_EN 0x00000002 | |
118 | #define XGMAC_PMT_POWERDOWN 0x00000001 | |
119 | ||
120 | #define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */ | |
121 | #define XGMAC_CONTROL_SPD_MASK 0x60000000 | |
122 | #define XGMAC_CONTROL_SPD_1G 0x60000000 | |
123 | #define XGMAC_CONTROL_SPD_2_5G 0x40000000 | |
124 | #define XGMAC_CONTROL_SPD_10G 0x00000000 | |
125 | #define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */ | |
126 | #define XGMAC_CONTROL_SARK_MASK 0x18000000 | |
127 | #define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */ | |
128 | #define XGMAC_CONTROL_CAR_MASK 0x06000000 | |
129 | #define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */ | |
130 | #define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */ | |
131 | #define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */ | |
132 | #define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ | |
133 | #define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ | |
134 | #define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ | |
135 | #define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */ | |
136 | #define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */ | |
137 | #define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ | |
138 | #define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ | |
139 | ||
140 | /* XGMAC Frame Filter defines */ | |
141 | #define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ | |
142 | #define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ | |
143 | #define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ | |
144 | #define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ | |
145 | #define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ | |
146 | #define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ | |
147 | #define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ | |
148 | #define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ | |
149 | #define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ | |
150 | #define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */ | |
151 | #define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */ | |
152 | #define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ | |
153 | ||
154 | /* XGMAC FLOW CTRL defines */ | |
155 | #define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ | |
156 | #define XGMAC_FLOW_CTRL_PT_SHIFT 16 | |
157 | #define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */ | |
158 | #define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */ | |
159 | #define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */ | |
160 | #define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */ | |
161 | #define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ | |
162 | #define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ | |
163 | #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ | |
164 | ||
165 | /* XGMAC_INT_STAT reg */ | |
166 | #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ | |
167 | #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ | |
168 | ||
169 | /* DMA Bus Mode register defines */ | |
170 | #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ | |
171 | #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ | |
172 | #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ | |
173 | #define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ | |
174 | ||
175 | /* Programmable burst length */ | |
176 | #define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ | |
177 | #define DMA_BUS_MODE_PBL_SHIFT 8 | |
178 | #define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ | |
179 | #define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ | |
180 | #define DMA_BUS_MODE_RPBL_SHIFT 17 | |
181 | #define DMA_BUS_MODE_USP 0x00800000 | |
182 | #define DMA_BUS_MODE_8PBL 0x01000000 | |
183 | #define DMA_BUS_MODE_AAL 0x02000000 | |
184 | ||
185 | /* DMA Bus Mode register defines */ | |
186 | #define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ | |
187 | #define DMA_BUS_PR_RATIO_SHIFT 14 | |
188 | #define DMA_BUS_FB 0x00010000 /* Fixed Burst */ | |
189 | ||
190 | /* DMA Control register defines */ | |
191 | #define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ | |
192 | #define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ | |
193 | #define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ | |
194 | ||
195 | /* DMA Normal interrupt */ | |
196 | #define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ | |
197 | #define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ | |
198 | #define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ | |
199 | #define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ | |
200 | #define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ | |
201 | #define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ | |
202 | #define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ | |
203 | #define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ | |
204 | #define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ | |
205 | #define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ | |
206 | #define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ | |
207 | #define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ | |
208 | #define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */ | |
209 | #define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ | |
210 | #define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ | |
211 | ||
212 | #define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ | |
213 | DMA_INTR_ENA_TUE) | |
214 | ||
215 | #define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ | |
216 | DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ | |
217 | DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \ | |
218 | DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \ | |
219 | DMA_INTR_ENA_TSE) | |
220 | ||
221 | /* DMA default interrupt mask */ | |
222 | #define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) | |
223 | ||
224 | /* DMA Status register defines */ | |
225 | #define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ | |
226 | #define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ | |
227 | #define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ | |
228 | #define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ | |
229 | #define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ | |
230 | #define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ | |
231 | #define DMA_STATUS_TS_SHIFT 20 | |
232 | #define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ | |
233 | #define DMA_STATUS_RS_SHIFT 17 | |
234 | #define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ | |
235 | #define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ | |
236 | #define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ | |
237 | #define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ | |
238 | #define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ | |
239 | #define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ | |
240 | #define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ | |
241 | #define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ | |
242 | #define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ | |
243 | #define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ | |
244 | #define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ | |
245 | #define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ | |
246 | #define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */ | |
247 | #define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ | |
248 | #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ | |
249 | ||
250 | /* Common MAC defines */ | |
251 | #define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ | |
252 | #define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ | |
253 | ||
254 | /* XGMAC Operation Mode Register */ | |
255 | #define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */ | |
256 | #define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */ | |
257 | #define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */ | |
258 | #define XGMAC_OMR_TTC_MASK 0x00030000 | |
259 | #define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */ | |
260 | #define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */ | |
261 | #define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */ | |
262 | #define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */ | |
263 | #define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */ | |
264 | #define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ | |
265 | #define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */ | |
266 | #define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ | |
267 | #define XGMAC_OMR_RTC 0x00000010 /* RX Threshold Ctrl */ | |
268 | #define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */ | |
269 | ||
270 | /* XGMAC HW Features Register */ | |
271 | #define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */ | |
272 | ||
273 | #define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008 | |
274 | ||
275 | /* XGMAC Descriptor Defines */ | |
276 | #define MAX_DESC_BUF_SZ (0x2000 - 8) | |
277 | ||
278 | #define RXDESC_EXT_STATUS 0x00000001 | |
279 | #define RXDESC_CRC_ERR 0x00000002 | |
280 | #define RXDESC_RX_ERR 0x00000008 | |
281 | #define RXDESC_RX_WDOG 0x00000010 | |
282 | #define RXDESC_FRAME_TYPE 0x00000020 | |
283 | #define RXDESC_GIANT_FRAME 0x00000080 | |
284 | #define RXDESC_LAST_SEG 0x00000100 | |
285 | #define RXDESC_FIRST_SEG 0x00000200 | |
286 | #define RXDESC_VLAN_FRAME 0x00000400 | |
287 | #define RXDESC_OVERFLOW_ERR 0x00000800 | |
288 | #define RXDESC_LENGTH_ERR 0x00001000 | |
289 | #define RXDESC_SA_FILTER_FAIL 0x00002000 | |
290 | #define RXDESC_DESCRIPTOR_ERR 0x00004000 | |
291 | #define RXDESC_ERROR_SUMMARY 0x00008000 | |
292 | #define RXDESC_FRAME_LEN_OFFSET 16 | |
293 | #define RXDESC_FRAME_LEN_MASK 0x3fff0000 | |
294 | #define RXDESC_DA_FILTER_FAIL 0x40000000 | |
295 | ||
296 | #define RXDESC1_END_RING 0x00008000 | |
297 | ||
298 | #define RXDESC_IP_PAYLOAD_MASK 0x00000003 | |
299 | #define RXDESC_IP_PAYLOAD_UDP 0x00000001 | |
300 | #define RXDESC_IP_PAYLOAD_TCP 0x00000002 | |
301 | #define RXDESC_IP_PAYLOAD_ICMP 0x00000003 | |
302 | #define RXDESC_IP_HEADER_ERR 0x00000008 | |
303 | #define RXDESC_IP_PAYLOAD_ERR 0x00000010 | |
304 | #define RXDESC_IPV4_PACKET 0x00000040 | |
305 | #define RXDESC_IPV6_PACKET 0x00000080 | |
306 | #define TXDESC_UNDERFLOW_ERR 0x00000001 | |
307 | #define TXDESC_JABBER_TIMEOUT 0x00000002 | |
308 | #define TXDESC_LOCAL_FAULT 0x00000004 | |
309 | #define TXDESC_REMOTE_FAULT 0x00000008 | |
310 | #define TXDESC_VLAN_FRAME 0x00000010 | |
311 | #define TXDESC_FRAME_FLUSHED 0x00000020 | |
312 | #define TXDESC_IP_HEADER_ERR 0x00000040 | |
313 | #define TXDESC_PAYLOAD_CSUM_ERR 0x00000080 | |
314 | #define TXDESC_ERROR_SUMMARY 0x00008000 | |
315 | #define TXDESC_SA_CTRL_INSERT 0x00040000 | |
316 | #define TXDESC_SA_CTRL_REPLACE 0x00080000 | |
317 | #define TXDESC_2ND_ADDR_CHAINED 0x00100000 | |
318 | #define TXDESC_END_RING 0x00200000 | |
319 | #define TXDESC_CSUM_IP 0x00400000 | |
320 | #define TXDESC_CSUM_IP_PAYLD 0x00800000 | |
321 | #define TXDESC_CSUM_ALL 0x00C00000 | |
322 | #define TXDESC_CRC_EN_REPLACE 0x01000000 | |
323 | #define TXDESC_CRC_EN_APPEND 0x02000000 | |
324 | #define TXDESC_DISABLE_PAD 0x04000000 | |
325 | #define TXDESC_FIRST_SEG 0x10000000 | |
326 | #define TXDESC_LAST_SEG 0x20000000 | |
327 | #define TXDESC_INTERRUPT 0x40000000 | |
328 | ||
329 | #define DESC_OWN 0x80000000 | |
330 | #define DESC_BUFFER1_SZ_MASK 0x00001fff | |
331 | #define DESC_BUFFER2_SZ_MASK 0x1fff0000 | |
332 | #define DESC_BUFFER2_SZ_OFFSET 16 | |
333 | ||
334 | struct xgmac_dma_desc { | |
335 | __le32 flags; | |
336 | __le32 buf_size; | |
337 | __le32 buf1_addr; /* Buffer 1 Address Pointer */ | |
338 | __le32 buf2_addr; /* Buffer 2 Address Pointer */ | |
339 | __le32 ext_status; | |
340 | __le32 res[3]; | |
341 | }; | |
342 | ||
343 | struct xgmac_extra_stats { | |
344 | /* Transmit errors */ | |
345 | unsigned long tx_jabber; | |
346 | unsigned long tx_frame_flushed; | |
347 | unsigned long tx_payload_error; | |
348 | unsigned long tx_ip_header_error; | |
349 | unsigned long tx_local_fault; | |
350 | unsigned long tx_remote_fault; | |
351 | /* Receive errors */ | |
352 | unsigned long rx_watchdog; | |
353 | unsigned long rx_da_filter_fail; | |
354 | unsigned long rx_sa_filter_fail; | |
355 | unsigned long rx_payload_error; | |
356 | unsigned long rx_ip_header_error; | |
357 | /* Tx/Rx IRQ errors */ | |
358 | unsigned long tx_undeflow; | |
359 | unsigned long tx_process_stopped; | |
360 | unsigned long rx_buf_unav; | |
361 | unsigned long rx_process_stopped; | |
362 | unsigned long tx_early; | |
363 | unsigned long fatal_bus_error; | |
364 | }; | |
365 | ||
366 | struct xgmac_priv { | |
367 | struct xgmac_dma_desc *dma_rx; | |
368 | struct sk_buff **rx_skbuff; | |
369 | unsigned int rx_tail; | |
370 | unsigned int rx_head; | |
371 | ||
372 | struct xgmac_dma_desc *dma_tx; | |
373 | struct sk_buff **tx_skbuff; | |
374 | unsigned int tx_head; | |
375 | unsigned int tx_tail; | |
376 | ||
377 | void __iomem *base; | |
378 | struct sk_buff_head rx_recycle; | |
379 | unsigned int dma_buf_sz; | |
380 | dma_addr_t dma_rx_phy; | |
381 | dma_addr_t dma_tx_phy; | |
382 | ||
383 | struct net_device *dev; | |
384 | struct device *device; | |
385 | struct napi_struct napi; | |
386 | ||
387 | struct xgmac_extra_stats xstats; | |
388 | ||
389 | spinlock_t stats_lock; | |
390 | int pmt_irq; | |
391 | char rx_pause; | |
392 | char tx_pause; | |
393 | int wolopts; | |
394 | }; | |
395 | ||
396 | /* XGMAC Configuration Settings */ | |
397 | #define MAX_MTU 9000 | |
398 | #define PAUSE_TIME 0x400 | |
399 | ||
400 | #define DMA_RX_RING_SZ 256 | |
401 | #define DMA_TX_RING_SZ 128 | |
402 | /* minimum number of free TX descriptors required to wake up TX process */ | |
403 | #define TX_THRESH (DMA_TX_RING_SZ/4) | |
404 | ||
405 | /* DMA descriptor ring helpers */ | |
406 | #define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) | |
407 | #define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) | |
408 | #define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) | |
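/*
 * Worked example (illustrative only, not from the original source): these
 * helpers implement head/tail circular-buffer accounting for a power-of-two
 * ring via CIRC_CNT()/CIRC_SPACE().  For a ring of 8 entries with head = 5
 * and tail = 2:
 *
 *	dma_ring_cnt(5, 2, 8)   == 3   descriptors currently in flight
 *	dma_ring_space(5, 2, 8) == 4   free slots (one slot is always kept
 *	                               empty so head == tail means "empty")
 *	dma_ring_incr(7, 8)     == 0   the index wraps at the end of the ring
 */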
409 | ||
410 | /* XGMAC Descriptor Access Helpers */ | |
411 | static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) | |
412 | { | |
413 | if (buf_sz > MAX_DESC_BUF_SZ) | |
414 | p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ | | |
415 | (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET); | |
416 | else | |
417 | p->buf_size = cpu_to_le32(buf_sz); | |
418 | } | |
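/*
 * Worked example (illustrative only): each descriptor can point at two
 * buffers, so a request larger than MAX_DESC_BUF_SZ (0x2000 - 8 = 8184)
 * is split between the buffer 1 and buffer 2 size fields.  For a
 * jumbo-sized buf_sz of 9000 bytes:
 *
 *	buffer 1 size = 8184, buffer 2 size = 9000 - 8184 = 816
 *	buf_size      = 8184 | (816 << 16) = 0x03301ff8
 */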
419 | ||
420 | static inline int desc_get_buf_len(struct xgmac_dma_desc *p) | |
421 | { | |
422 | u32 len = le32_to_cpu(p->buf_size); | |
423 | return (len & DESC_BUFFER1_SZ_MASK) + | |
424 | ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); | |
425 | } | |
426 | ||
427 | static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size, | |
428 | int buf_sz) | |
429 | { | |
430 | struct xgmac_dma_desc *end = p + ring_size - 1; | |
431 | ||
432 | memset(p, 0, sizeof(*p) * ring_size); | |
433 | ||
434 | for (; p <= end; p++) | |
435 | desc_set_buf_len(p, buf_sz); | |
436 | ||
437 | end->buf_size |= cpu_to_le32(RXDESC1_END_RING); | |
438 | } | |
439 | ||
440 | static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size) | |
441 | { | |
442 | memset(p, 0, sizeof(*p) * ring_size); | |
443 | p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING); | |
444 | } | |
445 | ||
446 | static inline int desc_get_owner(struct xgmac_dma_desc *p) | |
447 | { | |
448 | return le32_to_cpu(p->flags) & DESC_OWN; | |
449 | } | |
450 | ||
451 | static inline void desc_set_rx_owner(struct xgmac_dma_desc *p) | |
452 | { | |
453 | /* Clear all fields and set the owner */ | |
454 | p->flags = cpu_to_le32(DESC_OWN); | |
455 | } | |
456 | ||
457 | static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags) | |
458 | { | |
459 | u32 tmpflags = le32_to_cpu(p->flags); | |
460 | tmpflags &= TXDESC_END_RING; | |
461 | tmpflags |= flags | DESC_OWN; | |
462 | p->flags = cpu_to_le32(tmpflags); | |
463 | } | |
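/*
 * Worked example (illustrative only): only the TXDESC_END_RING marker is
 * preserved from the previous flags word; the new flags and DESC_OWN are
 * ORed in so the DMA engine takes ownership.  Handing back the last
 * descriptor of the ring (flags currently 0x00200000) with
 * desc_set_tx_owner(p, TXDESC_FIRST_SEG | TXDESC_LAST_SEG | TXDESC_INTERRUPT)
 * leaves flags = 0x00200000 | 0x70000000 | DESC_OWN = 0xf0200000.
 */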
464 | ||
465 | static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) | |
466 | { | |
467 | return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; | |
468 | } | |
469 | ||
470 | static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) | |
471 | { | |
472 | return le32_to_cpu(p->buf1_addr); | |
473 | } | |
474 | ||
475 | static inline void desc_set_buf_addr(struct xgmac_dma_desc *p, | |
476 | u32 paddr, int len) | |
477 | { | |
478 | p->buf1_addr = cpu_to_le32(paddr); | |
479 | if (len > MAX_DESC_BUF_SZ) | |
480 | p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ); | |
481 | } | |
482 | ||
483 | static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p, | |
484 | u32 paddr, int len) | |
485 | { | |
486 | desc_set_buf_len(p, len); | |
487 | desc_set_buf_addr(p, paddr, len); | |
488 | } | |
489 | ||
490 | static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p) | |
491 | { | |
492 | u32 data = le32_to_cpu(p->flags); | |
493 | u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET; | |
494 | if (data & RXDESC_FRAME_TYPE) | |
495 | len -= ETH_FCS_LEN; | |
496 | ||
497 | return len; | |
498 | } | |
499 | ||
500 | static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr) | |
501 | { | |
502 | int timeout = 1000; | |
503 | u32 reg = readl(ioaddr + XGMAC_OMR); | |
504 | writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR); | |
505 | ||
506 | while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF) | |
507 | udelay(1); | |
508 | } | |
509 | ||
510 | static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) | |
511 | { | |
512 | struct xgmac_extra_stats *x = &priv->xstats; | |
513 | u32 status = le32_to_cpu(p->flags); | |
514 | ||
515 | if (!(status & TXDESC_ERROR_SUMMARY)) | |
516 | return 0; | |
517 | ||
518 | netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status); | |
519 | if (status & TXDESC_JABBER_TIMEOUT) | |
520 | x->tx_jabber++; | |
521 | if (status & TXDESC_FRAME_FLUSHED) | |
522 | x->tx_frame_flushed++; | |
523 | if (status & TXDESC_UNDERFLOW_ERR) | |
524 | xgmac_dma_flush_tx_fifo(priv->base); | |
525 | if (status & TXDESC_IP_HEADER_ERR) | |
526 | x->tx_ip_header_error++; | |
527 | if (status & TXDESC_LOCAL_FAULT) | |
528 | x->tx_local_fault++; | |
529 | if (status & TXDESC_REMOTE_FAULT) | |
530 | x->tx_remote_fault++; | |
531 | if (status & TXDESC_PAYLOAD_CSUM_ERR) | |
532 | x->tx_payload_error++; | |
533 | ||
534 | return -1; | |
535 | } | |
536 | ||
537 | static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) | |
538 | { | |
539 | struct xgmac_extra_stats *x = &priv->xstats; | |
540 | int ret = CHECKSUM_UNNECESSARY; | |
541 | u32 status = le32_to_cpu(p->flags); | |
542 | u32 ext_status = le32_to_cpu(p->ext_status); | |
543 | ||
544 | if (status & RXDESC_DA_FILTER_FAIL) { | |
545 | netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n"); | |
546 | x->rx_da_filter_fail++; | |
547 | return -1; | |
548 | } | |
549 | ||
550 | /* Check if packet has checksum already */ | |
551 | if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && | |
552 | !(ext_status & RXDESC_IP_PAYLOAD_MASK)) | |
553 | ret = CHECKSUM_NONE; | |
554 | ||
555 | netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n", | |
556 | (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status); | |
557 | ||
558 | if (!(status & RXDESC_ERROR_SUMMARY)) | |
559 | return ret; | |
560 | ||
561 | /* Handle any errors */ | |
562 | if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR | | |
563 | RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR)) | |
564 | return -1; | |
565 | ||
566 | if (status & RXDESC_EXT_STATUS) { | |
567 | if (ext_status & RXDESC_IP_HEADER_ERR) | |
568 | x->rx_ip_header_error++; | |
569 | if (ext_status & RXDESC_IP_PAYLOAD_ERR) | |
570 | x->rx_payload_error++; | |
571 | netdev_dbg(priv->dev, "IP checksum error - stat %08x\n", | |
572 | ext_status); | |
573 | return CHECKSUM_NONE; | |
574 | } | |
575 | ||
576 | return ret; | |
577 | } | |
578 | ||
579 | static inline void xgmac_mac_enable(void __iomem *ioaddr) | |
580 | { | |
581 | u32 value = readl(ioaddr + XGMAC_CONTROL); | |
582 | value |= MAC_ENABLE_RX | MAC_ENABLE_TX; | |
583 | writel(value, ioaddr + XGMAC_CONTROL); | |
584 | ||
585 | value = readl(ioaddr + XGMAC_DMA_CONTROL); | |
586 | value |= DMA_CONTROL_ST | DMA_CONTROL_SR; | |
587 | writel(value, ioaddr + XGMAC_DMA_CONTROL); | |
588 | } | |
589 | ||
590 | static inline void xgmac_mac_disable(void __iomem *ioaddr) | |
591 | { | |
592 | u32 value = readl(ioaddr + XGMAC_DMA_CONTROL); | |
593 | value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); | |
594 | writel(value, ioaddr + XGMAC_DMA_CONTROL); | |
595 | ||
596 | value = readl(ioaddr + XGMAC_CONTROL); | |
597 | value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); | |
598 | writel(value, ioaddr + XGMAC_CONTROL); | |
599 | } | |
600 | ||
601 | static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, | |
602 | int num) | |
603 | { | |
604 | u32 data; | |
605 | ||
606 | data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); | |
607 | writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); | |
608 | data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; | |
609 | writel(data, ioaddr + XGMAC_ADDR_LOW(num)); | |
610 | } | |
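/*
 * Worked example (illustrative only): the address bytes are packed
 * little-endian-style across the two registers.  Writing
 * addr = 00:11:22:33:44:55 to filter slot num = 1 programs:
 *
 *	XGMAC_ADDR_HIGH(1) (offset 0x48) = 0x80005544  (AE bit | addr[5],addr[4])
 *	XGMAC_ADDR_LOW(1)  (offset 0x4c) = 0x33221100  (addr[3]..addr[0])
 */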
611 | ||
612 | static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, | |
613 | int num) | |
614 | { | |
615 | u32 hi_addr, lo_addr; | |
616 | ||
617 | /* Read the MAC address from the hardware */ | |
618 | hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num)); | |
619 | lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num)); | |
620 | ||
621 | /* Extract the MAC address from the high and low words */ | |
622 | addr[0] = lo_addr & 0xff; | |
623 | addr[1] = (lo_addr >> 8) & 0xff; | |
624 | addr[2] = (lo_addr >> 16) & 0xff; | |
625 | addr[3] = (lo_addr >> 24) & 0xff; | |
626 | addr[4] = hi_addr & 0xff; | |
627 | addr[5] = (hi_addr >> 8) & 0xff; | |
628 | } | |
629 | ||
630 | static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx) | |
631 | { | |
632 | u32 reg; | |
633 | unsigned int flow = 0; | |
634 | ||
635 | priv->rx_pause = rx; | |
636 | priv->tx_pause = tx; | |
637 | ||
638 | if (rx || tx) { | |
639 | if (rx) | |
640 | flow |= XGMAC_FLOW_CTRL_RFE; | |
641 | if (tx) | |
642 | flow |= XGMAC_FLOW_CTRL_TFE; | |
643 | ||
644 | flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP; | |
645 | flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT); | |
646 | ||
647 | writel(flow, priv->base + XGMAC_FLOW_CTRL); | |
648 | ||
649 | reg = readl(priv->base + XGMAC_OMR); | |
650 | reg |= XGMAC_OMR_EFC; | |
651 | writel(reg, priv->base + XGMAC_OMR); | |
652 | } else { | |
653 | writel(0, priv->base + XGMAC_FLOW_CTRL); | |
654 | ||
655 | reg = readl(priv->base + XGMAC_OMR); | |
656 | reg &= ~XGMAC_OMR_EFC; | |
657 | writel(reg, priv->base + XGMAC_OMR); | |
658 | } | |
659 | ||
660 | return 0; | |
661 | } | |
662 | ||
663 | static void xgmac_rx_refill(struct xgmac_priv *priv) | |
664 | { | |
665 | struct xgmac_dma_desc *p; | |
666 | dma_addr_t paddr; | |
667 | ||
668 | while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { | |
669 | int entry = priv->rx_head; | |
670 | struct sk_buff *skb; | |
671 | ||
672 | p = priv->dma_rx + entry; | |
673 | ||
674 | if (priv->rx_skbuff[entry] == NULL) { |
675 | skb = __skb_dequeue(&priv->rx_recycle); | |
676 | if (skb == NULL) | |
677 | skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); | |
678 | if (unlikely(skb == NULL)) | |
679 | break; | |
680 | ||
681 | priv->rx_skbuff[entry] = skb; | |
682 | paddr = dma_map_single(priv->device, skb->data, | |
683 | priv->dma_buf_sz, DMA_FROM_DEVICE); | |
684 | desc_set_buf_addr(p, paddr, priv->dma_buf_sz); | |
685 | } | |
686 | |
687 | netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n", | |
688 | priv->rx_head, priv->rx_tail); | |
689 | ||
690 | priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ); | |
691 | desc_set_rx_owner(p); |
692 | } | |
693 | } | |
694 | ||
695 | /** | |
696 | * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings | |
697 | * @dev: net device structure | |
698 | * Description: this function initializes the DMA RX/TX descriptors | |
699 | * and allocates the socket buffers. | |
700 | */ | |
701 | static int xgmac_dma_desc_rings_init(struct net_device *dev) | |
702 | { | |
703 | struct xgmac_priv *priv = netdev_priv(dev); | |
704 | unsigned int bfsize; | |
705 | ||
706 | /* Set the Buffer size according to the MTU; | |
707 | * for jumbo frames the buffer sizes need to be bumped up. | |
708 | */ | |
709 | bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64, | |
710 | 64); | |
711 | ||
712 | netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); | |
713 | ||
714 | priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ, | |
715 | GFP_KERNEL); | |
716 | if (!priv->rx_skbuff) | |
717 | return -ENOMEM; | |
718 | ||
719 | priv->dma_rx = dma_alloc_coherent(priv->device, | |
720 | DMA_RX_RING_SZ * | |
721 | sizeof(struct xgmac_dma_desc), | |
722 | &priv->dma_rx_phy, | |
723 | GFP_KERNEL); | |
724 | if (!priv->dma_rx) | |
725 | goto err_dma_rx; | |
726 | ||
727 | priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ, | |
728 | GFP_KERNEL); | |
729 | if (!priv->tx_skbuff) | |
730 | goto err_tx_skb; | |
731 | ||
732 | priv->dma_tx = dma_alloc_coherent(priv->device, | |
733 | DMA_TX_RING_SZ * | |
734 | sizeof(struct xgmac_dma_desc), | |
735 | &priv->dma_tx_phy, | |
736 | GFP_KERNEL); | |
737 | if (!priv->dma_tx) | |
738 | goto err_dma_tx; | |
739 | ||
740 | netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, " | |
741 | "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", | |
742 | priv->dma_rx, priv->dma_tx, | |
743 | (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); | |
744 | ||
745 | priv->rx_tail = 0; | |
746 | priv->rx_head = 0; | |
747 | priv->dma_buf_sz = bfsize; | |
748 | desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz); | |
749 | xgmac_rx_refill(priv); | |
750 | ||
751 | priv->tx_tail = 0; | |
752 | priv->tx_head = 0; | |
753 | desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); | |
754 | ||
755 | writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); | |
756 | writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR); | |
757 | ||
758 | return 0; | |
759 | ||
760 | err_dma_tx: | |
761 | kfree(priv->tx_skbuff); | |
762 | err_tx_skb: | |
763 | dma_free_coherent(priv->device, | |
764 | DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), | |
765 | priv->dma_rx, priv->dma_rx_phy); | |
766 | err_dma_rx: | |
767 | kfree(priv->rx_skbuff); | |
768 | return -ENOMEM; | |
769 | } | |
770 | ||
771 | static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) | |
772 | { | |
773 | int i; | |
774 | struct xgmac_dma_desc *p; | |
775 | ||
776 | if (!priv->rx_skbuff) | |
777 | return; | |
778 | ||
779 | for (i = 0; i < DMA_RX_RING_SZ; i++) { | |
780 | if (priv->rx_skbuff[i] == NULL) | |
781 | continue; | |
782 | ||
783 | p = priv->dma_rx + i; | |
784 | dma_unmap_single(priv->device, desc_get_buf_addr(p), | |
785 | priv->dma_buf_sz, DMA_FROM_DEVICE); | |
786 | dev_kfree_skb_any(priv->rx_skbuff[i]); | |
787 | priv->rx_skbuff[i] = NULL; | |
788 | } | |
789 | } | |
790 | ||
791 | static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) | |
792 | { | |
793 | int i, f; | |
794 | struct xgmac_dma_desc *p; | |
795 | ||
796 | if (!priv->tx_skbuff) | |
797 | return; | |
798 | ||
799 | for (i = 0; i < DMA_TX_RING_SZ; i++) { | |
800 | if (priv->tx_skbuff[i] == NULL) | |
801 | continue; | |
802 | ||
803 | p = priv->dma_tx + i; | |
804 | dma_unmap_single(priv->device, desc_get_buf_addr(p), | |
805 | desc_get_buf_len(p), DMA_TO_DEVICE); | |
806 | ||
807 | for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) { | |
808 | p = priv->dma_tx + i++; | |
809 | dma_unmap_page(priv->device, desc_get_buf_addr(p), | |
810 | desc_get_buf_len(p), DMA_TO_DEVICE); | |
811 | } | |
812 | ||
813 | dev_kfree_skb_any(priv->tx_skbuff[i]); | |
814 | priv->tx_skbuff[i] = NULL; | |
815 | } | |
816 | } | |
817 | ||
818 | static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv) | |
819 | { | |
820 | /* Release the DMA TX/RX socket buffers */ | |
821 | xgmac_free_rx_skbufs(priv); | |
822 | xgmac_free_tx_skbufs(priv); | |
823 | ||
824 | /* Free the consistent memory allocated for descriptor rings */ | |
825 | if (priv->dma_tx) { | |
826 | dma_free_coherent(priv->device, | |
827 | DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc), | |
828 | priv->dma_tx, priv->dma_tx_phy); | |
829 | priv->dma_tx = NULL; | |
830 | } | |
831 | if (priv->dma_rx) { | |
832 | dma_free_coherent(priv->device, | |
833 | DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), | |
834 | priv->dma_rx, priv->dma_rx_phy); | |
835 | priv->dma_rx = NULL; | |
836 | } | |
837 | kfree(priv->rx_skbuff); | |
838 | priv->rx_skbuff = NULL; | |
839 | kfree(priv->tx_skbuff); | |
840 | priv->tx_skbuff = NULL; | |
841 | } | |
842 | ||
843 | /** | |
844 | * xgmac_tx_complete: | |
845 | * @priv: private driver structure | |
846 | * Description: it reclaims resources after transmission completes. | |
847 | */ | |
848 | static void xgmac_tx_complete(struct xgmac_priv *priv) | |
849 | { | |
850 | int i; | |
851 | void __iomem *ioaddr = priv->base; | |
852 | ||
853 | writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS); | |
854 | ||
855 | while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { | |
856 | unsigned int entry = priv->tx_tail; | |
857 | struct sk_buff *skb = priv->tx_skbuff[entry]; | |
858 | struct xgmac_dma_desc *p = priv->dma_tx + entry; | |
859 | ||
860 | /* Check if the descriptor is owned by the DMA. */ | |
861 | if (desc_get_owner(p)) | |
862 | break; | |
863 | ||
864 | /* Verify tx error by looking at the last segment */ | |
865 | if (desc_get_tx_ls(p)) | |
866 | desc_get_tx_status(priv, p); | |
867 | ||
868 | netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", | |
869 | priv->tx_head, priv->tx_tail); | |
870 | ||
871 | dma_unmap_single(priv->device, desc_get_buf_addr(p), | |
872 | desc_get_buf_len(p), DMA_TO_DEVICE); | |
873 | ||
874 | priv->tx_skbuff[entry] = NULL; | |
875 | priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); | |
876 | ||
877 | if (!skb) { | |
878 | continue; | |
879 | } | |
880 | ||
881 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
882 | entry = priv->tx_tail = dma_ring_incr(priv->tx_tail, | |
883 | DMA_TX_RING_SZ); | |
884 | p = priv->dma_tx + priv->tx_tail; | |
885 | ||
886 | dma_unmap_page(priv->device, desc_get_buf_addr(p), | |
887 | desc_get_buf_len(p), DMA_TO_DEVICE); | |
888 | } | |
889 | ||
890 | /* | |
891 | * If there's room in the recycle queue (capped at the RX | |
892 | * ring size) and the skb is the right size, put it back | |
893 | * into the pool for reuse. | |
894 | */ | |
895 | if ((skb_queue_len(&priv->rx_recycle) < | |
896 | DMA_RX_RING_SZ) && | |
897 | skb_recycle_check(skb, priv->dma_buf_sz)) | |
898 | __skb_queue_head(&priv->rx_recycle, skb); | |
899 | else | |
900 | dev_kfree_skb(skb); | |
901 | } | |
902 | ||
903 | if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > | |
904 | TX_THRESH) | |
905 | netif_wake_queue(priv->dev); | |
906 | } | |
907 | ||
908 | /** | |
909 | * xgmac_tx_err: | |
910 | * @priv: pointer to the private device structure | |
911 | * Description: it cleans the descriptors and restarts the transmission | |
912 | * in case of errors. | |
913 | */ | |
914 | static void xgmac_tx_err(struct xgmac_priv *priv) | |
915 | { | |
916 | u32 reg, value, inten; | |
917 | ||
918 | netif_stop_queue(priv->dev); | |
919 | ||
920 | inten = readl(priv->base + XGMAC_DMA_INTR_ENA); | |
921 | writel(0, priv->base + XGMAC_DMA_INTR_ENA); | |
922 | ||
923 | reg = readl(priv->base + XGMAC_DMA_CONTROL); | |
924 | writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); | |
925 | do { | |
926 | value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000; | |
927 | } while (value && (value != 0x600000)); | |
928 | ||
929 | xgmac_free_tx_skbufs(priv); | |
930 | desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); | |
931 | priv->tx_tail = 0; | |
932 | priv->tx_head = 0; | |
933 | writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); | |
934 | writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); |
935 | ||
936 | writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, | |
937 | priv->base + XGMAC_DMA_STATUS); | |
938 | writel(inten, priv->base + XGMAC_DMA_INTR_ENA); | |
939 | ||
940 | netif_wake_queue(priv->dev); | |
941 | } | |
942 | ||
943 | static int xgmac_hw_init(struct net_device *dev) | |
944 | { | |
945 | u32 value, ctrl; | |
946 | int limit; | |
947 | struct xgmac_priv *priv = netdev_priv(dev); | |
948 | void __iomem *ioaddr = priv->base; | |
949 | ||
950 | /* Save the ctrl register value */ | |
951 | ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK; | |
952 | ||
953 | /* SW reset */ | |
954 | value = DMA_BUS_MODE_SFT_RESET; | |
955 | writel(value, ioaddr + XGMAC_DMA_BUS_MODE); | |
956 | limit = 15000; | |
957 | while (limit-- && | |
958 | (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) | |
959 | cpu_relax(); | |
960 | if (limit < 0) | |
961 | return -EBUSY; | |
962 | ||
963 | value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) | | |
964 | (0x10 << DMA_BUS_MODE_RPBL_SHIFT) | | |
965 | DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; | |
966 | writel(value, ioaddr + XGMAC_DMA_BUS_MODE); | |
967 | ||
968 | /* Enable interrupts */ | |
969 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); | |
970 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); | |
971 | ||
972 | /* XGMAC requires AXI bus init. This is a 'magic number' for now */ | |
973 | writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); | |
974 | |
975 | ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS | | |
976 | XGMAC_CONTROL_CAR; | |
977 | if (dev->features & NETIF_F_RXCSUM) | |
978 | ctrl |= XGMAC_CONTROL_IPC; | |
979 | writel(ctrl, ioaddr + XGMAC_CONTROL); | |
980 | ||
981 | value = DMA_CONTROL_DFF; | |
982 | writel(value, ioaddr + XGMAC_DMA_CONTROL); | |
983 | ||
984 | /* Set the HW DMA mode and the COE */ | |
985 | writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA, | |
986 | ioaddr + XGMAC_OMR); | |
987 | ||
988 | /* Reset the MMC counters */ | |
989 | writel(1, ioaddr + XGMAC_MMC_CTRL); | |
990 | return 0; | |
991 | } | |
992 | ||
993 | /** | |
994 | * xgmac_open - open entry point of the driver | |
995 | * @dev : pointer to the device structure. | |
996 | * Description: | |
997 | * This function is the open entry point of the driver. | |
998 | * Return value: | |
999 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | |
1000 | * file on failure. | |
1001 | */ | |
1002 | static int xgmac_open(struct net_device *dev) | |
1003 | { | |
1004 | int ret; | |
1005 | struct xgmac_priv *priv = netdev_priv(dev); | |
1006 | void __iomem *ioaddr = priv->base; | |
1007 | ||
1008 | /* If the MAC address is not valid, fall back to a randomly | |
1009 | * generated one. The user can still assign an address using | |
1010 | * the following linux command: | |
1011 | * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ | |
1012 | if (!is_valid_ether_addr(dev->dev_addr)) { | |
1013 | eth_hw_addr_random(dev); | |
1014 | netdev_dbg(priv->dev, "generated random MAC address %pM\n", |
1015 | dev->dev_addr); | |
1016 | } | |
1017 | ||
1018 | skb_queue_head_init(&priv->rx_recycle); | |
1019 | memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); | |
1020 | ||
1021 | /* Initialize the XGMAC and descriptors */ | |
1022 | xgmac_hw_init(dev); | |
1023 | xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); | |
1024 | xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause); | |
1025 | ||
1026 | ret = xgmac_dma_desc_rings_init(dev); | |
1027 | if (ret < 0) | |
1028 | return ret; | |
1029 | ||
1030 | /* Enable the MAC Rx/Tx */ | |
1031 | xgmac_mac_enable(ioaddr); | |
1032 | ||
1033 | napi_enable(&priv->napi); | |
1034 | netif_start_queue(dev); | |
1035 | ||
1036 | return 0; | |
1037 | } | |
1038 | ||
1039 | /** | |
1040 | * xgmac_stop - close entry point of the driver | |
1041 | * @dev : device pointer. | |
1042 | * Description: | |
1043 | * This is the stop entry point of the driver. | |
1044 | */ | |
1045 | static int xgmac_stop(struct net_device *dev) | |
1046 | { | |
1047 | struct xgmac_priv *priv = netdev_priv(dev); | |
1048 | ||
1049 | netif_stop_queue(dev); | |
1050 | ||
1051 | if (readl(priv->base + XGMAC_DMA_INTR_ENA)) | |
1052 | napi_disable(&priv->napi); | |
1053 | ||
1054 | writel(0, priv->base + XGMAC_DMA_INTR_ENA); | |
1055 | skb_queue_purge(&priv->rx_recycle); | |
1056 | ||
1057 | /* Disable the MAC core */ | |
1058 | xgmac_mac_disable(priv->base); | |
1059 | ||
1060 | /* Release and free the Rx/Tx resources */ | |
1061 | xgmac_free_dma_desc_rings(priv); | |
1062 | ||
1063 | return 0; | |
1064 | } | |
1065 | ||
1066 | /** | |
1067 | * xgmac_xmit: | |
1068 | * @skb : the socket buffer | |
1069 | * @dev : device pointer | |
1070 | * Description : Tx entry point of the driver. | |
1071 | */ | |
1072 | static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) | |
1073 | { | |
1074 | struct xgmac_priv *priv = netdev_priv(dev); | |
1075 | unsigned int entry; | |
1076 | int i; | |
1077 | int nfrags = skb_shinfo(skb)->nr_frags; | |
1078 | struct xgmac_dma_desc *desc, *first; | |
1079 | unsigned int desc_flags; | |
1080 | unsigned int len; | |
1081 | dma_addr_t paddr; | |
1082 | ||
1083 | if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < | |
1084 | (nfrags + 1)) { | |
1085 | writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE, | |
1086 | priv->base + XGMAC_DMA_INTR_ENA); | |
1087 | netif_stop_queue(dev); | |
1088 | return NETDEV_TX_BUSY; | |
1089 | } | |
1090 | ||
1091 | desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? | |
1092 | TXDESC_CSUM_ALL : 0; | |
1093 | entry = priv->tx_head; | |
1094 | desc = priv->dma_tx + entry; | |
1095 | first = desc; | |
1096 | ||
1097 | len = skb_headlen(skb); | |
1098 | paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); | |
1099 | if (dma_mapping_error(priv->device, paddr)) { | |
1100 | dev_kfree_skb(skb); | |
1101 | return -EIO; | |
1102 | } | |
1103 | priv->tx_skbuff[entry] = skb; | |
1104 | desc_set_buf_addr_and_size(desc, paddr, len); | |
1105 | ||
1106 | for (i = 0; i < nfrags; i++) { | |
1107 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1108 | ||
1109 | len = frag->size; | |
1110 | ||
1111 | paddr = skb_frag_dma_map(priv->device, frag, 0, len, | |
1112 | DMA_TO_DEVICE); | |
1113 | if (dma_mapping_error(priv->device, paddr)) { | |
1114 | dev_kfree_skb(skb); | |
1115 | return -EIO; | |
1116 | } | |
1117 | ||
1118 | entry = dma_ring_incr(entry, DMA_TX_RING_SZ); | |
1119 | desc = priv->dma_tx + entry; | |
1120 | priv->tx_skbuff[entry] = NULL; | |
1121 | ||
1122 | desc_set_buf_addr_and_size(desc, paddr, len); | |
1123 | if (i < (nfrags - 1)) | |
1124 | desc_set_tx_owner(desc, desc_flags); | |
1125 | } | |
1126 | ||
1127 | /* Interrupt on completion only for the last segment */ | |
1128 | if (desc != first) | |
1129 | desc_set_tx_owner(desc, desc_flags | | |
1130 | TXDESC_LAST_SEG | TXDESC_INTERRUPT); | |
1131 | else | |
1132 | desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT; | |
1133 | ||
1134 | /* Set owner on first desc last to avoid race condition */ | |
1135 | wmb(); | |
1136 | desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); | |
1137 | ||
1138 | priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); | |
1139 | ||
1140 | writel(1, priv->base + XGMAC_DMA_TX_POLL); | |
1141 | ||
1142 | return NETDEV_TX_OK; | |
1143 | } | |
1144 | ||
1145 | static int xgmac_rx(struct xgmac_priv *priv, int limit) | |
1146 | { | |
1147 | unsigned int entry; | |
1148 | unsigned int count = 0; | |
1149 | struct xgmac_dma_desc *p; | |
1150 | ||
1151 | while (count < limit) { | |
1152 | int ip_checksum; | |
1153 | struct sk_buff *skb; | |
1154 | int frame_len; | |
1155 | ||
1156 | writel(DMA_STATUS_RI | DMA_STATUS_NIS, | |
1157 | priv->base + XGMAC_DMA_STATUS); | |
1158 | ||
1159 | entry = priv->rx_tail; | |
1160 | p = priv->dma_rx + entry; | |
1161 | if (desc_get_owner(p)) | |
1162 | break; | |
1163 | ||
1164 | count++; | |
1165 | priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ); | |
1166 | ||
1167 | /* read the status of the incoming frame */ | |
1168 | ip_checksum = desc_get_rx_status(priv, p); | |
1169 | if (ip_checksum < 0) | |
1170 | continue; | |
1171 | ||
1172 | skb = priv->rx_skbuff[entry]; | |
1173 | if (unlikely(!skb)) { | |
1174 | netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n"); | |
1175 | break; | |
1176 | } | |
1177 | priv->rx_skbuff[entry] = NULL; | |
1178 | ||
1179 | frame_len = desc_get_rx_frame_len(p); | |
1180 | netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n", | |
1181 | frame_len, ip_checksum); | |
1182 | ||
1183 | skb_put(skb, frame_len); | |
1184 | dma_unmap_single(priv->device, desc_get_buf_addr(p), | |
1185 | frame_len, DMA_FROM_DEVICE); | |
1186 | ||
1187 | skb->protocol = eth_type_trans(skb, priv->dev); | |
1188 | skb->ip_summed = ip_checksum; | |
1189 | if (ip_checksum == CHECKSUM_NONE) | |
1190 | netif_receive_skb(skb); | |
1191 | else | |
1192 | napi_gro_receive(&priv->napi, skb); | |
1193 | } | |
1194 | ||
1195 | xgmac_rx_refill(priv); | |
1196 | ||
1197 | writel(1, priv->base + XGMAC_DMA_RX_POLL); | |
1198 | ||
1199 | return count; | |
1200 | } | |
1201 | ||
1202 | /** | |
1203 | * xgmac_poll - xgmac poll method (NAPI) | |
1204 | * @napi : pointer to the napi structure. | |
1205 | * @budget : maximum number of packets that the current CPU can receive from | |
1206 | * all interfaces. | |
1207 | * Description : | |
1208 | * This function implements the reception process. | |
1209 | * Also it runs the TX completion thread | |
1210 | */ | |
1211 | static int xgmac_poll(struct napi_struct *napi, int budget) | |
1212 | { | |
1213 | struct xgmac_priv *priv = container_of(napi, | |
1214 | struct xgmac_priv, napi); | |
1215 | int work_done = 0; | |
1216 | ||
1217 | xgmac_tx_complete(priv); | |
1218 | work_done = xgmac_rx(priv, budget); | |
1219 | ||
1220 | if (work_done < budget) { | |
1221 | napi_complete(napi); | |
1222 | writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); | |
1223 | } | |
1224 | return work_done; | |
1225 | } | |
1226 | ||
1227 | /** | |
1228 | * xgmac_tx_timeout | |
1229 | * @dev : Pointer to net device structure | |
1230 | * Description: this function is called when a packet transmission fails to | |
1231 | * complete within a reasonable time. The driver will mark the error in the | |
1232 | * netdev structure and arrange for the device to be reset to a sane state | |
1233 | * in order to transmit a new packet. | |
1234 | */ | |
1235 | static void xgmac_tx_timeout(struct net_device *dev) | |
1236 | { | |
1237 | struct xgmac_priv *priv = netdev_priv(dev); | |
1238 | ||
1239 | /* Clear Tx resources and restart transmitting again */ | |
1240 | xgmac_tx_err(priv); | |
1241 | } | |
1242 | ||
1243 | /** | |
1244 | * xgmac_set_rx_mode - entry point for multicast addressing | |
1245 | * @dev : pointer to the device structure | |
1246 | * Description: | |
1247 | * This function is a driver entry point which gets called by the kernel | |
1248 | * whenever multicast addresses must be enabled/disabled. | |
1249 | * Return value: | |
1250 | * void. | |
1251 | */ | |
1252 | static void xgmac_set_rx_mode(struct net_device *dev) | |
1253 | { | |
1254 | int i; | |
1255 | struct xgmac_priv *priv = netdev_priv(dev); | |
1256 | void __iomem *ioaddr = priv->base; | |
1257 | unsigned int value = 0; | |
1258 | u32 hash_filter[XGMAC_NUM_HASH]; | |
1259 | int reg = 1; | |
1260 | struct netdev_hw_addr *ha; | |
1261 | bool use_hash = false; | |
1262 | ||
1263 | netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", | |
1264 | netdev_mc_count(dev), netdev_uc_count(dev)); | |
1265 | ||
1266 | if (dev->flags & IFF_PROMISC) { | |
1267 | writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); | |
1268 | return; | |
1269 | } | |
1270 | ||
1271 | memset(hash_filter, 0, sizeof(hash_filter)); | |
1272 | ||
1273 | if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { | |
1274 | use_hash = true; | |
1275 | value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; | |
1276 | } | |
1277 | netdev_for_each_uc_addr(ha, dev) { | |
1278 | if (use_hash) { | |
1279 | u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; | |
1280 | ||
1281 | /* The most significant 4 bits determine the register to | |
1282 | * use (H/L) while the other 5 bits determine the bit | |
1283 | * within the register. */ | |
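			/* Example with a hypothetical CRC result: bit_nr = 341
			 * (0b101010101) selects XGMAC_HASH(341 >> 5) = XGMAC_HASH(10)
			 * and sets bit (341 & 31) = 21 within it. */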
1284 | hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); | |
1285 | } else { | |
1286 | xgmac_set_mac_addr(ioaddr, ha->addr, reg); | |
1287 | reg++; | |
1288 | } | |
1289 | } | |
1290 | ||
1291 | if (dev->flags & IFF_ALLMULTI) { | |
1292 | value |= XGMAC_FRAME_FILTER_PM; | |
1293 | goto out; | |
1294 | } | |
1295 | ||
1296 | if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { | |
1297 | use_hash = true; | |
1298 | value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; | |
1299 | } | |
1300 | netdev_for_each_mc_addr(ha, dev) { | |
1301 | if (use_hash) { | |
1302 | u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; | |
1303 | ||
1304 | /* The most significant 4 bits determine the register to | |
1305 | * use (H/L) while the other 5 bits determine the bit | |
1306 | * within the register. */ | |
1307 | hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); | |
1308 | } else { | |
1309 | xgmac_set_mac_addr(ioaddr, ha->addr, reg); | |
1310 | reg++; | |
1311 | } | |
1312 | } | |
1313 | ||
1314 | out: | |
1315 | for (i = 0; i < XGMAC_NUM_HASH; i++) | |
1316 | writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); | |
1317 | ||
1318 | writel(value, ioaddr + XGMAC_FRAME_FILTER); | |
1319 | } | |
1320 | ||
1321 | /** | |
1322 | * xgmac_change_mtu - entry point to change MTU size for the device. | |
1323 | * @dev : device pointer. | |
1324 | * @new_mtu : the new MTU size for the device. | |
1325 | * Description: the Maximum Transfer Unit (MTU) is used by the network layer | |
1326 | * to drive packet transmission. Ethernet has an MTU of 1500 octets | |
1327 | * (ETH_DATA_LEN). This value can be changed with ifconfig. | |
1328 | * Return value: | |
1329 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | |
1330 | * file on failure. | |
1331 | */ | |
1332 | static int xgmac_change_mtu(struct net_device *dev, int new_mtu) | |
1333 | { | |
1334 | struct xgmac_priv *priv = netdev_priv(dev); | |
1335 | int old_mtu; | |
1336 | ||
1337 | if ((new_mtu < 46) || (new_mtu > MAX_MTU)) { | |
1338 | netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU); | |
1339 | return -EINVAL; | |
1340 | } | |
1341 | ||
1342 | old_mtu = dev->mtu; | |
1343 | dev->mtu = new_mtu; | |
1344 | ||
1345 | /* return early if the buffer sizes will not change */ | |
1346 | if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) | |
1347 | return 0; | |
1348 | if (old_mtu == new_mtu) | |
1349 | return 0; | |
1350 | ||
1351 | /* Stop everything, get ready to change the MTU */ | |
1352 | if (!netif_running(dev)) | |
1353 | return 0; | |
1354 | ||
1355 | /* Bring the interface down and then back up */ | |
1356 | xgmac_stop(dev); | |
1357 | return xgmac_open(dev); | |
1358 | } | |
1359 | ||
1360 | static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) | |
1361 | { | |
1362 | u32 intr_status; | |
1363 | struct net_device *dev = (struct net_device *)dev_id; | |
1364 | struct xgmac_priv *priv = netdev_priv(dev); | |
1365 | void __iomem *ioaddr = priv->base; | |
1366 | ||
1367 | intr_status = readl(ioaddr + XGMAC_INT_STAT); | |
1368 | if (intr_status & XGMAC_INT_STAT_PMT) { | |
1369 | netdev_dbg(priv->dev, "received Magic frame\n"); | |
1370 | /* clear the PMT bits 5 and 6 by reading the PMT */ | |
1371 | readl(ioaddr + XGMAC_PMT); | |
1372 | } | |
1373 | return IRQ_HANDLED; | |
1374 | } | |
1375 | ||
1376 | static irqreturn_t xgmac_interrupt(int irq, void *dev_id) | |
1377 | { | |
1378 | u32 intr_status; | |
1379 | bool tx_err = false; | |
1380 | struct net_device *dev = (struct net_device *)dev_id; | |
1381 | struct xgmac_priv *priv = netdev_priv(dev); | |
1382 | struct xgmac_extra_stats *x = &priv->xstats; | |
1383 | ||
1384 | /* read the status register (CSR5) */ | |
1385 | intr_status = readl(priv->base + XGMAC_DMA_STATUS); | |
1386 | intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA); | |
1387 | writel(intr_status, priv->base + XGMAC_DMA_STATUS); | |
1388 | ||
1389 | /* It displays the DMA process states (CSR5 register) */ | |
1390 | /* ABNORMAL interrupts */ | |
1391 | if (unlikely(intr_status & DMA_STATUS_AIS)) { | |
1392 | if (intr_status & DMA_STATUS_TJT) { | |
1393 | netdev_err(priv->dev, "transmit jabber\n"); | |
1394 | x->tx_jabber++; | |
1395 | } | |
1396 | if (intr_status & DMA_STATUS_RU) | |
1397 | x->rx_buf_unav++; | |
1398 | if (intr_status & DMA_STATUS_RPS) { | |
1399 | netdev_err(priv->dev, "receive process stopped\n"); | |
1400 | x->rx_process_stopped++; | |
1401 | } | |
1402 | if (intr_status & DMA_STATUS_ETI) { | |
1403 | netdev_err(priv->dev, "transmit early interrupt\n"); | |
1404 | x->tx_early++; | |
1405 | } | |
1406 | if (intr_status & DMA_STATUS_TPS) { | |
1407 | netdev_err(priv->dev, "transmit process stopped\n"); | |
1408 | x->tx_process_stopped++; | |
1409 | tx_err = true; | |
1410 | } | |
1411 | if (intr_status & DMA_STATUS_FBI) { | |
1412 | netdev_err(priv->dev, "fatal bus error\n"); | |
1413 | x->fatal_bus_error++; | |
1414 | tx_err = true; | |
1415 | } | |
1416 | ||
1417 | if (tx_err) | |
1418 | xgmac_tx_err(priv); | |
1419 | } | |
1420 | ||
1421 | /* TX/RX NORMAL interrupts */ | |
1422 | if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) { | |
1423 | writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); | |
1424 | napi_schedule(&priv->napi); | |
1425 | } | |
1426 | ||
1427 | return IRQ_HANDLED; | |
1428 | } | |
1429 | ||
1430 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1431 | /* Polling receive - used by NETCONSOLE and other diagnostic tools | |
1432 | * to allow network I/O with interrupts disabled. */ | |
1433 | static void xgmac_poll_controller(struct net_device *dev) | |
1434 | { | |
1435 | disable_irq(dev->irq); | |
1436 | xgmac_interrupt(dev->irq, dev); | |
1437 | enable_irq(dev->irq); | |
1438 | } | |
1439 | #endif | |
1440 | ||
1441 | static struct rtnl_link_stats64 * |
1442 | xgmac_get_stats64(struct net_device *dev, |
1443 | struct rtnl_link_stats64 *storage) | |
1444 | { | |
1445 | struct xgmac_priv *priv = netdev_priv(dev); | |
1446 | void __iomem *base = priv->base; | |
1447 | u32 count; | |
1448 | ||
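/* Freeze the hardware MMC counters so the 64-bit byte counts and the
 * derived error counts below are read as one consistent snapshot; the
 * counters are released again by the writel(0, ... XGMAC_MMC_CTRL) at the
 * end of this function. */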
1449 | spin_lock_bh(&priv->stats_lock); | |
1450 | writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL); | |
1451 | ||
1452 | storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO); | |
1453 | storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32; | |
1454 | ||
1455 | storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO); | |
1456 | storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G); | |
1457 | storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR); | |
1458 | storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR); | |
1459 | storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW); | |
1460 | ||
1461 | storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO); | |
1462 | storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32; | |
1463 | ||
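/* TXFRAME_GB counts frames transmitted regardless of errors while
 * TXFRAME_G counts only good frames, so their difference gives the
 * number of errored transmit frames. */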
1464 | count = readl(base + XGMAC_MMC_TXFRAME_GB_LO); | |
1465 | storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO); | |
1466 | storage->tx_packets = count; | |
1467 | storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW); | |
1468 | ||
1469 | writel(0, base + XGMAC_MMC_CTRL); | |
1470 | spin_unlock_bh(&priv->stats_lock); | |
1471 | return storage; | |
1472 | } | |
1473 | ||
1474 | static int xgmac_set_mac_address(struct net_device *dev, void *p) | |
1475 | { | |
1476 | struct xgmac_priv *priv = netdev_priv(dev); | |
1477 | void __iomem *ioaddr = priv->base; | |
1478 | struct sockaddr *addr = p; | |
1479 | ||
1480 | if (!is_valid_ether_addr(addr->sa_data)) | |
1481 | return -EADDRNOTAVAIL; | |
1482 | ||
1483 | dev->addr_assign_type &= ~NET_ADDR_RANDOM; |
1484 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
1485 | ||
1486 | xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); | |
1487 | ||
1488 | return 0; | |
1489 | } | |
1490 | ||
1491 | static int xgmac_set_features(struct net_device *dev, netdev_features_t features) | |
1492 | { | |
1493 | u32 ctrl; | |
1494 | struct xgmac_priv *priv = netdev_priv(dev); | |
1495 | void __iomem *ioaddr = priv->base; | |
1496 | u32 changed = dev->features ^ features; | |
1497 | ||
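/* Only a change in NETIF_F_RXCSUM needs action here; it is mapped onto
 * the XGMAC_CONTROL_IPC bit of the MAC control register. */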
1498 | if (!(changed & NETIF_F_RXCSUM)) | |
1499 | return 0; | |
1500 | ||
1501 | ctrl = readl(ioaddr + XGMAC_CONTROL); | |
1502 | if (features & NETIF_F_RXCSUM) | |
1503 | ctrl |= XGMAC_CONTROL_IPC; | |
1504 | else | |
1505 | ctrl &= ~XGMAC_CONTROL_IPC; | |
1506 | writel(ctrl, ioaddr + XGMAC_CONTROL); | |
1507 | ||
1508 | return 0; | |
1509 | } | |
1510 | ||
1511 | static const struct net_device_ops xgmac_netdev_ops = { | |
1512 | .ndo_open = xgmac_open, | |
1513 | .ndo_start_xmit = xgmac_xmit, | |
1514 | .ndo_stop = xgmac_stop, | |
1515 | .ndo_change_mtu = xgmac_change_mtu, | |
1516 | .ndo_set_rx_mode = xgmac_set_rx_mode, | |
1517 | .ndo_tx_timeout = xgmac_tx_timeout, | |
1518 | .ndo_get_stats64 = xgmac_get_stats64, | |
1519 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1520 | .ndo_poll_controller = xgmac_poll_controller, | |
1521 | #endif | |
1522 | .ndo_set_mac_address = xgmac_set_mac_address, | |
1523 | .ndo_set_features = xgmac_set_features, | |
1524 | }; | |
1525 | ||
1526 | static int xgmac_ethtool_getsettings(struct net_device *dev, | |
1527 | struct ethtool_cmd *cmd) | |
1528 | { | |
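/* The link is a fixed 10 Gbit/s, full-duplex internal interface with no
 * autonegotiation, so static values are reported. */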
1529 | cmd->autoneg = 0; | |
1530 | cmd->duplex = DUPLEX_FULL; | |
1531 | ethtool_cmd_speed_set(cmd, 10000); | |
1532 | cmd->supported = 0; | |
1533 | cmd->advertising = 0; | |
1534 | cmd->transceiver = XCVR_INTERNAL; | |
1535 | return 0; | |
1536 | } | |
1537 | ||
1538 | static void xgmac_get_pauseparam(struct net_device *netdev, | |
1539 | struct ethtool_pauseparam *pause) | |
1540 | { | |
1541 | struct xgmac_priv *priv = netdev_priv(netdev); | |
1542 | ||
1543 | pause->rx_pause = priv->rx_pause; | |
1544 | pause->tx_pause = priv->tx_pause; | |
1545 | } | |
1546 | ||
1547 | static int xgmac_set_pauseparam(struct net_device *netdev, | |
1548 | struct ethtool_pauseparam *pause) | |
1549 | { | |
1550 | struct xgmac_priv *priv = netdev_priv(netdev); | |
1551 | ||
1552 | if (pause->autoneg) | |
1553 | return -EINVAL; | |
1554 | ||
1555 | return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause); | |
1556 | } | |
1557 | ||
1558 | struct xgmac_stats { | |
1559 | char stat_string[ETH_GSTRING_LEN]; | |
1560 | int stat_offset; | |
1561 | bool is_reg; | |
1562 | }; | |
1563 | ||
1564 | #define XGMAC_STAT(m) \ | |
1565 | { #m, offsetof(struct xgmac_priv, xstats.m), false } | |
1566 | #define XGMAC_HW_STAT(m, reg_offset) \ | |
1567 | { #m, reg_offset, true } | |
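/* XGMAC_STAT entries are software counters read from priv->xstats via a
 * structure offset; XGMAC_HW_STAT entries are read directly from an MMC
 * register, as distinguished by the is_reg flag in
 * xgmac_get_ethtool_stats(). */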
1568 | ||
1569 | static const struct xgmac_stats xgmac_gstrings_stats[] = { | |
1570 | XGMAC_STAT(tx_frame_flushed), | |
1571 | XGMAC_STAT(tx_payload_error), | |
1572 | XGMAC_STAT(tx_ip_header_error), | |
1573 | XGMAC_STAT(tx_local_fault), | |
1574 | XGMAC_STAT(tx_remote_fault), | |
1575 | XGMAC_STAT(tx_early), | |
1576 | XGMAC_STAT(tx_process_stopped), | |
1577 | XGMAC_STAT(tx_jabber), | |
1578 | XGMAC_STAT(rx_buf_unav), | |
1579 | XGMAC_STAT(rx_process_stopped), | |
1580 | XGMAC_STAT(rx_payload_error), | |
1581 | XGMAC_STAT(rx_ip_header_error), | |
1582 | XGMAC_STAT(rx_da_filter_fail), | |
1583 | XGMAC_STAT(rx_sa_filter_fail), | |
1584 | XGMAC_STAT(fatal_bus_error), | |
1585 | XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), | |
1586 | XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), | |
1587 | XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME), | |
1588 | XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME), | |
1589 | XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME), | |
1590 | }; | |
1591 | #define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats) | |
1592 | ||
1593 | static void xgmac_get_ethtool_stats(struct net_device *dev, | |
1594 | struct ethtool_stats *dummy, | |
1595 | u64 *data) | |
1596 | { | |
1597 | struct xgmac_priv *priv = netdev_priv(dev); | |
1598 | void *p = priv; | |
1599 | int i; | |
1600 | ||
1601 | for (i = 0; i < XGMAC_STATS_LEN; i++) { | |
1602 | if (xgmac_gstrings_stats[i].is_reg) | |
1603 | *data++ = readl(priv->base + | |
1604 | xgmac_gstrings_stats[i].stat_offset); | |
1605 | else | |
1606 | *data++ = *(u32 *)(p + | |
1607 | xgmac_gstrings_stats[i].stat_offset); | |
1608 | } | |
1609 | } | |
1610 | ||
1611 | static int xgmac_get_sset_count(struct net_device *netdev, int sset) | |
1612 | { | |
1613 | switch (sset) { | |
1614 | case ETH_SS_STATS: | |
1615 | return XGMAC_STATS_LEN; | |
1616 | default: | |
1617 | return -EINVAL; | |
1618 | } | |
1619 | } | |
1620 | ||
1621 | static void xgmac_get_strings(struct net_device *dev, u32 stringset, | |
1622 | u8 *data) | |
1623 | { | |
1624 | int i; | |
1625 | u8 *p = data; | |
1626 | ||
1627 | switch (stringset) { | |
1628 | case ETH_SS_STATS: | |
1629 | for (i = 0; i < XGMAC_STATS_LEN; i++) { | |
1630 | memcpy(p, xgmac_gstrings_stats[i].stat_string, | |
1631 | ETH_GSTRING_LEN); | |
1632 | p += ETH_GSTRING_LEN; | |
1633 | } | |
1634 | break; | |
1635 | default: | |
1636 | WARN_ON(1); | |
1637 | break; | |
1638 | } | |
1639 | } | |
1640 | ||
1641 | static void xgmac_get_wol(struct net_device *dev, | |
1642 | struct ethtool_wolinfo *wol) | |
1643 | { | |
1644 | struct xgmac_priv *priv = netdev_priv(dev); | |
1645 | ||
1646 | if (device_can_wakeup(priv->device)) { | |
1647 | wol->supported = WAKE_MAGIC | WAKE_UCAST; | |
1648 | wol->wolopts = priv->wolopts; | |
1649 | } | |
1650 | } | |
1651 | ||
1652 | static int xgmac_set_wol(struct net_device *dev, | |
1653 | struct ethtool_wolinfo *wol) | |
1654 | { | |
1655 | struct xgmac_priv *priv = netdev_priv(dev); | |
1656 | u32 support = WAKE_MAGIC | WAKE_UCAST; | |
1657 | ||
1658 | if (!device_can_wakeup(priv->device)) | |
1659 | return -EOPNOTSUPP; | |
1660 | ||
1661 | if (wol->wolopts & ~support) | |
1662 | return -EINVAL; | |
1663 | ||
1664 | priv->wolopts = wol->wolopts; | |
1665 | ||
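/* Arm or disarm wake-up: mark the device as wakeup-enabled and allow the
 * interrupt line to wake the system from suspend, or undo both. */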
1666 | if (wol->wolopts) { | |
1667 | device_set_wakeup_enable(priv->device, 1); | |
1668 | enable_irq_wake(dev->irq); | |
1669 | } else { | |
1670 | device_set_wakeup_enable(priv->device, 0); | |
1671 | disable_irq_wake(dev->irq); | |
1672 | } | |
1673 | ||
1674 | return 0; | |
1675 | } | |
1676 | ||
1677 | static const struct ethtool_ops xgmac_ethtool_ops = { |
1678 | .get_settings = xgmac_ethtool_getsettings, |
1679 | .get_link = ethtool_op_get_link, | |
1680 | .get_pauseparam = xgmac_get_pauseparam, | |
1681 | .set_pauseparam = xgmac_set_pauseparam, | |
1682 | .get_ethtool_stats = xgmac_get_ethtool_stats, | |
1683 | .get_strings = xgmac_get_strings, | |
1684 | .get_wol = xgmac_get_wol, | |
1685 | .set_wol = xgmac_set_wol, | |
1686 | .get_sset_count = xgmac_get_sset_count, | |
1687 | }; | |
1688 | ||
1689 | /** | |
1690 | * xgmac_probe | |
1691 | * @pdev: platform device pointer | |
1692 | * Description: the driver is initialized through platform_device. | |
1693 | */ | |
1694 | static int xgmac_probe(struct platform_device *pdev) | |
1695 | { | |
1696 | int ret = 0; | |
1697 | struct resource *res; | |
1698 | struct net_device *ndev = NULL; | |
1699 | struct xgmac_priv *priv = NULL; | |
1700 | u32 uid; | |
1701 | ||
1702 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1703 | if (!res) | |
1704 | return -ENODEV; | |
1705 | ||
1706 | if (!request_mem_region(res->start, resource_size(res), pdev->name)) | |
1707 | return -EBUSY; | |
1708 | ||
1709 | ndev = alloc_etherdev(sizeof(struct xgmac_priv)); | |
1710 | if (!ndev) { | |
1711 | ret = -ENOMEM; | |
1712 | goto err_alloc; | |
1713 | } | |
1714 | ||
1715 | SET_NETDEV_DEV(ndev, &pdev->dev); | |
1716 | priv = netdev_priv(ndev); | |
1717 | platform_set_drvdata(pdev, ndev); | |
1718 | ether_setup(ndev); | |
1719 | ndev->netdev_ops = &xgmac_netdev_ops; | |
1720 | SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); | |
1721 | spin_lock_init(&priv->stats_lock); | |
1722 | ||
1723 | priv->device = &pdev->dev; | |
1724 | priv->dev = ndev; | |
1725 | priv->rx_pause = 1; | |
1726 | priv->tx_pause = 1; | |
1727 | ||
1728 | priv->base = ioremap(res->start, resource_size(res)); | |
1729 | if (!priv->base) { | |
1730 | netdev_err(ndev, "ioremap failed\n"); | |
1731 | ret = -ENOMEM; | |
1732 | goto err_io; | |
1733 | } | |
1734 | ||
1735 | uid = readl(priv->base + XGMAC_VERSION); | |
1736 | netdev_info(ndev, "h/w version is 0x%x\n", uid); | |
1737 | ||
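/* Mask all DMA interrupts before the IRQ lines are requested below */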
1738 | writel(0, priv->base + XGMAC_DMA_INTR_ENA); | |
1739 | ndev->irq = platform_get_irq(pdev, 0); | |
1740 | if (ndev->irq < 0) { | |
1741 | netdev_err(ndev, "No irq resource\n"); | |
1742 | ret = ndev->irq; | |
1743 | goto err_irq; | |
1744 | } | |
1745 | ||
1746 | ret = request_irq(ndev->irq, xgmac_interrupt, 0, | |
1747 | dev_name(&pdev->dev), ndev); | |
1748 | if (ret < 0) { | |
1749 | netdev_err(ndev, "Could not request irq %d - ret %d\n", | |
1750 | ndev->irq, ret); | |
1751 | goto err_irq; | |
1752 | } | |
1753 | ||
1754 | priv->pmt_irq = platform_get_irq(pdev, 1); | |
1755 | if (priv->pmt_irq < 0) { | |
1756 | netdev_err(ndev, "No pmt irq resource\n"); | |
1757 | ret = priv->pmt_irq; | |
1758 | goto err_pmt_irq; | |
1759 | } | |
1760 | ||
1761 | ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0, | |
1762 | dev_name(&pdev->dev), ndev); | |
1763 | if (ret < 0) { | |
1764 | netdev_err(ndev, "Could not request irq %d - ret %d\n", | |
1765 | priv->pmt_irq, ret); | |
1766 | goto err_pmt_irq; | |
1767 | } | |
1768 | ||
1769 | device_set_wakeup_capable(&pdev->dev, 1); | |
1770 | if (device_can_wakeup(priv->device)) | |
1771 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ | |
1772 | ||
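/* Advertise scatter/gather, frag-list and high-DMA support
 * unconditionally; checksum offloads (including RX checksum) are only
 * offered when the DMA hardware-feature register reports a TX checksum
 * engine. */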
1773 | ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; | |
1774 | if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) | |
1775 | ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
1776 | NETIF_F_RXCSUM; | |
1777 | ndev->features |= ndev->hw_features; | |
1778 | ndev->priv_flags |= IFF_UNICAST_FLT; | |
1779 | ||
1780 | /* Get the MAC address */ | |
1781 | xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0); | |
1782 | if (!is_valid_ether_addr(ndev->dev_addr)) | |
1783 | netdev_warn(ndev, "MAC address %pM not valid\n", | |
1784 | ndev->dev_addr); | |
1785 | ||
1786 | netif_napi_add(ndev, &priv->napi, xgmac_poll, 64); | |
1787 | ret = register_netdev(ndev); | |
1788 | if (ret) | |
1789 | goto err_reg; | |
1790 | ||
1791 | return 0; | |
1792 | ||
1793 | err_reg: | |
1794 | netif_napi_del(&priv->napi); | |
1795 | free_irq(priv->pmt_irq, ndev); | |
1796 | err_pmt_irq: | |
1797 | free_irq(ndev->irq, ndev); | |
1798 | err_irq: | |
1799 | iounmap(priv->base); | |
1800 | err_io: | |
1801 | free_netdev(ndev); | |
1802 | err_alloc: | |
1803 | release_mem_region(res->start, resource_size(res)); | |
1804 | platform_set_drvdata(pdev, NULL); | |
1805 | return ret; | |
1806 | } | |
1807 | ||
1808 | /** | |
1809 | * xgmac_remove | |
1810 | * @pdev: platform device pointer | |
1811 | * Description: this function disables the MAC, frees the IRQ lines, | |
1812 | * unregisters the net_device, unmaps the I/O memory and releases | |
1813 | * the memory region, then frees the net_device structure. | |
1814 | */ | |
1815 | static int xgmac_remove(struct platform_device *pdev) | |
1816 | { | |
1817 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1818 | struct xgmac_priv *priv = netdev_priv(ndev); | |
1819 | struct resource *res; | |
1820 | ||
1821 | xgmac_mac_disable(priv->base); | |
1822 | ||
1823 | /* Free the IRQ lines */ | |
1824 | free_irq(ndev->irq, ndev); | |
1825 | free_irq(priv->pmt_irq, ndev); | |
1826 | ||
1827 | platform_set_drvdata(pdev, NULL); | |
1828 | unregister_netdev(ndev); | |
1829 | netif_napi_del(&priv->napi); | |
1830 | ||
1831 | iounmap(priv->base); | |
1832 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1833 | release_mem_region(res->start, resource_size(res)); | |
1834 | ||
1835 | free_netdev(ndev); | |
1836 | ||
1837 | return 0; | |
1838 | } | |
1839 | ||
1840 | #ifdef CONFIG_PM_SLEEP | |
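/* Program the PMT register for the requested wake-up sources: power-down
 * plus magic-packet and/or global-unicast wake. A mode of 0 clears the
 * register, which is how resume takes the MAC back out of power-down. */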
1841 | static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) | |
1842 | { | |
1843 | unsigned int pmt = 0; | |
1844 | ||
1845 | if (mode & WAKE_MAGIC) | |
1846 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; | |
1847 | if (mode & WAKE_UCAST) | |
1848 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; | |
1849 | ||
1850 | writel(pmt, ioaddr + XGMAC_PMT); | |
1851 | } | |
1852 | ||
1853 | static int xgmac_suspend(struct device *dev) | |
1854 | { | |
1855 | struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); | |
1856 | struct xgmac_priv *priv = netdev_priv(ndev); | |
1857 | u32 value; | |
1858 | ||
1859 | if (!ndev || !netif_running(ndev)) | |
1860 | return 0; | |
1861 | ||
1862 | netif_device_detach(ndev); | |
1863 | napi_disable(&priv->napi); | |
1864 | writel(0, priv->base + XGMAC_DMA_INTR_ENA); | |
1865 | ||
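/* If this device may wake the system, leave the MAC enabled and stop
 * only the DMA engines, then program the wake-up triggers; otherwise the
 * MAC is shut down completely. */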
1866 | if (device_may_wakeup(priv->device)) { | |
1867 | /* Stop TX/RX DMA Only */ | |
1868 | value = readl(priv->base + XGMAC_DMA_CONTROL); | |
1869 | value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); | |
1870 | writel(value, priv->base + XGMAC_DMA_CONTROL); | |
1871 | ||
1872 | xgmac_pmt(priv->base, priv->wolopts); | |
1873 | } else | |
1874 | xgmac_mac_disable(priv->base); | |
1875 | ||
1876 | return 0; | |
1877 | } | |
1878 | ||
1879 | static int xgmac_resume(struct device *dev) | |
1880 | { | |
1881 | struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); | |
1882 | struct xgmac_priv *priv = netdev_priv(ndev); | |
1883 | void __iomem *ioaddr = priv->base; | |
1884 | ||
1885 | if (!netif_running(ndev)) | |
1886 | return 0; | |
1887 | ||
1888 | xgmac_pmt(ioaddr, 0); | |
1889 | ||
1890 | /* Enable the MAC and DMA */ | |
1891 | xgmac_mac_enable(ioaddr); | |
1892 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); | |
1893 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); | |
1894 | ||
1895 | netif_device_attach(ndev); | |
1896 | napi_enable(&priv->napi); | |
1897 | ||
1898 | return 0; | |
1899 | } | |
1900 | ||
1901 | static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume); | |
1902 | #define XGMAC_PM_OPS (&xgmac_pm_ops) | |
1903 | #else | |
1904 | #define XGMAC_PM_OPS NULL | |
1905 | #endif /* CONFIG_PM_SLEEP */ | |
1906 | ||
1907 | static const struct of_device_id xgmac_of_match[] = { | |
1908 | { .compatible = "calxeda,hb-xgmac", }, | |
1909 | {}, | |
1910 | }; | |
1911 | MODULE_DEVICE_TABLE(of, xgmac_of_match); | |
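/* A device-tree node binding against this driver needs one memory region
 * and two interrupts (MAC and PMT), matching the resources fetched in
 * xgmac_probe(). Sketch only: the unit address, register size and
 * interrupt numbers below are illustrative assumptions, not taken from a
 * real board file.
 *
 *	ethernet@fff50000 {
 *		compatible = "calxeda,hb-xgmac";
 *		reg = <0xfff50000 0x1000>;
 *		interrupts = <0 77 4>, <0 78 4>;
 *	};
 */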
1912 | ||
1913 | static struct platform_driver xgmac_driver = { | |
1914 | .driver = { | |
1915 | .name = "calxedaxgmac", | |
1916 | .of_match_table = xgmac_of_match, | |
1917 | }, | |
1918 | .probe = xgmac_probe, | |
1919 | .remove = xgmac_remove, | |
1920 | .driver.pm = XGMAC_PM_OPS, | |
1921 | }; | |
1922 | ||
1923 | module_platform_driver(xgmac_driver); | |
1924 | ||
1925 | MODULE_AUTHOR("Calxeda, Inc."); | |
1926 | MODULE_DESCRIPTION("Calxeda 10G XGMAC driver"); | |
1927 | MODULE_LICENSE("GPL v2"); |