Commit | Line | Data |
---|---|---|
65e0ace2 JD |
1 | /* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver |
2 | * | |
3 | * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) | |
4 | * | |
ea8c1c64 JD |
5 | * This program is dual-licensed; you may select either version 2 of |
6 | * the GNU General Public License ("GPL") or BSD license ("BSD"). | |
65e0ace2 JD |
7 | * |
8 | * This Synopsys DWC XLGMAC software driver and associated documentation | |
9 | * (hereinafter the "Software") is an unsupported proprietary work of | |
10 | * Synopsys, Inc. unless otherwise expressly agreed to in writing between | |
11 | * Synopsys and you. The Software IS NOT an item of Licensed Software or a | |
12 | * Licensed Product under any End User Software License Agreement or | |
13 | * Agreement for Licensed Products with Synopsys or any supplement thereto. | |
14 | * Synopsys is a registered trademark of Synopsys, Inc. Other names included | |
15 | * in the SOFTWARE may be the trademarks of their respective owners. | |
16 | */ | |
17 | ||
18 | #include <linux/phy.h> | |
19 | #include <linux/mdio.h> | |
20 | #include <linux/clk.h> | |
21 | #include <linux/bitrev.h> | |
22 | #include <linux/crc32.h> | |
424fa00e | 23 | #include <linux/dcbnl.h> |
65e0ace2 JD |
24 | |
25 | #include "dwc-xlgmac.h" | |
26 | #include "dwc-xlgmac-reg.h" | |
27 | ||
28 | static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc) | |
29 | { | |
30 | return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, | |
31 | TX_NORMAL_DESC3_OWN_POS, | |
32 | TX_NORMAL_DESC3_OWN_LEN); | |
33 | } | |
34 | ||
35 | static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata) | |
36 | { | |
37 | u32 regval; | |
38 | ||
39 | regval = readl(pdata->mac_regs + MAC_RCR); | |
40 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, | |
41 | MAC_RCR_IPC_LEN, 0); | |
42 | writel(regval, pdata->mac_regs + MAC_RCR); | |
43 | ||
44 | return 0; | |
45 | } | |
46 | ||
47 | static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata) | |
48 | { | |
49 | u32 regval; | |
50 | ||
51 | regval = readl(pdata->mac_regs + MAC_RCR); | |
52 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, | |
53 | MAC_RCR_IPC_LEN, 1); | |
54 | writel(regval, pdata->mac_regs + MAC_RCR); | |
55 | ||
56 | return 0; | |
57 | } | |
58 | ||
59 | static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr) | |
60 | { | |
61 | unsigned int mac_addr_hi, mac_addr_lo; | |
62 | ||
63 | mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); | |
64 | mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | | |
65 | (addr[1] << 8) | (addr[0] << 0); | |
66 | ||
67 | writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR); | |
68 | writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR); | |
69 | ||
70 | return 0; | |
71 | } | |
72 | ||
/* Program one additional MAC address register pair (MACAxHR/MACAxLR).
 *
 * When @ha is non-NULL its 6-byte address is packed little-endian into
 * the low/high register words and the Address Enable (AE) bit is set;
 * when @ha is NULL both words are written as zero, disabling the entry.
 * @mac_reg is advanced past the pair so callers can walk the table.
 */
static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata,
			       struct netdev_hw_addr *ha,
			       unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		/* Pack bytes 0-3 into the low word, 4-5 into the high word */
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);

		/* Mark the entry valid for address matching */
		mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi,
						  MAC_MACA1HR_AE_POS,
						  MAC_MACA1HR_AE_LEN,
						  1);
	}

	/* High word first, then low word; leave *mac_reg at the next pair */
	writel(mac_addr_hi, pdata->mac_regs + *mac_reg);
	*mac_reg += MAC_MACA_INC;
	writel(mac_addr_lo, pdata->mac_regs + *mac_reg);
	*mac_reg += MAC_MACA_INC;
}
108 | ||
/* Enable Rx VLAN tag stripping: configure MAC_VLANTR so that C-TAG
 * (0x8100) VLAN tags are stripped from received frames and reported
 * through the Rx descriptor instead. Always returns 0.
 */
static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
{
	u32 regval;

	regval = readl(pdata->mac_regs + MAC_VLANTR);
	/* Put the VLAN tag in the Rx descriptor */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
				     MAC_VLANTR_EVLRXS_LEN, 1);
	/* Don't check the VLAN type */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
				     MAC_VLANTR_DOVLTC_LEN, 1);
	/* Check only C-TAG (0x8100) packets */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
				     MAC_VLANTR_ERSVLM_LEN, 0);
	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
				     MAC_VLANTR_ESVL_LEN, 0);
	/* Enable VLAN tag stripping */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
				     MAC_VLANTR_EVLS_LEN, 0x3);
	writel(regval, pdata->mac_regs + MAC_VLANTR);

	return 0;
}
133 | ||
134 | static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata) | |
135 | { | |
136 | u32 regval; | |
137 | ||
138 | regval = readl(pdata->mac_regs + MAC_VLANTR); | |
139 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, | |
140 | MAC_VLANTR_EVLS_LEN, 0); | |
141 | writel(regval, pdata->mac_regs + MAC_VLANTR); | |
142 | ||
143 | return 0; | |
144 | } | |
145 | ||
/* Enable Rx VLAN filtering: turn on the packet-filter VTFE bit and
 * configure MAC_VLANTR for 12-bit hash-table filtering. Always
 * returns 0.
 */
static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
{
	u32 regval;

	regval = readl(pdata->mac_regs + MAC_PFR);
	/* Enable VLAN filtering */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
				     MAC_PFR_VTFE_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_PFR);

	regval = readl(pdata->mac_regs + MAC_VLANTR);
	/* Enable VLAN Hash Table filtering */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
				     MAC_VLANTR_VTHM_LEN, 1);
	/* Disable VLAN tag inverse matching */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
				     MAC_VLANTR_VTIM_LEN, 0);
	/* Only filter on the lower 12-bits of the VLAN tag */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
				     MAC_VLANTR_ETV_LEN, 1);
	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
				     MAC_VLANTR_VL_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_VLANTR);

	return 0;
}
178 | ||
179 | static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata) | |
180 | { | |
181 | u32 regval; | |
182 | ||
183 | regval = readl(pdata->mac_regs + MAC_PFR); | |
184 | /* Disable VLAN filtering */ | |
185 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, | |
186 | MAC_PFR_VTFE_LEN, 0); | |
187 | writel(regval, pdata->mac_regs + MAC_PFR); | |
188 | ||
189 | return 0; | |
190 | } | |
191 | ||
192 | static u32 xlgmac_vid_crc32_le(__le16 vid_le) | |
193 | { | |
194 | unsigned char *data = (unsigned char *)&vid_le; | |
195 | unsigned char data_byte = 0; | |
196 | u32 poly = 0xedb88320; | |
197 | u32 crc = ~0; | |
198 | u32 temp = 0; | |
199 | int i, bits; | |
200 | ||
201 | bits = get_bitmask_order(VLAN_VID_MASK); | |
202 | for (i = 0; i < bits; i++) { | |
203 | if ((i % 8) == 0) | |
204 | data_byte = data[i / 8]; | |
205 | ||
206 | temp = ((crc & 1) ^ data_byte) & 1; | |
207 | crc >>= 1; | |
208 | data_byte >>= 1; | |
209 | ||
210 | if (temp) | |
211 | crc ^= poly; | |
212 | } | |
213 | ||
214 | return crc; | |
215 | } | |
216 | ||
/* Rebuild the 16-bit VLAN hash filter from pdata->active_vlans and
 * write it to MAC_VLANHTR.
 *
 * Each active VLAN ID contributes one bit: the top 4 bits of the
 * bit-reversed CRC32 of the little-endian VID select the bit position.
 * Always returns 0.
 */
static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata)
{
	u16 vlan_hash_table = 0;
	__le16 vid_le;
	u32 regval;
	u32 crc;
	u16 vid;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	regval = readl(pdata->mac_regs + MAC_VLANHTR);
	/* Set the VLAN Hash Table filtering register */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
				     MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
	writel(regval, pdata->mac_regs + MAC_VLANHTR);

	return 0;
}
242 | ||
/* Set or clear promiscuous mode (MAC_PFR.PR) to match @enable.
 *
 * The register write is skipped when the bit already has the desired
 * value. Because the hardware still performs VLAN filtering while
 * promiscuous, VLAN filtering is explicitly disabled on entry and
 * re-enabled on exit (when the netdev still requests it).
 * Always returns 0.
 */
static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;
	u32 regval;

	regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
				     MAC_PFR_PR_POS, MAC_PFR_PR_LEN);
	/* Nothing to do if the hardware already matches */
	if (regval == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");

	regval = readl(pdata->mac_regs + MAC_PFR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS,
				     MAC_PFR_PR_LEN, val);
	writel(regval, pdata->mac_regs + MAC_PFR);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		xlgmac_disable_rx_vlan_filtering(pdata);
	} else {
		if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			xlgmac_enable_rx_vlan_filtering(pdata);
	}

	return 0;
}
272 | ||
/* Set or clear pass-all-multicast mode (MAC_PFR.PM) to match @enable.
 * The register write is skipped when the bit already has the desired
 * value. Always returns 0.
 */
static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata,
					 unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;
	u32 regval;

	regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
				     MAC_PFR_PM_POS, MAC_PFR_PM_LEN);
	/* Nothing to do if the hardware already matches */
	if (regval == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");

	regval = readl(pdata->mac_regs + MAC_PFR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS,
				     MAC_PFR_PM_LEN, val);
	writel(regval, pdata->mac_regs + MAC_PFR);

	return 0;
}
294 | ||
/* Program the netdev unicast and multicast lists into the additional
 * MAC address registers (perfect filtering).
 *
 * If the unicast list alone exceeds the available entries, fall back
 * to promiscuous mode; if the multicast list does not fit in what
 * remains, fall back to all-multicast mode. Any unused entries are
 * cleared afterwards so stale addresses stop matching.
 */
static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int addn_macs;
	unsigned int mac_reg;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		/* Too many unicast addresses for perfect filtering */
		xlgmac_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xlgmac_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			/* Multicast list doesn't fit in the remainder */
			xlgmac_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xlgmac_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xlgmac_set_mac_reg(pdata, NULL, &mac_reg);
}
327 | ||
/* Rebuild the MAC hash filter registers (MAC_HTR0..n) from the netdev
 * unicast and multicast lists.
 *
 * The hash index is the top bits of the bit-reversed CRC32 of the MAC
 * address; how many bits are kept (the shift) and how many 32-bit
 * registers exist both derive from hw_feat.hash_table_size.
 */
static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata)
{
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE];
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int i;
	u32 crc;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		/* crc >> 5 selects the register, low 5 bits the bit */
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		writel(hash_table[i], pdata->mac_regs + hash_reg);
		hash_reg += MAC_HTR_INC;
	}
}
362 | ||
363 | static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata) | |
364 | { | |
365 | if (pdata->hw_feat.hash_table_size) | |
366 | xlgmac_set_mac_hash_table(pdata); | |
367 | else | |
368 | xlgmac_set_mac_addn_addrs(pdata); | |
369 | ||
370 | return 0; | |
371 | } | |
372 | ||
/* Program the netdev station address and, when hardware hash filtering
 * is available, enable hash-or-perfect filtering for both unicast and
 * multicast frames.
 */
static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata)
{
	u32 regval;

	xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr);

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		regval = readl(pdata->mac_regs + MAC_PFR);
		/* HPF: hash or perfect filter; HUC/HMC: hash uni/multicast */
		regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
					     MAC_PFR_HPF_LEN, 1);
		regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
					     MAC_PFR_HUC_LEN, 1);
		regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
					     MAC_PFR_HMC_LEN, 1);
		writel(regval, pdata->mac_regs + MAC_PFR);
	}
}
391 | ||
392 | static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata) | |
393 | { | |
394 | unsigned int val; | |
395 | u32 regval; | |
396 | ||
397 | val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0; | |
398 | ||
399 | regval = readl(pdata->mac_regs + MAC_RCR); | |
400 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS, | |
401 | MAC_RCR_JE_LEN, val); | |
402 | writel(regval, pdata->mac_regs + MAC_RCR); | |
403 | } | |
404 | ||
405 | static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata) | |
406 | { | |
407 | if (pdata->netdev->features & NETIF_F_RXCSUM) | |
408 | xlgmac_enable_rx_csum(pdata); | |
409 | else | |
410 | xlgmac_disable_rx_csum(pdata); | |
411 | } | |
412 | ||
/* Configure the MAC's VLAN support to match the netdev features:
 * Tx tag insertion via context descriptors, the current VLAN hash
 * filter, and Rx filtering/stripping on or off as requested.
 */
static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata)
{
	u32 regval;

	regval = readl(pdata->mac_regs + MAC_VLANIR);
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
				     MAC_VLANIR_CSVL_LEN, 0);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
				     MAC_VLANIR_VLTI_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_VLANIR);

	/* Set the current VLAN Hash Table register value */
	xlgmac_update_vlan_hash_table(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		xlgmac_enable_rx_vlan_filtering(pdata);
	else
		xlgmac_disable_rx_vlan_filtering(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		xlgmac_enable_rx_vlan_stripping(pdata);
	else
		xlgmac_disable_rx_vlan_stripping(pdata);
}
438 | ||
439 | static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata) | |
440 | { | |
441 | struct net_device *netdev = pdata->netdev; | |
442 | unsigned int pr_mode, am_mode; | |
443 | ||
444 | pr_mode = ((netdev->flags & IFF_PROMISC) != 0); | |
445 | am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); | |
446 | ||
447 | xlgmac_set_promiscuous_mode(pdata, pr_mode); | |
448 | xlgmac_set_all_multicast_mode(pdata, am_mode); | |
449 | ||
450 | xlgmac_add_mac_addresses(pdata); | |
451 | ||
452 | return 0; | |
453 | } | |
454 | ||
/* Wait (up to XLGMAC_DMA_STOP_TIMEOUT seconds) for the given Tx DMA
 * channel to report a stopped or suspended state before it is
 * disabled. The debug-status register and the bit position within it
 * depend on the channel's queue index; only a warning is logged on
 * timeout.
 */
static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata,
				   struct xlgmac_channel *channel)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned long tx_timeout;
	unsigned int tx_status;

	/* Calculate the status register to read and the position within */
	if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
			 DMA_DSR0_TPS_START;
	} else {
		tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state.  Don't wait forever though...
	 */
	tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = readl(pdata->mac_regs + tx_dsr);
		tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos,
						DMA_DSR_TPS_LEN);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    channel->queue_index);
}
496 | ||
/* Bring up the transmit path in order: start each Tx DMA channel,
 * enable each MTL Tx queue, then enable the MAC transmitter.
 */
static void xlgmac_enable_tx(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	/* Enable each Tx DMA channel */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels with Tx rings are allocated contiguously */
		if (!channel->tx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
					     DMA_CH_TCR_ST_LEN, 1);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
					     MTL_Q_TQOMR_TXQEN_LEN,
					     MTL_Q_ENABLED);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	/* Enable MAC Tx */
	regval = readl(pdata->mac_regs + MAC_TCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
				     MAC_TCR_TE_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_TCR);
}
530 | ||
/* Tear down the transmit path in the reverse order of enable: wait
 * for each Tx DMA channel to quiesce, disable the MAC transmitter,
 * disable each MTL Tx queue, then stop each Tx DMA channel.
 */
static void xlgmac_disable_tx(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels with Tx rings are allocated contiguously */
		if (!channel->tx_ring)
			break;

		xlgmac_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	regval = readl(pdata->mac_regs + MAC_TCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
				     MAC_TCR_TE_LEN, 0);
	writel(regval, pdata->mac_regs + MAC_TCR);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
					     MTL_Q_TQOMR_TXQEN_LEN, 0);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	/* Disable each Tx DMA channel */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
					     DMA_CH_TCR_ST_LEN, 0);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
	}
}
572 | ||
/* Wait (up to XLGMAC_DMA_STOP_TIMEOUT seconds) for the given Rx queue
 * to drain: both the packets-in-queue count (PRXQ) and the queue state
 * (RXQSTS) in MTL_Q_RQDR must read zero. Only a warning is logged on
 * timeout.
 */
static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata,
				   unsigned int queue)
{
	unsigned int rx_status, prxq, rxqsts;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
	 * wait forever though...
	 */
	rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, rx_timeout)) {
		rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
		prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS,
					   MTL_Q_RQDR_PRXQ_LEN);
		rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS,
					     MTL_Q_RQDR_RXQSTS_LEN);
		if ((prxq == 0) && (rxqsts == 0))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, rx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}
601 | ||
602 | static void xlgmac_enable_rx(struct xlgmac_pdata *pdata) | |
603 | { | |
604 | struct xlgmac_channel *channel; | |
605 | unsigned int regval, i; | |
606 | ||
607 | /* Enable each Rx DMA channel */ | |
608 | channel = pdata->channel_head; | |
609 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
610 | if (!channel->rx_ring) | |
611 | break; | |
612 | ||
613 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); | |
614 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, | |
615 | DMA_CH_RCR_SR_LEN, 1); | |
616 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); | |
617 | } | |
618 | ||
619 | /* Enable each Rx queue */ | |
620 | regval = 0; | |
621 | for (i = 0; i < pdata->rx_q_count; i++) | |
622 | regval |= (0x02 << (i << 1)); | |
623 | writel(regval, pdata->mac_regs + MAC_RQC0R); | |
624 | ||
625 | /* Enable MAC Rx */ | |
626 | regval = readl(pdata->mac_regs + MAC_RCR); | |
627 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS, | |
628 | MAC_RCR_DCRCC_LEN, 1); | |
629 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS, | |
630 | MAC_RCR_CST_LEN, 1); | |
631 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS, | |
632 | MAC_RCR_ACS_LEN, 1); | |
633 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS, | |
634 | MAC_RCR_RE_LEN, 1); | |
635 | writel(regval, pdata->mac_regs + MAC_RCR); | |
636 | } | |
637 | ||
/* Tear down the receive path: disable the MAC receiver first so no new
 * frames enter, wait for each Rx queue to drain, disable the MTL Rx
 * queues, then stop each Rx DMA channel.
 */
static void xlgmac_disable_rx(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	/* Disable MAC Rx */
	regval = readl(pdata->mac_regs + MAC_RCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
				     MAC_RCR_DCRCC_LEN, 0);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
				     MAC_RCR_CST_LEN, 0);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
				     MAC_RCR_ACS_LEN, 0);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
				     MAC_RCR_RE_LEN, 0);
	writel(regval, pdata->mac_regs + MAC_RCR);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xlgmac_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	writel(0, pdata->mac_regs + MAC_RQC0R);

	/* Disable each Rx DMA channel */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels with Rx rings are allocated contiguously */
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
					     DMA_CH_RCR_SR_LEN, 0);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
	}
}
675 | ||
/* Kick the Tx DMA engine for @channel by writing the tail pointer
 * (address of the next free descriptor) after a write barrier, and arm
 * the Tx coalescing timer when one is configured and not already
 * running. Clears the ring's xmit_more flag.
 */
static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel,
				 struct xlgmac_ring *ring)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_desc_data *desc_data;

	/* Make sure everything is written before the register write */
	wmb();

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO));

	/* Start the Tx timer */
	if (pdata->tx_usecs && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}

	ring->tx.xmit_more = 0;
}
701 | ||
702 | static void xlgmac_dev_xmit(struct xlgmac_channel *channel) | |
703 | { | |
704 | struct xlgmac_pdata *pdata = channel->pdata; | |
705 | struct xlgmac_ring *ring = channel->tx_ring; | |
706 | unsigned int tso_context, vlan_context; | |
707 | struct xlgmac_desc_data *desc_data; | |
708 | struct xlgmac_dma_desc *dma_desc; | |
709 | struct xlgmac_pkt_info *pkt_info; | |
710 | unsigned int csum, tso, vlan; | |
711 | int start_index = ring->cur; | |
712 | int cur_index = ring->cur; | |
713 | unsigned int tx_set_ic; | |
714 | int i; | |
715 | ||
716 | pkt_info = &ring->pkt_info; | |
717 | csum = XLGMAC_GET_REG_BITS(pkt_info->attributes, | |
718 | TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, | |
719 | TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); | |
720 | tso = XLGMAC_GET_REG_BITS(pkt_info->attributes, | |
721 | TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, | |
722 | TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); | |
723 | vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes, | |
724 | TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, | |
725 | TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); | |
726 | ||
727 | if (tso && (pkt_info->mss != ring->tx.cur_mss)) | |
728 | tso_context = 1; | |
729 | else | |
730 | tso_context = 0; | |
731 | ||
732 | if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) | |
733 | vlan_context = 1; | |
734 | else | |
735 | vlan_context = 0; | |
736 | ||
737 | /* Determine if an interrupt should be generated for this Tx: | |
738 | * Interrupt: | |
739 | * - Tx frame count exceeds the frame count setting | |
740 | * - Addition of Tx frame count to the frame count since the | |
741 | * last interrupt was set exceeds the frame count setting | |
742 | * No interrupt: | |
743 | * - No frame count setting specified (ethtool -C ethX tx-frames 0) | |
744 | * - Addition of Tx frame count to the frame count since the | |
745 | * last interrupt was set does not exceed the frame count setting | |
746 | */ | |
747 | ring->coalesce_count += pkt_info->tx_packets; | |
748 | if (!pdata->tx_frames) | |
749 | tx_set_ic = 0; | |
750 | else if (pkt_info->tx_packets > pdata->tx_frames) | |
751 | tx_set_ic = 1; | |
752 | else if ((ring->coalesce_count % pdata->tx_frames) < | |
753 | pkt_info->tx_packets) | |
754 | tx_set_ic = 1; | |
755 | else | |
756 | tx_set_ic = 0; | |
757 | ||
758 | desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); | |
759 | dma_desc = desc_data->dma_desc; | |
760 | ||
761 | /* Create a context descriptor if this is a TSO pkt_info */ | |
762 | if (tso_context || vlan_context) { | |
763 | if (tso_context) { | |
764 | netif_dbg(pdata, tx_queued, pdata->netdev, | |
765 | "TSO context descriptor, mss=%u\n", | |
766 | pkt_info->mss); | |
767 | ||
768 | /* Set the MSS size */ | |
769 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
770 | dma_desc->desc2, | |
771 | TX_CONTEXT_DESC2_MSS_POS, | |
772 | TX_CONTEXT_DESC2_MSS_LEN, | |
773 | pkt_info->mss); | |
774 | ||
775 | /* Mark it as a CONTEXT descriptor */ | |
776 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
777 | dma_desc->desc3, | |
778 | TX_CONTEXT_DESC3_CTXT_POS, | |
779 | TX_CONTEXT_DESC3_CTXT_LEN, | |
780 | 1); | |
781 | ||
782 | /* Indicate this descriptor contains the MSS */ | |
783 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
784 | dma_desc->desc3, | |
785 | TX_CONTEXT_DESC3_TCMSSV_POS, | |
786 | TX_CONTEXT_DESC3_TCMSSV_LEN, | |
787 | 1); | |
788 | ||
789 | ring->tx.cur_mss = pkt_info->mss; | |
790 | } | |
791 | ||
792 | if (vlan_context) { | |
793 | netif_dbg(pdata, tx_queued, pdata->netdev, | |
794 | "VLAN context descriptor, ctag=%u\n", | |
795 | pkt_info->vlan_ctag); | |
796 | ||
797 | /* Mark it as a CONTEXT descriptor */ | |
798 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
799 | dma_desc->desc3, | |
800 | TX_CONTEXT_DESC3_CTXT_POS, | |
801 | TX_CONTEXT_DESC3_CTXT_LEN, | |
802 | 1); | |
803 | ||
804 | /* Set the VLAN tag */ | |
805 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
806 | dma_desc->desc3, | |
807 | TX_CONTEXT_DESC3_VT_POS, | |
808 | TX_CONTEXT_DESC3_VT_LEN, | |
809 | pkt_info->vlan_ctag); | |
810 | ||
811 | /* Indicate this descriptor contains the VLAN tag */ | |
812 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
813 | dma_desc->desc3, | |
814 | TX_CONTEXT_DESC3_VLTV_POS, | |
815 | TX_CONTEXT_DESC3_VLTV_LEN, | |
816 | 1); | |
817 | ||
818 | ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; | |
819 | } | |
820 | ||
821 | cur_index++; | |
822 | desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); | |
823 | dma_desc = desc_data->dma_desc; | |
824 | } | |
825 | ||
826 | /* Update buffer address (for TSO this is the header) */ | |
827 | dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); | |
828 | dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); | |
829 | ||
830 | /* Update the buffer length */ | |
831 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
832 | dma_desc->desc2, | |
833 | TX_NORMAL_DESC2_HL_B1L_POS, | |
834 | TX_NORMAL_DESC2_HL_B1L_LEN, | |
835 | desc_data->skb_dma_len); | |
836 | ||
837 | /* VLAN tag insertion check */ | |
838 | if (vlan) | |
839 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
840 | dma_desc->desc2, | |
841 | TX_NORMAL_DESC2_VTIR_POS, | |
842 | TX_NORMAL_DESC2_VTIR_LEN, | |
843 | TX_NORMAL_DESC2_VLAN_INSERT); | |
844 | ||
845 | /* Timestamp enablement check */ | |
846 | if (XLGMAC_GET_REG_BITS(pkt_info->attributes, | |
847 | TX_PACKET_ATTRIBUTES_PTP_POS, | |
848 | TX_PACKET_ATTRIBUTES_PTP_LEN)) | |
849 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
850 | dma_desc->desc2, | |
851 | TX_NORMAL_DESC2_TTSE_POS, | |
852 | TX_NORMAL_DESC2_TTSE_LEN, | |
853 | 1); | |
854 | ||
855 | /* Mark it as First Descriptor */ | |
856 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
857 | dma_desc->desc3, | |
858 | TX_NORMAL_DESC3_FD_POS, | |
859 | TX_NORMAL_DESC3_FD_LEN, | |
860 | 1); | |
861 | ||
862 | /* Mark it as a NORMAL descriptor */ | |
863 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
864 | dma_desc->desc3, | |
865 | TX_NORMAL_DESC3_CTXT_POS, | |
866 | TX_NORMAL_DESC3_CTXT_LEN, | |
867 | 0); | |
868 | ||
869 | /* Set OWN bit if not the first descriptor */ | |
870 | if (cur_index != start_index) | |
871 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
872 | dma_desc->desc3, | |
873 | TX_NORMAL_DESC3_OWN_POS, | |
874 | TX_NORMAL_DESC3_OWN_LEN, | |
875 | 1); | |
876 | ||
877 | if (tso) { | |
878 | /* Enable TSO */ | |
879 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
880 | dma_desc->desc3, | |
881 | TX_NORMAL_DESC3_TSE_POS, | |
882 | TX_NORMAL_DESC3_TSE_LEN, 1); | |
883 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
884 | dma_desc->desc3, | |
885 | TX_NORMAL_DESC3_TCPPL_POS, | |
886 | TX_NORMAL_DESC3_TCPPL_LEN, | |
887 | pkt_info->tcp_payload_len); | |
888 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
889 | dma_desc->desc3, | |
890 | TX_NORMAL_DESC3_TCPHDRLEN_POS, | |
891 | TX_NORMAL_DESC3_TCPHDRLEN_LEN, | |
892 | pkt_info->tcp_header_len / 4); | |
893 | ||
894 | pdata->stats.tx_tso_packets++; | |
895 | } else { | |
896 | /* Enable CRC and Pad Insertion */ | |
897 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
898 | dma_desc->desc3, | |
899 | TX_NORMAL_DESC3_CPC_POS, | |
900 | TX_NORMAL_DESC3_CPC_LEN, 0); | |
901 | ||
902 | /* Enable HW CSUM */ | |
903 | if (csum) | |
904 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
905 | dma_desc->desc3, | |
906 | TX_NORMAL_DESC3_CIC_POS, | |
907 | TX_NORMAL_DESC3_CIC_LEN, | |
908 | 0x3); | |
909 | ||
910 | /* Set the total length to be transmitted */ | |
911 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
912 | dma_desc->desc3, | |
913 | TX_NORMAL_DESC3_FL_POS, | |
914 | TX_NORMAL_DESC3_FL_LEN, | |
915 | pkt_info->length); | |
916 | } | |
917 | ||
918 | for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) { | |
919 | cur_index++; | |
920 | desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); | |
921 | dma_desc = desc_data->dma_desc; | |
922 | ||
923 | /* Update buffer address */ | |
924 | dma_desc->desc0 = | |
925 | cpu_to_le32(lower_32_bits(desc_data->skb_dma)); | |
926 | dma_desc->desc1 = | |
927 | cpu_to_le32(upper_32_bits(desc_data->skb_dma)); | |
928 | ||
929 | /* Update the buffer length */ | |
930 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
931 | dma_desc->desc2, | |
932 | TX_NORMAL_DESC2_HL_B1L_POS, | |
933 | TX_NORMAL_DESC2_HL_B1L_LEN, | |
934 | desc_data->skb_dma_len); | |
935 | ||
936 | /* Set OWN bit */ | |
937 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
938 | dma_desc->desc3, | |
939 | TX_NORMAL_DESC3_OWN_POS, | |
940 | TX_NORMAL_DESC3_OWN_LEN, 1); | |
941 | ||
942 | /* Mark it as NORMAL descriptor */ | |
943 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
944 | dma_desc->desc3, | |
945 | TX_NORMAL_DESC3_CTXT_POS, | |
946 | TX_NORMAL_DESC3_CTXT_LEN, 0); | |
947 | ||
948 | /* Enable HW CSUM */ | |
949 | if (csum) | |
950 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
951 | dma_desc->desc3, | |
952 | TX_NORMAL_DESC3_CIC_POS, | |
953 | TX_NORMAL_DESC3_CIC_LEN, | |
954 | 0x3); | |
955 | } | |
956 | ||
957 | /* Set LAST bit for the last descriptor */ | |
958 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
959 | dma_desc->desc3, | |
960 | TX_NORMAL_DESC3_LD_POS, | |
961 | TX_NORMAL_DESC3_LD_LEN, 1); | |
962 | ||
963 | /* Set IC bit based on Tx coalescing settings */ | |
964 | if (tx_set_ic) | |
965 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
966 | dma_desc->desc2, | |
967 | TX_NORMAL_DESC2_IC_POS, | |
968 | TX_NORMAL_DESC2_IC_LEN, 1); | |
969 | ||
970 | /* Save the Tx info to report back during cleanup */ | |
971 | desc_data->tx.packets = pkt_info->tx_packets; | |
972 | desc_data->tx.bytes = pkt_info->tx_bytes; | |
973 | ||
974 | /* In case the Tx DMA engine is running, make sure everything | |
975 | * is written to the descriptor(s) before setting the OWN bit | |
976 | * for the first descriptor | |
977 | */ | |
978 | dma_wmb(); | |
979 | ||
980 | /* Set OWN bit for the first descriptor */ | |
981 | desc_data = XLGMAC_GET_DESC_DATA(ring, start_index); | |
982 | dma_desc = desc_data->dma_desc; | |
983 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
984 | dma_desc->desc3, | |
985 | TX_NORMAL_DESC3_OWN_POS, | |
986 | TX_NORMAL_DESC3_OWN_LEN, 1); | |
987 | ||
988 | if (netif_msg_tx_queued(pdata)) | |
989 | xlgmac_dump_tx_desc(pdata, ring, start_index, | |
990 | pkt_info->desc_count, 1); | |
991 | ||
992 | /* Make sure ownership is written to the descriptor */ | |
993 | smp_wmb(); | |
994 | ||
995 | ring->cur = cur_index + 1; | |
996 | if (!pkt_info->skb->xmit_more || | |
997 | netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, | |
998 | channel->queue_index))) | |
999 | xlgmac_tx_start_xmit(channel, ring); | |
1000 | else | |
1001 | ring->tx.xmit_more = 1; | |
1002 | ||
1003 | XLGMAC_PR("%s: descriptors %u to %u written\n", | |
1004 | channel->name, start_index & (ring->dma_desc_count - 1), | |
1005 | (ring->cur - 1) & (ring->dma_desc_count - 1)); | |
1006 | } | |
1007 | ||
1008 | static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info, | |
1009 | struct xlgmac_dma_desc *dma_desc) | |
1010 | { | |
1011 | u32 tsa, tsd; | |
1012 | u64 nsec; | |
1013 | ||
1014 | tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, | |
1015 | RX_CONTEXT_DESC3_TSA_POS, | |
1016 | RX_CONTEXT_DESC3_TSA_LEN); | |
1017 | tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, | |
1018 | RX_CONTEXT_DESC3_TSD_POS, | |
1019 | RX_CONTEXT_DESC3_TSD_LEN); | |
1020 | if (tsa && !tsd) { | |
1021 | nsec = le32_to_cpu(dma_desc->desc1); | |
1022 | nsec <<= 32; | |
1023 | nsec |= le32_to_cpu(dma_desc->desc0); | |
1024 | if (nsec != 0xffffffffffffffffULL) { | |
1025 | pkt_info->rx_tstamp = nsec; | |
1026 | pkt_info->attributes = XLGMAC_SET_REG_BITS( | |
1027 | pkt_info->attributes, | |
1028 | RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS, | |
1029 | RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, | |
1030 | 1); | |
1031 | } | |
1032 | } | |
1033 | } | |
1034 | ||
/* Return a Tx descriptor to its idle, software-owned state. */
static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
{
	struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;

	/* Reset the Tx descriptor by zeroing all four words:
	 *   desc0/desc1 - buffer 1 address (lo/hi) cleared
	 *   desc2       - control bits (IC, TTSE, B2L & B1L) cleared
	 *   desc3       - control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 *                 cleared; clearing OWN hands the descriptor
	 *                 back to software
	 */
	dma_desc->desc0 = 0;
	dma_desc->desc1 = 0;
	dma_desc->desc2 = 0;
	dma_desc->desc3 = 0;

	/* Make sure ownership is written to the descriptor before the
	 * DMA engine can observe it
	 */
	dma_wmb();
}
1053 | ||
/* Prepare a channel's Tx descriptor ring for use: reset every
 * descriptor to a zeroed, software-owned state and program the DMA
 * engine with the ring length and base address.
 */
static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
{
	struct xlgmac_ring *ring = channel->tx_ring;
	struct xlgmac_desc_data *desc_data;
	int start_index = ring->cur;
	int i;

	/* Initialize all descriptors */
	for (i = 0; i < ring->dma_desc_count; i++) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xlgmac_tx_desc_reset(desc_data);
	}

	/* Update the total number of Tx descriptors (register takes
	 * count minus one)
	 */
	writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));

	/* Update the starting address of descriptor ring */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
	writel(upper_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
}
1079 | ||
/* Re-arm an Rx descriptor with fresh buffer addresses and hand it to
 * the hardware. Must only set OWN after all other fields are visible
 * to the DMA engine, hence the barriers below.
 */
static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
				 struct xlgmac_desc_data *desc_data,
				 unsigned int index)
{
	struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int rx_usecs = pdata->rx_usecs;
	dma_addr_t hdr_dma, buf_dma;
	unsigned int inte;

	/* Decide whether this descriptor should raise a completion
	 * interrupt (INTE)
	 */
	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Interrupt on every rx_frames-th descriptor only; with
		 * timer-only coalescing (rx_frames == 0) no per-frame
		 * interrupt is requested here
		 */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 * Set buffer 1 (lo) address to header dma address (lo)
	 * Set buffer 1 (hi) address to header dma address (hi)
	 * Set buffer 2 (lo) address to buffer dma address (lo)
	 * Set buffer 2 (hi) address to buffer dma address (hi) and
	 * set control bits OWN and INTE
	 */
	hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
	buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
	dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				RX_NORMAL_DESC3_INTE_POS,
				RX_NORMAL_DESC3_INTE_LEN,
				inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	/* Hand the descriptor to the hardware */
	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				RX_NORMAL_DESC3_OWN_POS,
				RX_NORMAL_DESC3_OWN_LEN,
				1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
1136 | ||
/* Prepare a channel's Rx descriptor ring: reset every descriptor
 * (which also hands it to the hardware) and program the DMA engine
 * with the ring length, base address and tail pointer.
 */
static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	unsigned int start_index = ring->cur;
	struct xlgmac_desc_data *desc_data;
	unsigned int i;

	/* Initialize all descriptors */
	for (i = 0; i < ring->dma_desc_count; i++) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xlgmac_rx_desc_reset(pdata, desc_data, i);
	}

	/* Update the total number of Rx descriptors (register takes
	 * count minus one)
	 */
	writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));

	/* Update the starting address of descriptor ring */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
	writel(upper_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));

	/* Update the Rx Descriptor Tail Pointer; pointing it at the
	 * last descriptor makes the whole ring available to the DMA
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
					 ring->dma_desc_count - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}
1169 | ||
1170 | static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc) | |
1171 | { | |
1172 | /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ | |
1173 | return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, | |
1174 | TX_NORMAL_DESC3_CTXT_POS, | |
1175 | TX_NORMAL_DESC3_CTXT_LEN); | |
1176 | } | |
1177 | ||
1178 | static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc) | |
1179 | { | |
1180 | /* Rx and Tx share LD bit, so check TDES3.LD bit */ | |
1181 | return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, | |
1182 | TX_NORMAL_DESC3_LD_POS, | |
1183 | TX_NORMAL_DESC3_LD_LEN); | |
1184 | } | |
1185 | ||
1186 | static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata) | |
1187 | { | |
1188 | unsigned int max_q_count, q_count; | |
1189 | unsigned int reg, regval; | |
1190 | unsigned int i; | |
1191 | ||
1192 | /* Clear MTL flow control */ | |
1193 | for (i = 0; i < pdata->rx_q_count; i++) { | |
1194 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1195 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, | |
1196 | MTL_Q_RQOMR_EHFC_LEN, 0); | |
1197 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1198 | } | |
1199 | ||
1200 | /* Clear MAC flow control */ | |
1201 | max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES; | |
1202 | q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); | |
1203 | reg = MAC_Q0TFCR; | |
1204 | for (i = 0; i < q_count; i++) { | |
1205 | regval = readl(pdata->mac_regs + reg); | |
1206 | regval = XLGMAC_SET_REG_BITS(regval, | |
1207 | MAC_Q0TFCR_TFE_POS, | |
1208 | MAC_Q0TFCR_TFE_LEN, | |
1209 | 0); | |
1210 | writel(regval, pdata->mac_regs + reg); | |
1211 | ||
1212 | reg += MAC_QTFCR_INC; | |
1213 | } | |
1214 | ||
1215 | return 0; | |
1216 | } | |
1217 | ||
1218 | static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata) | |
1219 | { | |
1220 | unsigned int max_q_count, q_count; | |
1221 | unsigned int reg, regval; | |
1222 | unsigned int i; | |
1223 | ||
1224 | /* Set MTL flow control */ | |
1225 | for (i = 0; i < pdata->rx_q_count; i++) { | |
1226 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1227 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, | |
1228 | MTL_Q_RQOMR_EHFC_LEN, 1); | |
1229 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1230 | } | |
1231 | ||
1232 | /* Set MAC flow control */ | |
1233 | max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES; | |
1234 | q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); | |
1235 | reg = MAC_Q0TFCR; | |
1236 | for (i = 0; i < q_count; i++) { | |
1237 | regval = readl(pdata->mac_regs + reg); | |
1238 | ||
1239 | /* Enable transmit flow control */ | |
1240 | regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS, | |
1241 | MAC_Q0TFCR_TFE_LEN, 1); | |
1242 | /* Set pause time */ | |
1243 | regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS, | |
1244 | MAC_Q0TFCR_PT_LEN, 0xffff); | |
1245 | ||
1246 | writel(regval, pdata->mac_regs + reg); | |
1247 | ||
1248 | reg += MAC_QTFCR_INC; | |
1249 | } | |
1250 | ||
1251 | return 0; | |
1252 | } | |
1253 | ||
1254 | static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata) | |
1255 | { | |
1256 | u32 regval; | |
1257 | ||
1258 | regval = readl(pdata->mac_regs + MAC_RFCR); | |
1259 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, | |
1260 | MAC_RFCR_RFE_LEN, 0); | |
1261 | writel(regval, pdata->mac_regs + MAC_RFCR); | |
1262 | ||
1263 | return 0; | |
1264 | } | |
1265 | ||
1266 | static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata) | |
1267 | { | |
1268 | u32 regval; | |
1269 | ||
1270 | regval = readl(pdata->mac_regs + MAC_RFCR); | |
1271 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, | |
1272 | MAC_RFCR_RFE_LEN, 1); | |
1273 | writel(regval, pdata->mac_regs + MAC_RFCR); | |
1274 | ||
1275 | return 0; | |
1276 | } | |
1277 | ||
1278 | static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata) | |
1279 | { | |
1280 | if (pdata->tx_pause) | |
1281 | xlgmac_enable_tx_flow_control(pdata); | |
1282 | else | |
1283 | xlgmac_disable_tx_flow_control(pdata); | |
1284 | ||
1285 | return 0; | |
1286 | } | |
1287 | ||
1288 | static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata) | |
1289 | { | |
1290 | if (pdata->rx_pause) | |
1291 | xlgmac_enable_rx_flow_control(pdata); | |
1292 | else | |
1293 | xlgmac_disable_rx_flow_control(pdata); | |
1294 | ||
1295 | return 0; | |
1296 | } | |
1297 | ||
1298 | static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata) | |
1299 | { | |
1300 | struct xlgmac_channel *channel; | |
1301 | unsigned int i; | |
1302 | u32 regval; | |
1303 | ||
1304 | channel = pdata->channel_head; | |
1305 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1306 | if (!channel->rx_ring) | |
1307 | break; | |
1308 | ||
1309 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT)); | |
1310 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS, | |
1311 | DMA_CH_RIWT_RWT_LEN, | |
1312 | pdata->rx_riwt); | |
1313 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT)); | |
1314 | } | |
1315 | ||
1316 | return 0; | |
1317 | } | |
1318 | ||
/* Push both pause-frame directions (Tx then Rx) from the software
 * settings in pdata into the hardware.
 */
static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
{
	xlgmac_config_tx_flow_control(pdata);
	xlgmac_config_rx_flow_control(pdata);
}
1324 | ||
1325 | static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata) | |
1326 | { | |
1327 | unsigned int i; | |
1328 | u32 regval; | |
1329 | ||
1330 | for (i = 0; i < pdata->rx_q_count; i++) { | |
1331 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1332 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS, | |
1333 | MTL_Q_RQOMR_FEP_LEN, 1); | |
1334 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1335 | } | |
1336 | } | |
1337 | ||
1338 | static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata) | |
1339 | { | |
1340 | unsigned int i; | |
1341 | u32 regval; | |
1342 | ||
1343 | for (i = 0; i < pdata->rx_q_count; i++) { | |
1344 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1345 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS, | |
1346 | MTL_Q_RQOMR_FUP_LEN, 1); | |
1347 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1348 | } | |
1349 | } | |
1350 | ||
/* Tx interrupt coalescing requires no hardware programming in this
 * driver; kept as a stub so the configuration call sequence stays
 * uniform. Always returns 0 (success).
 */
static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
{
	return 0;
}
1355 | ||
1356 | static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata) | |
1357 | { | |
1358 | struct xlgmac_channel *channel; | |
1359 | unsigned int i; | |
1360 | u32 regval; | |
1361 | ||
1362 | channel = pdata->channel_head; | |
1363 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1364 | if (!channel->rx_ring) | |
1365 | break; | |
1366 | ||
1367 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); | |
1368 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS, | |
1369 | DMA_CH_RCR_RBSZ_LEN, | |
1370 | pdata->rx_buf_size); | |
1371 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); | |
1372 | } | |
1373 | } | |
1374 | ||
1375 | static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata) | |
1376 | { | |
1377 | struct xlgmac_channel *channel; | |
1378 | unsigned int i; | |
1379 | u32 regval; | |
1380 | ||
1381 | channel = pdata->channel_head; | |
1382 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1383 | if (!channel->tx_ring) | |
1384 | break; | |
1385 | ||
1386 | if (pdata->hw_feat.tso) { | |
1387 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1388 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS, | |
1389 | DMA_CH_TCR_TSE_LEN, 1); | |
1390 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1391 | } | |
1392 | } | |
1393 | } | |
1394 | ||
/* Enable split-header (SPH) mode: set the per-channel SPH bit on
 * every channel with an Rx ring, then program the header split
 * buffer size (HDSMS) in the MAC Rx control register.
 */
static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
					     DMA_CH_CR_SPH_LEN, 1);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
	}

	/* Header buffer size applies MAC-wide */
	regval = readl(pdata->mac_regs + MAC_RCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
				     MAC_RCR_HDSMS_LEN,
				     XLGMAC_SPH_HDSMS_SIZE);
	writel(regval, pdata->mac_regs + MAC_RCR);
}
1418 | ||
1419 | static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata, | |
1420 | unsigned int usec) | |
1421 | { | |
1422 | unsigned long rate; | |
1423 | unsigned int ret; | |
1424 | ||
1425 | rate = pdata->sysclk_rate; | |
1426 | ||
1427 | /* Convert the input usec value to the watchdog timer value. Each | |
1428 | * watchdog timer value is equivalent to 256 clock cycles. | |
1429 | * Calculate the required value as: | |
1430 | * ( usec * ( system_clock_mhz / 10^6 ) / 256 | |
1431 | */ | |
1432 | ret = (usec * (rate / 1000000)) / 256; | |
1433 | ||
1434 | return ret; | |
1435 | } | |
1436 | ||
1437 | static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata, | |
1438 | unsigned int riwt) | |
1439 | { | |
1440 | unsigned long rate; | |
1441 | unsigned int ret; | |
1442 | ||
1443 | rate = pdata->sysclk_rate; | |
1444 | ||
1445 | /* Convert the input watchdog timer value to the usec value. Each | |
1446 | * watchdog timer value is equivalent to 256 clock cycles. | |
1447 | * Calculate the required value as: | |
1448 | * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) | |
1449 | */ | |
1450 | ret = (riwt * 256) / (rate / 1000000); | |
1451 | ||
1452 | return ret; | |
1453 | } | |
1454 | ||
1455 | static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata, | |
1456 | unsigned int val) | |
1457 | { | |
1458 | unsigned int i; | |
1459 | u32 regval; | |
1460 | ||
1461 | for (i = 0; i < pdata->rx_q_count; i++) { | |
1462 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1463 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS, | |
1464 | MTL_Q_RQOMR_RTC_LEN, val); | |
1465 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1466 | } | |
1467 | ||
1468 | return 0; | |
1469 | } | |
1470 | ||
/* Configure the MTL scheduling algorithms: Tx uses ETS weighted
 * round robin with every traffic class given equal weight; Rx
 * arbitration uses strict priority.
 */
static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	/* Set Tx to weighted round robin scheduling algorithm */
	regval = readl(pdata->mac_regs + MTL_OMR);
	regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
				     MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
	writel(regval, pdata->mac_regs + MTL_OMR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
					     MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));

		/* Quantum/weight of 1 for every class gives equal sharing */
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
					     MTL_TC_QWR_QW_LEN, 1);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
	}

	/* Set Rx to strict priority algorithm */
	regval = readl(pdata->mac_regs + MTL_OMR);
	regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
				     MTL_OMR_RAA_LEN, MTL_RAA_SP);
	writel(regval, pdata->mac_regs + MTL_OMR);
}
1501 | ||
/* Build the static queue/priority/channel mappings:
 *   - Tx queues are distributed evenly over the traffic classes
 *     (assumes Tx queue count >= traffic class count)
 *   - the 8 VLAN priority values are distributed over the Rx queues
 *   - each MTL Rx queue is mapped one-to-one onto its DMA channel
 */
static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata)
{
	unsigned int ppq, ppq_extra, prio, prio_queues;
	unsigned int qptc, qptc_extra, queue;
	unsigned int reg, regval;
	unsigned int mask;
	unsigned int i, j;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			regval = readl(XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			regval = XLGMAC_SET_REG_BITS(regval,
						     MTL_Q_TQOMR_Q2TCMAP_POS,
						     MTL_Q_TQOMR_Q2TCMAP_LEN,
						     i);
			writel(regval, XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			queue++;
		}

		/* The first qptc_extra classes absorb one extra queue each
		 * to use up the remainder
		 */
		if (i < qptc_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			regval = readl(XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			regval = XLGMAC_SET_REG_BITS(regval,
						     MTL_Q_TQOMR_Q2TCMAP_POS,
						     MTL_Q_TQOMR_Q2TCMAP_LEN,
						     i);
			writel(regval, XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			queue++;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
			    pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	regval = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			prio++;
		}

		if (i < ppq_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			prio++;
		}

		/* Each MAC_RQC2 register packs the priority masks of
		 * MAC_RQC2_Q_PER_REG queues, 8 bits per queue
		 */
		regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		/* Keep accumulating until the register is full or this
		 * was the last queue, then flush it out
		 */
		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		writel(regval, pdata->mac_regs + reg);
		reg += MAC_RQC2_INC;
		regval = 0;
	}

	/* Configure one to one, MTL Rx queue to DMA Rx channel mapping
	 * ie Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
	 */
	reg = MTL_RQDCM0R;
	regval = readl(pdata->mac_regs + reg);
	regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
		   MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
	writel(regval, pdata->mac_regs + reg);

	reg += MTL_RQDCM_INC;
	regval = readl(pdata->mac_regs + reg);
	regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
		   MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
	writel(regval, pdata->mac_regs + reg);

	reg += MTL_RQDCM_INC;
	regval = readl(pdata->mac_regs + reg);
	regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH |
		   MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH);
	writel(regval, pdata->mac_regs + reg);
}
1601 | ||
1602 | static unsigned int xlgmac_calculate_per_queue_fifo( | |
1603 | unsigned int fifo_size, | |
1604 | unsigned int queue_count) | |
1605 | { | |
1606 | unsigned int q_fifo_size; | |
1607 | unsigned int p_fifo; | |
1608 | ||
1609 | /* Calculate the configured fifo size */ | |
1610 | q_fifo_size = 1 << (fifo_size + 7); | |
1611 | ||
1612 | /* The configured value may not be the actual amount of fifo RAM */ | |
1613 | q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size); | |
1614 | ||
1615 | q_fifo_size = q_fifo_size / queue_count; | |
1616 | ||
1617 | /* Each increment in the queue fifo size represents 256 bytes of | |
1618 | * fifo, with 0 representing 256 bytes. Distribute the fifo equally | |
1619 | * between the queues. | |
1620 | */ | |
1621 | p_fifo = q_fifo_size / 256; | |
1622 | if (p_fifo) | |
1623 | p_fifo--; | |
1624 | ||
1625 | return p_fifo; | |
1626 | } | |
1627 | ||
1628 | static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata) | |
1629 | { | |
1630 | unsigned int fifo_size; | |
1631 | unsigned int i; | |
1632 | u32 regval; | |
1633 | ||
1634 | fifo_size = xlgmac_calculate_per_queue_fifo( | |
1635 | pdata->hw_feat.tx_fifo_size, | |
1636 | pdata->tx_q_count); | |
1637 | ||
1638 | for (i = 0; i < pdata->tx_q_count; i++) { | |
1639 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
1640 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS, | |
1641 | MTL_Q_TQOMR_TQS_LEN, fifo_size); | |
1642 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
1643 | } | |
1644 | ||
1645 | netif_info(pdata, drv, pdata->netdev, | |
1646 | "%d Tx hardware queues, %d byte fifo per queue\n", | |
1647 | pdata->tx_q_count, ((fifo_size + 1) * 256)); | |
1648 | } | |
1649 | ||
1650 | static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata) | |
1651 | { | |
1652 | unsigned int fifo_size; | |
1653 | unsigned int i; | |
1654 | u32 regval; | |
1655 | ||
1656 | fifo_size = xlgmac_calculate_per_queue_fifo( | |
1657 | pdata->hw_feat.rx_fifo_size, | |
1658 | pdata->rx_q_count); | |
1659 | ||
1660 | for (i = 0; i < pdata->rx_q_count; i++) { | |
1661 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1662 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS, | |
1663 | MTL_Q_RQOMR_RQS_LEN, fifo_size); | |
1664 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1665 | } | |
1666 | ||
1667 | netif_info(pdata, drv, pdata->netdev, | |
1668 | "%d Rx hardware queues, %d byte fifo per queue\n", | |
1669 | pdata->rx_q_count, ((fifo_size + 1) * 256)); | |
1670 | } | |
1671 | ||
/* Program the fifo fill levels at which Rx flow control is asserted
 * (RFA) and de-asserted (RFD) for every Rx queue. The encoded values
 * 2 and 4 correspond to the 4k/6k levels stated in the comments --
 * presumably the databook's stepped encoding; confirm against the
 * XLGMAC databook before changing.
 */
static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
		/* Activate flow control when less than 4k left in fifo */
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
					     MTL_Q_RQFCR_RFA_LEN, 2);
		/* De-activate flow control when more than 6k left in fifo */
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
					     MTL_Q_RQFCR_RFD_LEN, 4);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
	}
}
1688 | ||
1689 | static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata, | |
1690 | unsigned int val) | |
1691 | { | |
1692 | unsigned int i; | |
1693 | u32 regval; | |
1694 | ||
1695 | for (i = 0; i < pdata->tx_q_count; i++) { | |
1696 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
1697 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS, | |
1698 | MTL_Q_TQOMR_TTC_LEN, val); | |
1699 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
1700 | } | |
1701 | ||
1702 | return 0; | |
1703 | } | |
1704 | ||
1705 | static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata, | |
1706 | unsigned int val) | |
1707 | { | |
1708 | unsigned int i; | |
1709 | u32 regval; | |
1710 | ||
1711 | for (i = 0; i < pdata->rx_q_count; i++) { | |
1712 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1713 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS, | |
1714 | MTL_Q_RQOMR_RSF_LEN, val); | |
1715 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1716 | } | |
1717 | ||
1718 | return 0; | |
1719 | } | |
1720 | ||
1721 | static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata, | |
1722 | unsigned int val) | |
1723 | { | |
1724 | unsigned int i; | |
1725 | u32 regval; | |
1726 | ||
1727 | for (i = 0; i < pdata->tx_q_count; i++) { | |
1728 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
1729 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS, | |
1730 | MTL_Q_TQOMR_TSF_LEN, val); | |
1731 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
1732 | } | |
1733 | ||
1734 | return 0; | |
1735 | } | |
1736 | ||
1737 | static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata) | |
1738 | { | |
1739 | struct xlgmac_channel *channel; | |
1740 | unsigned int i; | |
1741 | u32 regval; | |
1742 | ||
1743 | channel = pdata->channel_head; | |
1744 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1745 | if (!channel->tx_ring) | |
1746 | break; | |
1747 | ||
1748 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1749 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS, | |
1750 | DMA_CH_TCR_OSP_LEN, | |
1751 | pdata->tx_osp_mode); | |
1752 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1753 | } | |
1754 | ||
1755 | return 0; | |
1756 | } | |
1757 | ||
1758 | static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata) | |
1759 | { | |
1760 | struct xlgmac_channel *channel; | |
1761 | unsigned int i; | |
1762 | u32 regval; | |
1763 | ||
1764 | channel = pdata->channel_head; | |
1765 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1766 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR)); | |
1767 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS, | |
1768 | DMA_CH_CR_PBLX8_LEN, | |
1769 | pdata->pblx8); | |
1770 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR)); | |
1771 | } | |
1772 | ||
1773 | return 0; | |
1774 | } | |
1775 | ||
1776 | static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata) | |
1777 | { | |
1778 | u32 regval; | |
1779 | ||
1780 | regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR)); | |
1781 | regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, | |
1782 | DMA_CH_TCR_PBL_LEN); | |
1783 | return regval; | |
1784 | } | |
1785 | ||
1786 | static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata) | |
1787 | { | |
1788 | struct xlgmac_channel *channel; | |
1789 | unsigned int i; | |
1790 | u32 regval; | |
1791 | ||
1792 | channel = pdata->channel_head; | |
1793 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1794 | if (!channel->tx_ring) | |
1795 | break; | |
1796 | ||
1797 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1798 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, | |
1799 | DMA_CH_TCR_PBL_LEN, | |
1800 | pdata->tx_pbl); | |
1801 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1802 | } | |
1803 | ||
1804 | return 0; | |
1805 | } | |
1806 | ||
1807 | static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata) | |
1808 | { | |
1809 | u32 regval; | |
1810 | ||
1811 | regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR)); | |
1812 | regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, | |
1813 | DMA_CH_RCR_PBL_LEN); | |
1814 | return regval; | |
1815 | } | |
1816 | ||
1817 | static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata) | |
1818 | { | |
1819 | struct xlgmac_channel *channel; | |
1820 | unsigned int i; | |
1821 | u32 regval; | |
1822 | ||
1823 | channel = pdata->channel_head; | |
1824 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1825 | if (!channel->rx_ring) | |
1826 | break; | |
1827 | ||
1828 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); | |
1829 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, | |
1830 | DMA_CH_RCR_PBL_LEN, | |
1831 | pdata->rx_pbl); | |
1832 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); | |
1833 | } | |
1834 | ||
1835 | return 0; | |
1836 | } | |
1837 | ||
1838 | static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo) | |
1839 | { | |
1840 | bool read_hi; | |
1841 | u64 val; | |
1842 | ||
1843 | switch (reg_lo) { | |
1844 | /* These registers are always 64 bit */ | |
1845 | case MMC_TXOCTETCOUNT_GB_LO: | |
1846 | case MMC_TXOCTETCOUNT_G_LO: | |
1847 | case MMC_RXOCTETCOUNT_GB_LO: | |
1848 | case MMC_RXOCTETCOUNT_G_LO: | |
1849 | read_hi = true; | |
1850 | break; | |
1851 | ||
1852 | default: | |
1853 | read_hi = false; | |
1854 | } | |
1855 | ||
1856 | val = (u64)readl(pdata->mac_regs + reg_lo); | |
1857 | ||
1858 | if (read_hi) | |
1859 | val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32); | |
1860 | ||
1861 | return val; | |
1862 | } | |
1863 | ||
/* Service the Tx MMC (MAC Management Counters) interrupt.
 *
 * Reading MMC_TISR identifies which Tx counters crossed their interrupt
 * threshold; each flagged counter is read via xlgmac_mmc_read() and
 * accumulated into the software stats (the hardware counters are
 * configured reset-on-read in xlgmac_config_mmc()).
 */
static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata)
{
	unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR);
	struct xlgmac_stats *stats = &pdata->stats;

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXOCTETCOUNT_GB_POS,
				MMC_TISR_TXOCTETCOUNT_GB_LEN))
		stats->txoctetcount_gb +=
			xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXFRAMECOUNT_GB_POS,
				MMC_TISR_TXFRAMECOUNT_GB_LEN))
		stats->txframecount_gb +=
			xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXBROADCASTFRAMES_G_POS,
				MMC_TISR_TXBROADCASTFRAMES_G_LEN))
		stats->txbroadcastframes_g +=
			xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXMULTICASTFRAMES_G_POS,
				MMC_TISR_TXMULTICASTFRAMES_G_LEN))
		stats->txmulticastframes_g +=
			xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX64OCTETS_GB_POS,
				MMC_TISR_TX64OCTETS_GB_LEN))
		stats->tx64octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX65TO127OCTETS_GB_POS,
				MMC_TISR_TX65TO127OCTETS_GB_LEN))
		stats->tx65to127octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX128TO255OCTETS_GB_POS,
				MMC_TISR_TX128TO255OCTETS_GB_LEN))
		stats->tx128to255octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX256TO511OCTETS_GB_POS,
				MMC_TISR_TX256TO511OCTETS_GB_LEN))
		stats->tx256to511octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX512TO1023OCTETS_GB_POS,
				MMC_TISR_TX512TO1023OCTETS_GB_LEN))
		stats->tx512to1023octets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TX1024TOMAXOCTETS_GB_POS,
				MMC_TISR_TX1024TOMAXOCTETS_GB_LEN))
		stats->tx1024tomaxoctets_gb +=
			xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXUNICASTFRAMES_GB_POS,
				MMC_TISR_TXUNICASTFRAMES_GB_LEN))
		stats->txunicastframes_gb +=
			xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXMULTICASTFRAMES_GB_POS,
				MMC_TISR_TXMULTICASTFRAMES_GB_LEN))
		stats->txmulticastframes_gb +=
			xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	/* NOTE(review): the good-or-bad (GB) broadcast counter is folded
	 * into txbroadcastframes_g (there is no _gb stats field); same
	 * pattern appears in xlgmac_read_mmc_stats() — confirm this is
	 * the intended mapping.
	 */
	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXBROADCASTFRAMES_GB_POS,
				MMC_TISR_TXBROADCASTFRAMES_GB_LEN))
		stats->txbroadcastframes_g +=
			xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXUNDERFLOWERROR_POS,
				MMC_TISR_TXUNDERFLOWERROR_LEN))
		stats->txunderflowerror +=
			xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXOCTETCOUNT_G_POS,
				MMC_TISR_TXOCTETCOUNT_G_LEN))
		stats->txoctetcount_g +=
			xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXFRAMECOUNT_G_POS,
				MMC_TISR_TXFRAMECOUNT_G_LEN))
		stats->txframecount_g +=
			xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXPAUSEFRAMES_POS,
				MMC_TISR_TXPAUSEFRAMES_LEN))
		stats->txpauseframes +=
			xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_TISR_TXVLANFRAMES_G_POS,
				MMC_TISR_TXVLANFRAMES_G_LEN))
		stats->txvlanframes_g +=
			xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
1977 | ||
/* Service the Rx MMC (MAC Management Counters) interrupt.
 *
 * Reading MMC_RISR identifies which Rx counters crossed their interrupt
 * threshold; each flagged counter is read via xlgmac_mmc_read() and
 * accumulated into the software stats (counters are reset-on-read).
 * Note some error counters (runt, jabber, undersize, oversize,
 * watchdog) are single 32-bit registers with no _LO/_HI pair.
 */
static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata)
{
	unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR);
	struct xlgmac_stats *stats = &pdata->stats;

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXFRAMECOUNT_GB_POS,
				MMC_RISR_RXFRAMECOUNT_GB_LEN))
		stats->rxframecount_gb +=
			xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXOCTETCOUNT_GB_POS,
				MMC_RISR_RXOCTETCOUNT_GB_LEN))
		stats->rxoctetcount_gb +=
			xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXOCTETCOUNT_G_POS,
				MMC_RISR_RXOCTETCOUNT_G_LEN))
		stats->rxoctetcount_g +=
			xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXBROADCASTFRAMES_G_POS,
				MMC_RISR_RXBROADCASTFRAMES_G_LEN))
		stats->rxbroadcastframes_g +=
			xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXMULTICASTFRAMES_G_POS,
				MMC_RISR_RXMULTICASTFRAMES_G_LEN))
		stats->rxmulticastframes_g +=
			xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXCRCERROR_POS,
				MMC_RISR_RXCRCERROR_LEN))
		stats->rxcrcerror +=
			xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXRUNTERROR_POS,
				MMC_RISR_RXRUNTERROR_LEN))
		stats->rxrunterror +=
			xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXJABBERERROR_POS,
				MMC_RISR_RXJABBERERROR_LEN))
		stats->rxjabbererror +=
			xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXUNDERSIZE_G_POS,
				MMC_RISR_RXUNDERSIZE_G_LEN))
		stats->rxundersize_g +=
			xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXOVERSIZE_G_POS,
				MMC_RISR_RXOVERSIZE_G_LEN))
		stats->rxoversize_g +=
			xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX64OCTETS_GB_POS,
				MMC_RISR_RX64OCTETS_GB_LEN))
		stats->rx64octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX65TO127OCTETS_GB_POS,
				MMC_RISR_RX65TO127OCTETS_GB_LEN))
		stats->rx65to127octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX128TO255OCTETS_GB_POS,
				MMC_RISR_RX128TO255OCTETS_GB_LEN))
		stats->rx128to255octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX256TO511OCTETS_GB_POS,
				MMC_RISR_RX256TO511OCTETS_GB_LEN))
		stats->rx256to511octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX512TO1023OCTETS_GB_POS,
				MMC_RISR_RX512TO1023OCTETS_GB_LEN))
		stats->rx512to1023octets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RX1024TOMAXOCTETS_GB_POS,
				MMC_RISR_RX1024TOMAXOCTETS_GB_LEN))
		stats->rx1024tomaxoctets_gb +=
			xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXUNICASTFRAMES_G_POS,
				MMC_RISR_RXUNICASTFRAMES_G_LEN))
		stats->rxunicastframes_g +=
			xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXLENGTHERROR_POS,
				MMC_RISR_RXLENGTHERROR_LEN))
		stats->rxlengtherror +=
			xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXOUTOFRANGETYPE_POS,
				MMC_RISR_RXOUTOFRANGETYPE_LEN))
		stats->rxoutofrangetype +=
			xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXPAUSEFRAMES_POS,
				MMC_RISR_RXPAUSEFRAMES_LEN))
		stats->rxpauseframes +=
			xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXFIFOOVERFLOW_POS,
				MMC_RISR_RXFIFOOVERFLOW_LEN))
		stats->rxfifooverflow +=
			xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXVLANFRAMES_GB_POS,
				MMC_RISR_RXVLANFRAMES_GB_LEN))
		stats->rxvlanframes_gb +=
			xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XLGMAC_GET_REG_BITS(mmc_isr,
				MMC_RISR_RXWATCHDOGERROR_POS,
				MMC_RISR_RXWATCHDOGERROR_LEN))
		stats->rxwatchdogerror +=
			xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
2121 | ||
/* Snapshot every MMC hardware counter into the software stats.
 *
 * The counters are frozen (MMC_CR.MCF) for the duration of the read so
 * the set is self-consistent, then un-frozen. Counters are configured
 * reset-on-read, so each xlgmac_mmc_read() also clears the hardware
 * value and the results are accumulated with "+=".
 */
static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata)
{
	struct xlgmac_stats *stats = &pdata->stats;
	u32 regval;

	/* Freeze counters */
	regval = readl(pdata->mac_regs + MMC_CR);
	regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
				     MMC_CR_MCF_LEN, 1);
	writel(regval, pdata->mac_regs + MMC_CR);

	stats->txoctetcount_gb +=
		xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
		xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
		xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
		xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
		xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
		xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
		xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	/* NOTE(review): the GB broadcast counter is folded into the _g
	 * stats field (no _gb field exists); matches xlgmac_tx_mmc_int().
	 */
	stats->txbroadcastframes_g +=
		xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
		xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
		xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
		xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
		xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
		xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
		xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
		xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
		xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
		xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
		xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
		xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
		xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
		xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
		xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
		xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
		xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
		xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
		xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
		xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
		xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	regval = readl(pdata->mac_regs + MMC_CR);
	regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
				     MMC_CR_MCF_LEN, 0);
	writel(regval, pdata->mac_regs + MMC_CR);
}
2262 | ||
2263 | static void xlgmac_config_mmc(struct xlgmac_pdata *pdata) | |
2264 | { | |
2265 | u32 regval; | |
2266 | ||
2267 | regval = readl(pdata->mac_regs + MMC_CR); | |
2268 | /* Set counters to reset on read */ | |
2269 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS, | |
2270 | MMC_CR_ROR_LEN, 1); | |
2271 | /* Reset the counters */ | |
2272 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, | |
2273 | MMC_CR_CR_LEN, 1); | |
2274 | writel(regval, pdata->mac_regs + MMC_CR); | |
2275 | } | |
2276 | ||
2277 | static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type, | |
2278 | unsigned int index, unsigned int val) | |
2279 | { | |
2280 | unsigned int wait; | |
2281 | int ret = 0; | |
2282 | u32 regval; | |
2283 | ||
2284 | mutex_lock(&pdata->rss_mutex); | |
2285 | ||
2286 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), | |
2287 | MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN); | |
2288 | if (regval) { | |
2289 | ret = -EBUSY; | |
2290 | goto unlock; | |
2291 | } | |
2292 | ||
2293 | writel(val, pdata->mac_regs + MAC_RSSDR); | |
2294 | ||
2295 | regval = readl(pdata->mac_regs + MAC_RSSAR); | |
2296 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS, | |
2297 | MAC_RSSAR_RSSIA_LEN, index); | |
2298 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS, | |
2299 | MAC_RSSAR_ADDRT_LEN, type); | |
2300 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS, | |
2301 | MAC_RSSAR_CT_LEN, 0); | |
2302 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS, | |
2303 | MAC_RSSAR_OB_LEN, 1); | |
2304 | writel(regval, pdata->mac_regs + MAC_RSSAR); | |
2305 | ||
2306 | wait = 1000; | |
2307 | while (wait--) { | |
2308 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), | |
2309 | MAC_RSSAR_OB_POS, | |
2310 | MAC_RSSAR_OB_LEN); | |
2311 | if (!regval) | |
2312 | goto unlock; | |
2313 | ||
2314 | usleep_range(1000, 1500); | |
2315 | } | |
2316 | ||
2317 | ret = -EBUSY; | |
2318 | ||
2319 | unlock: | |
2320 | mutex_unlock(&pdata->rss_mutex); | |
2321 | ||
2322 | return ret; | |
2323 | } | |
2324 | ||
2325 | static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata) | |
2326 | { | |
2327 | unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); | |
2328 | unsigned int *key = (unsigned int *)&pdata->rss_key; | |
2329 | int ret; | |
2330 | ||
2331 | while (key_regs--) { | |
2332 | ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE, | |
2333 | key_regs, *key++); | |
2334 | if (ret) | |
2335 | return ret; | |
2336 | } | |
2337 | ||
2338 | return 0; | |
2339 | } | |
2340 | ||
2341 | static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata) | |
2342 | { | |
2343 | unsigned int i; | |
2344 | int ret; | |
2345 | ||
2346 | for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { | |
2347 | ret = xlgmac_write_rss_reg(pdata, | |
2348 | XLGMAC_RSS_LOOKUP_TABLE_TYPE, i, | |
2349 | pdata->rss_table[i]); | |
2350 | if (ret) | |
2351 | return ret; | |
2352 | } | |
2353 | ||
2354 | return 0; | |
2355 | } | |
2356 | ||
2357 | static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key) | |
2358 | { | |
2359 | memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); | |
2360 | ||
2361 | return xlgmac_write_rss_hash_key(pdata); | |
2362 | } | |
2363 | ||
2364 | static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata, | |
2365 | const u32 *table) | |
2366 | { | |
2367 | unsigned int i; | |
2368 | u32 tval; | |
2369 | ||
2370 | for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { | |
2371 | tval = table[i]; | |
2372 | pdata->rss_table[i] = XLGMAC_SET_REG_BITS( | |
2373 | pdata->rss_table[i], | |
2374 | MAC_RSSDR_DMCH_POS, | |
2375 | MAC_RSSDR_DMCH_LEN, | |
2376 | tval); | |
2377 | } | |
2378 | ||
2379 | return xlgmac_write_rss_lookup_table(pdata); | |
2380 | } | |
2381 | ||
2382 | static int xlgmac_enable_rss(struct xlgmac_pdata *pdata) | |
2383 | { | |
2384 | u32 regval; | |
2385 | int ret; | |
2386 | ||
2387 | if (!pdata->hw_feat.rss) | |
2388 | return -EOPNOTSUPP; | |
2389 | ||
2390 | /* Program the hash key */ | |
2391 | ret = xlgmac_write_rss_hash_key(pdata); | |
2392 | if (ret) | |
2393 | return ret; | |
2394 | ||
2395 | /* Program the lookup table */ | |
2396 | ret = xlgmac_write_rss_lookup_table(pdata); | |
2397 | if (ret) | |
2398 | return ret; | |
2399 | ||
2400 | /* Set the RSS options */ | |
2401 | writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR); | |
2402 | ||
2403 | /* Enable RSS */ | |
2404 | regval = readl(pdata->mac_regs + MAC_RSSCR); | |
2405 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, | |
2406 | MAC_RSSCR_RSSE_LEN, 1); | |
2407 | writel(regval, pdata->mac_regs + MAC_RSSCR); | |
2408 | ||
2409 | return 0; | |
2410 | } | |
2411 | ||
2412 | static int xlgmac_disable_rss(struct xlgmac_pdata *pdata) | |
2413 | { | |
2414 | u32 regval; | |
2415 | ||
2416 | if (!pdata->hw_feat.rss) | |
2417 | return -EOPNOTSUPP; | |
2418 | ||
2419 | regval = readl(pdata->mac_regs + MAC_RSSCR); | |
2420 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, | |
2421 | MAC_RSSCR_RSSE_LEN, 0); | |
2422 | writel(regval, pdata->mac_regs + MAC_RSSCR); | |
2423 | ||
2424 | return 0; | |
2425 | } | |
2426 | ||
2427 | static void xlgmac_config_rss(struct xlgmac_pdata *pdata) | |
2428 | { | |
2429 | int ret; | |
2430 | ||
2431 | if (!pdata->hw_feat.rss) | |
2432 | return; | |
2433 | ||
2434 | if (pdata->netdev->features & NETIF_F_RXHASH) | |
2435 | ret = xlgmac_enable_rss(pdata); | |
2436 | else | |
2437 | ret = xlgmac_disable_rss(pdata); | |
2438 | ||
2439 | if (ret) | |
2440 | netdev_err(pdata->netdev, | |
2441 | "error configuring RSS, RSS disabled\n"); | |
2442 | } | |
2443 | ||
2444 | static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata) | |
2445 | { | |
2446 | unsigned int dma_ch_isr, dma_ch_ier; | |
2447 | struct xlgmac_channel *channel; | |
2448 | unsigned int i; | |
2449 | ||
2450 | channel = pdata->channel_head; | |
2451 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
2452 | /* Clear all the interrupts which are set */ | |
2453 | dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR)); | |
2454 | writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR)); | |
2455 | ||
2456 | /* Clear all interrupt enable bits */ | |
2457 | dma_ch_ier = 0; | |
2458 | ||
2459 | /* Enable following interrupts | |
2460 | * NIE - Normal Interrupt Summary Enable | |
2461 | * AIE - Abnormal Interrupt Summary Enable | |
2462 | * FBEE - Fatal Bus Error Enable | |
2463 | */ | |
2464 | dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, | |
2465 | DMA_CH_IER_NIE_POS, | |
2466 | DMA_CH_IER_NIE_LEN, 1); | |
2467 | dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, | |
2468 | DMA_CH_IER_AIE_POS, | |
2469 | DMA_CH_IER_AIE_LEN, 1); | |
2470 | dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, | |
2471 | DMA_CH_IER_FBEE_POS, | |
2472 | DMA_CH_IER_FBEE_LEN, 1); | |
2473 | ||
2474 | if (channel->tx_ring) { | |
2475 | /* Enable the following Tx interrupts | |
2476 | * TIE - Transmit Interrupt Enable (unless using | |
2477 | * per channel interrupts) | |
2478 | */ | |
2479 | if (!pdata->per_channel_irq) | |
2480 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2481 | dma_ch_ier, | |
2482 | DMA_CH_IER_TIE_POS, | |
2483 | DMA_CH_IER_TIE_LEN, | |
2484 | 1); | |
2485 | } | |
2486 | if (channel->rx_ring) { | |
2487 | /* Enable following Rx interrupts | |
2488 | * RBUE - Receive Buffer Unavailable Enable | |
2489 | * RIE - Receive Interrupt Enable (unless using | |
2490 | * per channel interrupts) | |
2491 | */ | |
2492 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2493 | dma_ch_ier, | |
2494 | DMA_CH_IER_RBUE_POS, | |
2495 | DMA_CH_IER_RBUE_LEN, | |
2496 | 1); | |
2497 | if (!pdata->per_channel_irq) | |
2498 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2499 | dma_ch_ier, | |
2500 | DMA_CH_IER_RIE_POS, | |
2501 | DMA_CH_IER_RIE_LEN, | |
2502 | 1); | |
2503 | } | |
2504 | ||
2505 | writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2506 | } | |
2507 | } | |
2508 | ||
2509 | static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata) | |
2510 | { | |
2511 | unsigned int q_count, i; | |
2512 | unsigned int mtl_q_isr; | |
2513 | ||
2514 | q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); | |
2515 | for (i = 0; i < q_count; i++) { | |
2516 | /* Clear all the interrupts which are set */ | |
2517 | mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); | |
2518 | writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); | |
2519 | ||
2520 | /* No MTL interrupts to be enabled */ | |
2521 | writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER)); | |
2522 | } | |
2523 | } | |
2524 | ||
2525 | static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata) | |
2526 | { | |
2527 | unsigned int mac_ier = 0; | |
2528 | u32 regval; | |
2529 | ||
2530 | /* Enable Timestamp interrupt */ | |
2531 | mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS, | |
2532 | MAC_IER_TSIE_LEN, 1); | |
2533 | ||
2534 | writel(mac_ier, pdata->mac_regs + MAC_IER); | |
2535 | ||
2536 | /* Enable all counter interrupts */ | |
2537 | regval = readl(pdata->mac_regs + MMC_RIER); | |
2538 | regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS, | |
2539 | MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff); | |
2540 | writel(regval, pdata->mac_regs + MMC_RIER); | |
2541 | regval = readl(pdata->mac_regs + MMC_TIER); | |
2542 | regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS, | |
2543 | MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff); | |
2544 | writel(regval, pdata->mac_regs + MMC_TIER); | |
2545 | } | |
2546 | ||
2547 | static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata) | |
2548 | { | |
2549 | u32 regval; | |
2550 | ||
2551 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), | |
2552 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); | |
2553 | if (regval == 0x1) | |
2554 | return 0; | |
2555 | ||
2556 | regval = readl(pdata->mac_regs + MAC_TCR); | |
2557 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, | |
2558 | MAC_TCR_SS_LEN, 0x1); | |
2559 | writel(regval, pdata->mac_regs + MAC_TCR); | |
2560 | ||
2561 | return 0; | |
2562 | } | |
2563 | ||
2564 | static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata) | |
2565 | { | |
2566 | u32 regval; | |
2567 | ||
2568 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), | |
2569 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); | |
2570 | if (regval == 0) | |
2571 | return 0; | |
2572 | ||
2573 | regval = readl(pdata->mac_regs + MAC_TCR); | |
2574 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, | |
2575 | MAC_TCR_SS_LEN, 0); | |
2576 | writel(regval, pdata->mac_regs + MAC_TCR); | |
2577 | ||
2578 | return 0; | |
2579 | } | |
2580 | ||
2581 | static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata) | |
2582 | { | |
2583 | u32 regval; | |
2584 | ||
2585 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), | |
2586 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); | |
2587 | if (regval == 0x2) | |
2588 | return 0; | |
2589 | ||
2590 | regval = readl(pdata->mac_regs + MAC_TCR); | |
2591 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, | |
2592 | MAC_TCR_SS_LEN, 0x2); | |
2593 | writel(regval, pdata->mac_regs + MAC_TCR); | |
2594 | ||
2595 | return 0; | |
2596 | } | |
2597 | ||
2598 | static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata) | |
2599 | { | |
2600 | u32 regval; | |
2601 | ||
2602 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), | |
2603 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); | |
2604 | if (regval == 0x3) | |
2605 | return 0; | |
2606 | ||
2607 | regval = readl(pdata->mac_regs + MAC_TCR); | |
2608 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, | |
2609 | MAC_TCR_SS_LEN, 0x3); | |
2610 | writel(regval, pdata->mac_regs + MAC_TCR); | |
2611 | ||
2612 | return 0; | |
2613 | } | |
2614 | ||
2615 | static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata) | |
2616 | { | |
2617 | switch (pdata->phy_speed) { | |
2618 | case SPEED_100000: | |
2619 | xlgmac_set_xlgmii_100000_speed(pdata); | |
2620 | break; | |
2621 | ||
2622 | case SPEED_50000: | |
2623 | xlgmac_set_xlgmii_50000_speed(pdata); | |
2624 | break; | |
2625 | ||
2626 | case SPEED_40000: | |
2627 | xlgmac_set_xlgmii_40000_speed(pdata); | |
2628 | break; | |
2629 | ||
2630 | case SPEED_25000: | |
2631 | xlgmac_set_xlgmii_25000_speed(pdata); | |
2632 | break; | |
2633 | } | |
2634 | } | |
2635 | ||
/* Parse the Rx descriptor at ring->cur for @channel and populate
 * ring->pkt_info with what was received.
 *
 * Returns 1 if the descriptor is still owned by the DMA (nothing to read
 * yet), 0 once the descriptor has been consumed.  A context descriptor
 * yields only the Rx timestamp; a normal descriptor yields the packet
 * attributes (split-header length, RSS hash, VLAN tag, checksum status,
 * error flags) and the packet length.
 */
static int xlgmac_dev_read(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int err, etlt, l34t;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	dma_desc = desc_data->dma_desc;
	pkt_info = &ring->pkt_info;

	/* Check for data availability: OWN set means the hardware still
	 * holds this descriptor, so there is nothing to process yet.
	 */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_OWN_POS,
				   RX_NORMAL_DESC3_OWN_LEN))
		return 1;

	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();

	if (netif_msg_rx_status(pdata))
		xlgmac_dump_rx_desc(pdata, ring, ring->cur);

	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_CTXT_POS,
				   RX_NORMAL_DESC3_CTXT_LEN)) {
		/* Timestamp Context Descriptor: extract the timestamp and
		 * mark the packet as a (no longer pending) context descriptor.
		 */
		xlgmac_get_rx_tstamp(pkt_info, dma_desc);

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
				0);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
				0);

	/* Indicate if a Context Descriptor is next (CDA bit) */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_CDA_POS,
				   RX_NORMAL_DESC3_CDA_LEN))
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
				1);

	/* Get the header length (only present on the first descriptor of a
	 * packet; a nonzero value means split-header was performed).
	 */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_FD_POS,
				   RX_NORMAL_DESC3_FD_LEN)) {
		desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2,
							RX_NORMAL_DESC2_HL_POS,
							RX_NORMAL_DESC2_HL_LEN);
		if (desc_data->rx.hdr_len)
			pdata->stats.rx_split_header_packets++;
	}

	/* Get the RSS hash when the RSV (hash valid) bit is set */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_RSV_POS,
				   RX_NORMAL_DESC3_RSV_LEN)) {
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
				RX_PACKET_ATTRIBUTES_RSS_HASH_LEN,
				1);

		pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1);

		/* L34T reports the layer-3/4 packet type; map TCP/UDP over
		 * IPv4/IPv6 to an L4 hash, anything else to an L3 hash.
		 */
		l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
					      RX_NORMAL_DESC3_L34T_POS,
					      RX_NORMAL_DESC3_L34T_LEN);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
		case RX_DESC3_L34T_IPV4_UDP:
		case RX_DESC3_L34T_IPV6_TCP:
		case RX_DESC3_L34T_IPV6_UDP:
			pkt_info->rss_hash_type = PKT_HASH_TYPE_L4;
			break;
		default:
			pkt_info->rss_hash_type = PKT_HASH_TYPE_L3;
		}
	}

	/* Get the pkt_info length */
	desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
						   RX_NORMAL_DESC3_PL_POS,
						   RX_NORMAL_DESC3_PL_LEN);

	if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				    RX_NORMAL_DESC3_LD_POS,
				    RX_NORMAL_DESC3_LD_LEN)) {
		/* Not all the data has been transferred for this pkt_info;
		 * the rest arrives in subsequent descriptors.
		 */
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
				RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
				1);
		return 0;
	}

	/* This is the last of the data for this pkt_info */
	pkt_info->attributes = XLGMAC_SET_REG_BITS(
			pkt_info->attributes,
			RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
			RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
			0);

	/* Set checksum done indicator as appropriate (may be cleared again
	 * below if the descriptor reports a checksum error).
	 */
	if (netdev->features & NETIF_F_RXCSUM)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
				RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
				1);

	/* Check for errors (only valid in last descriptor).
	 * ES is the error summary; ETLT encodes the error/L2 packet type
	 * (magic values below — NOTE(review): confirm against the
	 * Synopsys XLGMAC databook).
	 */
	err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				     RX_NORMAL_DESC3_ES_POS,
				     RX_NORMAL_DESC3_ES_LEN);
	etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				      RX_NORMAL_DESC3_ETLT_POS,
				      RX_NORMAL_DESC3_ETLT_LEN);
	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		/* etlt 0x09 presumably marks a VLAN-tagged packet; grab the
		 * outer VLAN tag when Rx VLAN stripping is enabled.
		 */
		if ((etlt == 0x09) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
			pkt_info->vlan_ctag =
				XLGMAC_GET_REG_BITS_LE(dma_desc->desc0,
						       RX_NORMAL_DESC0_OVT_POS,
						       RX_NORMAL_DESC0_OVT_LEN);
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
				  pkt_info->vlan_ctag);
		}
	} else {
		/* etlt 0x05/0x06 presumably indicate checksum errors: drop
		 * the CSUM_DONE attribute so the stack verifies in software.
		 * Any other error type is reported as a frame error.
		 */
		if ((etlt == 0x05) || (etlt == 0x06))
			pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
					0);
		else
			pkt_info->errors = XLGMAC_SET_REG_BITS(
					pkt_info->errors,
					RX_PACKET_ERRORS_FRAME_POS,
					RX_PACKET_ERRORS_FRAME_LEN,
					1);
	}

	/* Debug trace of the consumed descriptor index */
	XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name,
		  ring->cur & (ring->dma_desc_count - 1), ring->cur);

	return 0;
}
2813 | ||
2814 | static int xlgmac_enable_int(struct xlgmac_channel *channel, | |
2815 | enum xlgmac_int int_id) | |
2816 | { | |
2817 | unsigned int dma_ch_ier; | |
2818 | ||
2819 | dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2820 | ||
2821 | switch (int_id) { | |
2822 | case XLGMAC_INT_DMA_CH_SR_TI: | |
2823 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2824 | dma_ch_ier, DMA_CH_IER_TIE_POS, | |
2825 | DMA_CH_IER_TIE_LEN, 1); | |
2826 | break; | |
2827 | case XLGMAC_INT_DMA_CH_SR_TPS: | |
2828 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2829 | dma_ch_ier, DMA_CH_IER_TXSE_POS, | |
2830 | DMA_CH_IER_TXSE_LEN, 1); | |
2831 | break; | |
2832 | case XLGMAC_INT_DMA_CH_SR_TBU: | |
2833 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2834 | dma_ch_ier, DMA_CH_IER_TBUE_POS, | |
2835 | DMA_CH_IER_TBUE_LEN, 1); | |
2836 | break; | |
2837 | case XLGMAC_INT_DMA_CH_SR_RI: | |
2838 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2839 | dma_ch_ier, DMA_CH_IER_RIE_POS, | |
2840 | DMA_CH_IER_RIE_LEN, 1); | |
2841 | break; | |
2842 | case XLGMAC_INT_DMA_CH_SR_RBU: | |
2843 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2844 | dma_ch_ier, DMA_CH_IER_RBUE_POS, | |
2845 | DMA_CH_IER_RBUE_LEN, 1); | |
2846 | break; | |
2847 | case XLGMAC_INT_DMA_CH_SR_RPS: | |
2848 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2849 | dma_ch_ier, DMA_CH_IER_RSE_POS, | |
2850 | DMA_CH_IER_RSE_LEN, 1); | |
2851 | break; | |
2852 | case XLGMAC_INT_DMA_CH_SR_TI_RI: | |
2853 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2854 | dma_ch_ier, DMA_CH_IER_TIE_POS, | |
2855 | DMA_CH_IER_TIE_LEN, 1); | |
2856 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2857 | dma_ch_ier, DMA_CH_IER_RIE_POS, | |
2858 | DMA_CH_IER_RIE_LEN, 1); | |
2859 | break; | |
2860 | case XLGMAC_INT_DMA_CH_SR_FBE: | |
2861 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2862 | dma_ch_ier, DMA_CH_IER_FBEE_POS, | |
2863 | DMA_CH_IER_FBEE_LEN, 1); | |
2864 | break; | |
2865 | case XLGMAC_INT_DMA_ALL: | |
2866 | dma_ch_ier |= channel->saved_ier; | |
2867 | break; | |
2868 | default: | |
2869 | return -1; | |
2870 | } | |
2871 | ||
2872 | writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2873 | ||
2874 | return 0; | |
2875 | } | |
2876 | ||
2877 | static int xlgmac_disable_int(struct xlgmac_channel *channel, | |
2878 | enum xlgmac_int int_id) | |
2879 | { | |
2880 | unsigned int dma_ch_ier; | |
2881 | ||
2882 | dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2883 | ||
2884 | switch (int_id) { | |
2885 | case XLGMAC_INT_DMA_CH_SR_TI: | |
2886 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2887 | dma_ch_ier, DMA_CH_IER_TIE_POS, | |
2888 | DMA_CH_IER_TIE_LEN, 0); | |
2889 | break; | |
2890 | case XLGMAC_INT_DMA_CH_SR_TPS: | |
2891 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2892 | dma_ch_ier, DMA_CH_IER_TXSE_POS, | |
2893 | DMA_CH_IER_TXSE_LEN, 0); | |
2894 | break; | |
2895 | case XLGMAC_INT_DMA_CH_SR_TBU: | |
2896 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2897 | dma_ch_ier, DMA_CH_IER_TBUE_POS, | |
2898 | DMA_CH_IER_TBUE_LEN, 0); | |
2899 | break; | |
2900 | case XLGMAC_INT_DMA_CH_SR_RI: | |
2901 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2902 | dma_ch_ier, DMA_CH_IER_RIE_POS, | |
2903 | DMA_CH_IER_RIE_LEN, 0); | |
2904 | break; | |
2905 | case XLGMAC_INT_DMA_CH_SR_RBU: | |
2906 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2907 | dma_ch_ier, DMA_CH_IER_RBUE_POS, | |
2908 | DMA_CH_IER_RBUE_LEN, 0); | |
2909 | break; | |
2910 | case XLGMAC_INT_DMA_CH_SR_RPS: | |
2911 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2912 | dma_ch_ier, DMA_CH_IER_RSE_POS, | |
2913 | DMA_CH_IER_RSE_LEN, 0); | |
2914 | break; | |
2915 | case XLGMAC_INT_DMA_CH_SR_TI_RI: | |
2916 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2917 | dma_ch_ier, DMA_CH_IER_TIE_POS, | |
2918 | DMA_CH_IER_TIE_LEN, 0); | |
2919 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2920 | dma_ch_ier, DMA_CH_IER_RIE_POS, | |
2921 | DMA_CH_IER_RIE_LEN, 0); | |
2922 | break; | |
2923 | case XLGMAC_INT_DMA_CH_SR_FBE: | |
2924 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2925 | dma_ch_ier, DMA_CH_IER_FBEE_POS, | |
2926 | DMA_CH_IER_FBEE_LEN, 0); | |
2927 | break; | |
2928 | case XLGMAC_INT_DMA_ALL: | |
2929 | channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK; | |
2930 | dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK; | |
2931 | break; | |
2932 | default: | |
2933 | return -1; | |
2934 | } | |
2935 | ||
2936 | writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2937 | ||
2938 | return 0; | |
2939 | } | |
2940 | ||
2941 | static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata) | |
2942 | { | |
2943 | unsigned int i, count; | |
2944 | u32 regval; | |
2945 | ||
2946 | for (i = 0; i < pdata->tx_q_count; i++) { | |
2947 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
2948 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, | |
2949 | MTL_Q_TQOMR_FTQ_LEN, 1); | |
2950 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
2951 | } | |
2952 | ||
2953 | /* Poll Until Poll Condition */ | |
2954 | for (i = 0; i < pdata->tx_q_count; i++) { | |
2955 | count = 2000; | |
2956 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
2957 | regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, | |
2958 | MTL_Q_TQOMR_FTQ_LEN); | |
2959 | while (--count && regval) | |
2960 | usleep_range(500, 600); | |
2961 | ||
2962 | if (!count) | |
2963 | return -EBUSY; | |
2964 | } | |
2965 | ||
2966 | return 0; | |
2967 | } | |
2968 | ||
2969 | static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata) | |
2970 | { | |
2971 | u32 regval; | |
2972 | ||
2973 | regval = readl(pdata->mac_regs + DMA_SBMR); | |
2974 | /* Set enhanced addressing mode */ | |
2975 | regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS, | |
2976 | DMA_SBMR_EAME_LEN, 1); | |
2977 | /* Set the System Bus mode */ | |
2978 | regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS, | |
2979 | DMA_SBMR_UNDEF_LEN, 1); | |
2980 | regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS, | |
2981 | DMA_SBMR_BLEN_256_LEN, 1); | |
2982 | writel(regval, pdata->mac_regs + DMA_SBMR); | |
2983 | } | |
2984 | ||
/* One-time hardware bring-up: flush the Tx queues, then program the DMA,
 * MTL (queue) and MAC layers in that order from the settings held in
 * @pdata.  Returns 0 on success, or the error from the Tx queue flush.
 */
static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	int ret;

	/* Flush Tx queues before reprogramming the DMA */
	ret = xlgmac_flush_tx_queues(pdata);
	if (ret)
		return ret;

	/* Initialize DMA related features */
	xlgmac_config_dma_bus(pdata);
	xlgmac_config_osp_mode(pdata);
	xlgmac_config_pblx8(pdata);
	xlgmac_config_tx_pbl_val(pdata);
	xlgmac_config_rx_pbl_val(pdata);
	xlgmac_config_rx_coalesce(pdata);
	xlgmac_config_tx_coalesce(pdata);
	xlgmac_config_rx_buffer_size(pdata);
	xlgmac_config_tso_mode(pdata);
	xlgmac_config_sph_mode(pdata);
	xlgmac_config_rss(pdata);
	desc_ops->tx_desc_init(pdata);
	desc_ops->rx_desc_init(pdata);
	xlgmac_enable_dma_interrupts(pdata);

	/* Initialize MTL related features (queues, thresholds, fifos) */
	xlgmac_config_mtl_mode(pdata);
	xlgmac_config_queue_mapping(pdata);
	xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
	xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
	xlgmac_config_tx_fifo_size(pdata);
	xlgmac_config_rx_fifo_size(pdata);
	xlgmac_config_flow_control_threshold(pdata);
	xlgmac_config_rx_fep_enable(pdata);
	xlgmac_config_rx_fup_enable(pdata);
	xlgmac_enable_mtl_interrupts(pdata);

	/* Initialize MAC related features (address, filtering, offloads) */
	xlgmac_config_mac_address(pdata);
	xlgmac_config_rx_mode(pdata);
	xlgmac_config_jumbo_enable(pdata);
	xlgmac_config_flow_control(pdata);
	xlgmac_config_mac_speed(pdata);
	xlgmac_config_checksum_offload(pdata);
	xlgmac_config_vlan_support(pdata);
	xlgmac_config_mmc(pdata);
	xlgmac_enable_mac_interrupts(pdata);

	return 0;
}
3038 | ||
3039 | static int xlgmac_hw_exit(struct xlgmac_pdata *pdata) | |
3040 | { | |
3041 | unsigned int count = 2000; | |
3042 | u32 regval; | |
3043 | ||
3044 | /* Issue a software reset */ | |
3045 | regval = readl(pdata->mac_regs + DMA_MR); | |
3046 | regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS, | |
3047 | DMA_MR_SWR_LEN, 1); | |
3048 | writel(regval, pdata->mac_regs + DMA_MR); | |
3049 | usleep_range(10, 15); | |
3050 | ||
3051 | /* Poll Until Poll Condition */ | |
3052 | while (--count && | |
3053 | XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR), | |
3054 | DMA_MR_SWR_POS, DMA_MR_SWR_LEN)) | |
3055 | usleep_range(500, 600); | |
3056 | ||
3057 | if (!count) | |
3058 | return -EBUSY; | |
3059 | ||
3060 | return 0; | |
3061 | } | |
3062 | ||
3063 | void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops) | |
3064 | { | |
3065 | hw_ops->init = xlgmac_hw_init; | |
3066 | hw_ops->exit = xlgmac_hw_exit; | |
3067 | ||
3068 | hw_ops->tx_complete = xlgmac_tx_complete; | |
3069 | ||
3070 | hw_ops->enable_tx = xlgmac_enable_tx; | |
3071 | hw_ops->disable_tx = xlgmac_disable_tx; | |
3072 | hw_ops->enable_rx = xlgmac_enable_rx; | |
3073 | hw_ops->disable_rx = xlgmac_disable_rx; | |
3074 | ||
3075 | hw_ops->dev_xmit = xlgmac_dev_xmit; | |
3076 | hw_ops->dev_read = xlgmac_dev_read; | |
3077 | hw_ops->enable_int = xlgmac_enable_int; | |
3078 | hw_ops->disable_int = xlgmac_disable_int; | |
3079 | ||
3080 | hw_ops->set_mac_address = xlgmac_set_mac_address; | |
3081 | hw_ops->config_rx_mode = xlgmac_config_rx_mode; | |
3082 | hw_ops->enable_rx_csum = xlgmac_enable_rx_csum; | |
3083 | hw_ops->disable_rx_csum = xlgmac_disable_rx_csum; | |
3084 | ||
3085 | /* For MII speed configuration */ | |
3086 | hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed; | |
3087 | hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed; | |
3088 | hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed; | |
3089 | hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed; | |
3090 | ||
3091 | /* For descriptor related operation */ | |
3092 | hw_ops->tx_desc_init = xlgmac_tx_desc_init; | |
3093 | hw_ops->rx_desc_init = xlgmac_rx_desc_init; | |
3094 | hw_ops->tx_desc_reset = xlgmac_tx_desc_reset; | |
3095 | hw_ops->rx_desc_reset = xlgmac_rx_desc_reset; | |
3096 | hw_ops->is_last_desc = xlgmac_is_last_desc; | |
3097 | hw_ops->is_context_desc = xlgmac_is_context_desc; | |
3098 | hw_ops->tx_start_xmit = xlgmac_tx_start_xmit; | |
3099 | ||
3100 | /* For Flow Control */ | |
3101 | hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control; | |
3102 | hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control; | |
3103 | ||
3104 | /* For Vlan related config */ | |
3105 | hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping; | |
3106 | hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping; | |
3107 | hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering; | |
3108 | hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering; | |
3109 | hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table; | |
3110 | ||
3111 | /* For RX coalescing */ | |
3112 | hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce; | |
3113 | hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce; | |
3114 | hw_ops->usec_to_riwt = xlgmac_usec_to_riwt; | |
3115 | hw_ops->riwt_to_usec = xlgmac_riwt_to_usec; | |
3116 | ||
3117 | /* For RX and TX threshold config */ | |
3118 | hw_ops->config_rx_threshold = xlgmac_config_rx_threshold; | |
3119 | hw_ops->config_tx_threshold = xlgmac_config_tx_threshold; | |
3120 | ||
3121 | /* For RX and TX Store and Forward Mode config */ | |
3122 | hw_ops->config_rsf_mode = xlgmac_config_rsf_mode; | |
3123 | hw_ops->config_tsf_mode = xlgmac_config_tsf_mode; | |
3124 | ||
3125 | /* For TX DMA Operating on Second Frame config */ | |
3126 | hw_ops->config_osp_mode = xlgmac_config_osp_mode; | |
3127 | ||
3128 | /* For RX and TX PBL config */ | |
3129 | hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val; | |
3130 | hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val; | |
3131 | hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val; | |
3132 | hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val; | |
3133 | hw_ops->config_pblx8 = xlgmac_config_pblx8; | |
3134 | ||
3135 | /* For MMC statistics support */ | |
3136 | hw_ops->tx_mmc_int = xlgmac_tx_mmc_int; | |
3137 | hw_ops->rx_mmc_int = xlgmac_rx_mmc_int; | |
3138 | hw_ops->read_mmc_stats = xlgmac_read_mmc_stats; | |
3139 | ||
3140 | /* For Receive Side Scaling */ | |
3141 | hw_ops->enable_rss = xlgmac_enable_rss; | |
3142 | hw_ops->disable_rss = xlgmac_disable_rss; | |
3143 | hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key; | |
3144 | hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table; | |
3145 | } |