Commit | Line | Data |
---|---|---|
65e0ace2 JD |
1 | /* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver |
2 | * | |
3 | * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License as published by the | |
7 | * Free Software Foundation; either version 2 of the License, or (at your | |
8 | * option) any later version. | |
9 | * | |
10 | * This Synopsys DWC XLGMAC software driver and associated documentation | |
11 | * (hereinafter the "Software") is an unsupported proprietary work of | |
12 | * Synopsys, Inc. unless otherwise expressly agreed to in writing between | |
13 | * Synopsys and you. The Software IS NOT an item of Licensed Software or a | |
14 | * Licensed Product under any End User Software License Agreement or | |
15 | * Agreement for Licensed Products with Synopsys or any supplement thereto. | |
16 | * Synopsys is a registered trademark of Synopsys, Inc. Other names included | |
17 | * in the SOFTWARE may be the trademarks of their respective owners. | |
18 | */ | |
19 | ||
20 | #include <linux/phy.h> | |
21 | #include <linux/mdio.h> | |
22 | #include <linux/clk.h> | |
23 | #include <linux/bitrev.h> | |
24 | #include <linux/crc32.h> | |
25 | ||
26 | #include "dwc-xlgmac.h" | |
27 | #include "dwc-xlgmac-reg.h" | |
28 | ||
29 | static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc) | |
30 | { | |
31 | return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, | |
32 | TX_NORMAL_DESC3_OWN_POS, | |
33 | TX_NORMAL_DESC3_OWN_LEN); | |
34 | } | |
35 | ||
36 | static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata) | |
37 | { | |
38 | u32 regval; | |
39 | ||
40 | regval = readl(pdata->mac_regs + MAC_RCR); | |
41 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, | |
42 | MAC_RCR_IPC_LEN, 0); | |
43 | writel(regval, pdata->mac_regs + MAC_RCR); | |
44 | ||
45 | return 0; | |
46 | } | |
47 | ||
48 | static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata) | |
49 | { | |
50 | u32 regval; | |
51 | ||
52 | regval = readl(pdata->mac_regs + MAC_RCR); | |
53 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, | |
54 | MAC_RCR_IPC_LEN, 1); | |
55 | writel(regval, pdata->mac_regs + MAC_RCR); | |
56 | ||
57 | return 0; | |
58 | } | |
59 | ||
60 | static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr) | |
61 | { | |
62 | unsigned int mac_addr_hi, mac_addr_lo; | |
63 | ||
64 | mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); | |
65 | mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | | |
66 | (addr[1] << 8) | (addr[0] << 0); | |
67 | ||
68 | writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR); | |
69 | writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR); | |
70 | ||
71 | return 0; | |
72 | } | |
73 | ||
/* Program one additional MAC address filter register pair.
 *
 * When @ha is non-NULL the 6-byte address is packed into the register
 * words and the AE (address enable) bit is set; when @ha is NULL the
 * pair is written as zero, disabling that filter slot.  @mac_reg is
 * advanced past the HR/LR pair so callers can walk the register file.
 */
static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata,
			       struct netdev_hw_addr *ha,
			       unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		/* Pack the address bytes into the two register words by
		 * writing through a byte pointer.  NOTE(review): this
		 * packing assumes the host byte layout matches what the
		 * register expects after writel() — confirm on big-endian.
		 */
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);

		/* Mark the filter slot as active */
		mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi,
						  MAC_MACA1HR_AE_POS,
						  MAC_MACA1HR_AE_LEN,
						  1);
	}

	writel(mac_addr_hi, pdata->mac_regs + *mac_reg);
	*mac_reg += MAC_MACA_INC;
	writel(mac_addr_lo, pdata->mac_regs + *mac_reg);
	*mac_reg += MAC_MACA_INC;
}
109 | ||
/* Configure the MAC to strip VLAN C-TAGs from received frames and
 * report the stripped tag in the Rx descriptor (EVLS = 0x3).
 */
static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
{
	u32 regval;

	regval = readl(pdata->mac_regs + MAC_VLANTR);
	/* Put the VLAN tag in the Rx descriptor */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
				     MAC_VLANTR_EVLRXS_LEN, 1);
	/* Don't check the VLAN type */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
				     MAC_VLANTR_DOVLTC_LEN, 1);
	/* Check only C-TAG (0x8100) packets */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
				     MAC_VLANTR_ERSVLM_LEN, 0);
	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
				     MAC_VLANTR_ESVL_LEN, 0);
	/* Enable VLAN tag stripping */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
				     MAC_VLANTR_EVLS_LEN, 0x3);
	writel(regval, pdata->mac_regs + MAC_VLANTR);

	return 0;
}
134 | ||
135 | static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata) | |
136 | { | |
137 | u32 regval; | |
138 | ||
139 | regval = readl(pdata->mac_regs + MAC_VLANTR); | |
140 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, | |
141 | MAC_VLANTR_EVLS_LEN, 0); | |
142 | writel(regval, pdata->mac_regs + MAC_VLANTR); | |
143 | ||
144 | return 0; | |
145 | } | |
146 | ||
/* Enable hash-based Rx VLAN filtering: turn on the packet-filter VTFE
 * bit, then configure the VLAN tag register for 12-bit hash matching.
 */
static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
{
	u32 regval;

	regval = readl(pdata->mac_regs + MAC_PFR);
	/* Enable VLAN filtering */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
				     MAC_PFR_VTFE_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_PFR);

	regval = readl(pdata->mac_regs + MAC_VLANTR);
	/* Enable VLAN Hash Table filtering */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
				     MAC_VLANTR_VTHM_LEN, 1);
	/* Disable VLAN tag inverse matching */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
				     MAC_VLANTR_VTIM_LEN, 0);
	/* Only filter on the lower 12-bits of the VLAN tag */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
				     MAC_VLANTR_ETV_LEN, 1);
	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
				     MAC_VLANTR_VL_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_VLANTR);

	return 0;
}
179 | ||
180 | static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata) | |
181 | { | |
182 | u32 regval; | |
183 | ||
184 | regval = readl(pdata->mac_regs + MAC_PFR); | |
185 | /* Disable VLAN filtering */ | |
186 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, | |
187 | MAC_PFR_VTFE_LEN, 0); | |
188 | writel(regval, pdata->mac_regs + MAC_PFR); | |
189 | ||
190 | return 0; | |
191 | } | |
192 | ||
/* Bit-serial CRC-32 (reflected polynomial 0xedb88320) over the
 * significant bits of a little-endian VLAN ID.  Only the bits covered
 * by VLAN_VID_MASK are folded in; the result feeds the MAC's VLAN
 * hash-table lookup.
 */
static u32 xlgmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 poly = 0xedb88320;
	u32 crc = ~0;
	u32 temp = 0;
	int i, bits;

	/* Number of VID bits that participate in the hash */
	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		/* Fetch the next input byte every eight bits */
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		/* Standard reflected-CRC step: XOR LSBs, shift, conditionally
		 * fold in the polynomial.
		 */
		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}

	return crc;
}
217 | ||
/* Rebuild the 16-bit VLAN hash table from the active-VLAN bitmap and
 * program it into MAC_VLANHTR.  Each VID selects a table bit via the
 * top four bits of the bit-reversed, inverted CRC-32 of the VID.
 */
static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata)
{
	u16 vlan_hash_table = 0;
	__le16 vid_le;
	u32 regval;
	u32 crc;
	u16 vid;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	regval = readl(pdata->mac_regs + MAC_VLANHTR);
	/* Set the VLAN Hash Table filtering register */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
				     MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
	writel(regval, pdata->mac_regs + MAC_VLANHTR);

	return 0;
}
243 | ||
244 | static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata, | |
245 | unsigned int enable) | |
246 | { | |
247 | unsigned int val = enable ? 1 : 0; | |
248 | u32 regval; | |
249 | ||
250 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), | |
251 | MAC_PFR_PR_POS, MAC_PFR_PR_LEN); | |
252 | if (regval == val) | |
253 | return 0; | |
254 | ||
255 | netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n", | |
256 | enable ? "entering" : "leaving"); | |
257 | ||
258 | regval = readl(pdata->mac_regs + MAC_PFR); | |
259 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS, | |
260 | MAC_PFR_PR_LEN, val); | |
261 | writel(regval, pdata->mac_regs + MAC_PFR); | |
262 | ||
263 | /* Hardware will still perform VLAN filtering in promiscuous mode */ | |
264 | if (enable) { | |
265 | xlgmac_disable_rx_vlan_filtering(pdata); | |
266 | } else { | |
267 | if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) | |
268 | xlgmac_enable_rx_vlan_filtering(pdata); | |
269 | } | |
270 | ||
271 | return 0; | |
272 | } | |
273 | ||
274 | static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata, | |
275 | unsigned int enable) | |
276 | { | |
277 | unsigned int val = enable ? 1 : 0; | |
278 | u32 regval; | |
279 | ||
280 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), | |
281 | MAC_PFR_PM_POS, MAC_PFR_PM_LEN); | |
282 | if (regval == val) | |
283 | return 0; | |
284 | ||
285 | netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n", | |
286 | enable ? "entering" : "leaving"); | |
287 | ||
288 | regval = readl(pdata->mac_regs + MAC_PFR); | |
289 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS, | |
290 | MAC_PFR_PM_LEN, val); | |
291 | writel(regval, pdata->mac_regs + MAC_PFR); | |
292 | ||
293 | return 0; | |
294 | } | |
295 | ||
/* Program the unicast and multicast filter lists into the additional
 * MAC address registers, starting at MACA1HR.
 *
 * If the unicast list exceeds the available slots, fall back to
 * promiscuous mode; if the multicast list exceeds what remains, fall
 * back to all-multicast mode.  Any leftover slots are cleared so stale
 * addresses do not keep matching.
 */
static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int addn_macs;
	unsigned int mac_reg;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xlgmac_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xlgmac_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xlgmac_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xlgmac_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xlgmac_set_mac_reg(pdata, NULL, &mac_reg);
}
328 | ||
/* Build the MAC address hash-filter table from the netdev unicast and
 * multicast lists and write it to the MAC_HTR registers.
 *
 * Each address hashes to one bit: the bit-reversed, inverted CRC-32 of
 * the 6-byte address, right-shifted so the remaining bits index into
 * hash_table_size positions (hash_table_shift is derived from the
 * hardware table size; see the >> 7 scaling below).
 */
static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata)
{
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE];
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int i;
	u32 crc;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	/* One 32-bit register per 32 table bits */
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		/* crc >> 5 selects the register, crc & 0x1f the bit */
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		writel(hash_table[i], pdata->mac_regs + hash_reg);
		hash_reg += MAC_HTR_INC;
	}
}
363 | ||
364 | static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata) | |
365 | { | |
366 | if (pdata->hw_feat.hash_table_size) | |
367 | xlgmac_set_mac_hash_table(pdata); | |
368 | else | |
369 | xlgmac_set_mac_addn_addrs(pdata); | |
370 | ||
371 | return 0; | |
372 | } | |
373 | ||
/* Program the station MAC address and, when a hash table is available,
 * enable combined perfect + hash filtering for unicast and multicast.
 */
static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata)
{
	u32 regval;

	xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr);

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		regval = readl(pdata->mac_regs + MAC_PFR);
		/* HPF: hash-or-perfect; HUC/HMC: hash unicast/multicast */
		regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
					     MAC_PFR_HPF_LEN, 1);
		regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
					     MAC_PFR_HUC_LEN, 1);
		regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
					     MAC_PFR_HMC_LEN, 1);
		writel(regval, pdata->mac_regs + MAC_PFR);
	}
}
392 | ||
393 | static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata) | |
394 | { | |
395 | unsigned int val; | |
396 | u32 regval; | |
397 | ||
398 | val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0; | |
399 | ||
400 | regval = readl(pdata->mac_regs + MAC_RCR); | |
401 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS, | |
402 | MAC_RCR_JE_LEN, val); | |
403 | writel(regval, pdata->mac_regs + MAC_RCR); | |
404 | } | |
405 | ||
406 | static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata) | |
407 | { | |
408 | if (pdata->netdev->features & NETIF_F_RXCSUM) | |
409 | xlgmac_enable_rx_csum(pdata); | |
410 | else | |
411 | xlgmac_disable_rx_csum(pdata); | |
412 | } | |
413 | ||
/* Initialize all VLAN-related hardware state: Tx tag insertion source,
 * the VLAN hash table, and Rx filtering/stripping according to the
 * netdev feature flags.
 */
static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata)
{
	u32 regval;

	regval = readl(pdata->mac_regs + MAC_VLANIR);
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
				     MAC_VLANIR_CSVL_LEN, 0);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
				     MAC_VLANIR_VLTI_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_VLANIR);

	/* Set the current VLAN Hash Table register value */
	xlgmac_update_vlan_hash_table(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		xlgmac_enable_rx_vlan_filtering(pdata);
	else
		xlgmac_disable_rx_vlan_filtering(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		xlgmac_enable_rx_vlan_stripping(pdata);
	else
		xlgmac_disable_rx_vlan_stripping(pdata);
}
439 | ||
440 | static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata) | |
441 | { | |
442 | struct net_device *netdev = pdata->netdev; | |
443 | unsigned int pr_mode, am_mode; | |
444 | ||
445 | pr_mode = ((netdev->flags & IFF_PROMISC) != 0); | |
446 | am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); | |
447 | ||
448 | xlgmac_set_promiscuous_mode(pdata, pr_mode); | |
449 | xlgmac_set_all_multicast_mode(pdata, am_mode); | |
450 | ||
451 | xlgmac_add_mac_addresses(pdata); | |
452 | ||
453 | return 0; | |
454 | } | |
455 | ||
/* Poll the DMA debug status until the given channel's Tx engine reports
 * stopped or suspended, bounded by XLGMAC_DMA_STOP_TIMEOUT seconds.
 * Logs (but does not fail) on timeout.
 */
static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata,
				   struct xlgmac_channel *channel)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned long tx_timeout;
	unsigned int tx_status;

	/* Calculate the status register to read and the position within.
	 * The first few queues live in DMA_DSR0; the rest are spread over
	 * DMA_DSR1+ with DMA_DSRX_QPR queues per register.
	 */
	if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
			 DMA_DSR0_TPS_START;
	} else {
		tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = readl(pdata->mac_regs + tx_dsr);
		tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos,
						DMA_DSR_TPS_LEN);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    channel->queue_index);
}
497 | ||
/* Bring up the transmit path in order: DMA channels first, then the
 * MTL Tx queues, and finally the MAC transmitter itself.
 */
static void xlgmac_enable_tx(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	/* Enable each Tx DMA channel */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels with Tx rings are allocated contiguously, so the
		 * first one without a ring marks the end of the Tx channels.
		 */
		if (!channel->tx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
					     DMA_CH_TCR_ST_LEN, 1);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
					     MTL_Q_TQOMR_TXQEN_LEN,
					     MTL_Q_ENABLED);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	/* Enable MAC Tx */
	regval = readl(pdata->mac_regs + MAC_TCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
				     MAC_TCR_TE_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_TCR);
}
531 | ||
/* Tear down the transmit path: wait for each DMA channel to drain,
 * then disable the MAC transmitter, the MTL Tx queues, and finally
 * stop the DMA channels.
 */
static void xlgmac_disable_tx(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		xlgmac_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	regval = readl(pdata->mac_regs + MAC_TCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
				     MAC_TCR_TE_LEN, 0);
	writel(regval, pdata->mac_regs + MAC_TCR);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
					     MTL_Q_TQOMR_TXQEN_LEN, 0);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	/* Disable each Tx DMA channel */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
					     DMA_CH_TCR_ST_LEN, 0);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
	}
}
573 | ||
/* Poll the MTL Rx queue debug register until the queue's FIFO is empty
 * (no pending packets and idle queue status), bounded by
 * XLGMAC_DMA_STOP_TIMEOUT seconds.  Logs (but does not fail) on timeout.
 */
static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata,
				   unsigned int queue)
{
	unsigned int rx_status, prxq, rxqsts;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
	 * wait forever though...
	 */
	rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, rx_timeout)) {
		rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
		/* PRXQ: packets remaining in queue; RXQSTS: queue state */
		prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS,
					   MTL_Q_RQDR_PRXQ_LEN);
		rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS,
					     MTL_Q_RQDR_RXQSTS_LEN);
		if ((prxq == 0) && (rxqsts == 0))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, rx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}
602 | ||
/* Bring up the receive path in order: DMA channels, then the Rx queue
 * enables in MAC_RQC0R, and finally the MAC receiver configuration.
 */
static void xlgmac_enable_rx(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int regval, i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
					     DMA_CH_RCR_SR_LEN, 1);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
	}

	/* Enable each Rx queue: two bits per queue in RQC0R, value 0x02
	 * per queue (NOTE(review): presumably the "enabled for DCB/generic"
	 * encoding — confirm against the RQC0R field definition).
	 */
	regval = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		regval |= (0x02 << (i << 1));
	writel(regval, pdata->mac_regs + MAC_RQC0R);

	/* Enable MAC Rx: forward CRC'd frames (DCRCC), strip CRC (CST),
	 * auto pad/CRC strip (ACS), and receiver enable (RE).
	 */
	regval = readl(pdata->mac_regs + MAC_RCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
				     MAC_RCR_DCRCC_LEN, 1);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
				     MAC_RCR_CST_LEN, 1);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
				     MAC_RCR_ACS_LEN, 1);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
				     MAC_RCR_RE_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_RCR);
}
638 | ||
/* Tear down the receive path: disable the MAC receiver first so no new
 * frames arrive, drain each Rx queue, clear the queue enables, and then
 * stop the Rx DMA channels.
 */
static void xlgmac_disable_rx(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	/* Disable MAC Rx */
	regval = readl(pdata->mac_regs + MAC_RCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
				     MAC_RCR_DCRCC_LEN, 0);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
				     MAC_RCR_CST_LEN, 0);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
				     MAC_RCR_ACS_LEN, 0);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
				     MAC_RCR_RE_LEN, 0);
	writel(regval, pdata->mac_regs + MAC_RCR);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xlgmac_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	writel(0, pdata->mac_regs + MAC_RQC0R);

	/* Disable each Rx DMA channel */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
					     DMA_CH_RCR_SR_LEN, 0);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
	}
}
676 | ||
/* Kick the Tx DMA engine for @channel: publish all descriptor writes,
 * then write the tail pointer (address of the next free descriptor) to
 * make the hardware fetch the new work.  Optionally arms the Tx
 * coalescing timer.
 */
static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel,
				 struct xlgmac_ring *ring)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_desc_data *desc_data;

	/* Make sure everything is written before the register write */
	wmb();

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO));

	/* Start the Tx timer: fires if no further xmit re-arms it,
	 * ensuring completion interrupts are not delayed indefinitely.
	 */
	if (pdata->tx_usecs && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}

	ring->tx.xmit_more = 0;
}
702 | ||
703 | static void xlgmac_dev_xmit(struct xlgmac_channel *channel) | |
704 | { | |
705 | struct xlgmac_pdata *pdata = channel->pdata; | |
706 | struct xlgmac_ring *ring = channel->tx_ring; | |
707 | unsigned int tso_context, vlan_context; | |
708 | struct xlgmac_desc_data *desc_data; | |
709 | struct xlgmac_dma_desc *dma_desc; | |
710 | struct xlgmac_pkt_info *pkt_info; | |
711 | unsigned int csum, tso, vlan; | |
712 | int start_index = ring->cur; | |
713 | int cur_index = ring->cur; | |
714 | unsigned int tx_set_ic; | |
715 | int i; | |
716 | ||
717 | pkt_info = &ring->pkt_info; | |
718 | csum = XLGMAC_GET_REG_BITS(pkt_info->attributes, | |
719 | TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, | |
720 | TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); | |
721 | tso = XLGMAC_GET_REG_BITS(pkt_info->attributes, | |
722 | TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, | |
723 | TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); | |
724 | vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes, | |
725 | TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, | |
726 | TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); | |
727 | ||
728 | if (tso && (pkt_info->mss != ring->tx.cur_mss)) | |
729 | tso_context = 1; | |
730 | else | |
731 | tso_context = 0; | |
732 | ||
733 | if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) | |
734 | vlan_context = 1; | |
735 | else | |
736 | vlan_context = 0; | |
737 | ||
738 | /* Determine if an interrupt should be generated for this Tx: | |
739 | * Interrupt: | |
740 | * - Tx frame count exceeds the frame count setting | |
741 | * - Addition of Tx frame count to the frame count since the | |
742 | * last interrupt was set exceeds the frame count setting | |
743 | * No interrupt: | |
744 | * - No frame count setting specified (ethtool -C ethX tx-frames 0) | |
745 | * - Addition of Tx frame count to the frame count since the | |
746 | * last interrupt was set does not exceed the frame count setting | |
747 | */ | |
748 | ring->coalesce_count += pkt_info->tx_packets; | |
749 | if (!pdata->tx_frames) | |
750 | tx_set_ic = 0; | |
751 | else if (pkt_info->tx_packets > pdata->tx_frames) | |
752 | tx_set_ic = 1; | |
753 | else if ((ring->coalesce_count % pdata->tx_frames) < | |
754 | pkt_info->tx_packets) | |
755 | tx_set_ic = 1; | |
756 | else | |
757 | tx_set_ic = 0; | |
758 | ||
759 | desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); | |
760 | dma_desc = desc_data->dma_desc; | |
761 | ||
762 | /* Create a context descriptor if this is a TSO pkt_info */ | |
763 | if (tso_context || vlan_context) { | |
764 | if (tso_context) { | |
765 | netif_dbg(pdata, tx_queued, pdata->netdev, | |
766 | "TSO context descriptor, mss=%u\n", | |
767 | pkt_info->mss); | |
768 | ||
769 | /* Set the MSS size */ | |
770 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
771 | dma_desc->desc2, | |
772 | TX_CONTEXT_DESC2_MSS_POS, | |
773 | TX_CONTEXT_DESC2_MSS_LEN, | |
774 | pkt_info->mss); | |
775 | ||
776 | /* Mark it as a CONTEXT descriptor */ | |
777 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
778 | dma_desc->desc3, | |
779 | TX_CONTEXT_DESC3_CTXT_POS, | |
780 | TX_CONTEXT_DESC3_CTXT_LEN, | |
781 | 1); | |
782 | ||
783 | /* Indicate this descriptor contains the MSS */ | |
784 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
785 | dma_desc->desc3, | |
786 | TX_CONTEXT_DESC3_TCMSSV_POS, | |
787 | TX_CONTEXT_DESC3_TCMSSV_LEN, | |
788 | 1); | |
789 | ||
790 | ring->tx.cur_mss = pkt_info->mss; | |
791 | } | |
792 | ||
793 | if (vlan_context) { | |
794 | netif_dbg(pdata, tx_queued, pdata->netdev, | |
795 | "VLAN context descriptor, ctag=%u\n", | |
796 | pkt_info->vlan_ctag); | |
797 | ||
798 | /* Mark it as a CONTEXT descriptor */ | |
799 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
800 | dma_desc->desc3, | |
801 | TX_CONTEXT_DESC3_CTXT_POS, | |
802 | TX_CONTEXT_DESC3_CTXT_LEN, | |
803 | 1); | |
804 | ||
805 | /* Set the VLAN tag */ | |
806 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
807 | dma_desc->desc3, | |
808 | TX_CONTEXT_DESC3_VT_POS, | |
809 | TX_CONTEXT_DESC3_VT_LEN, | |
810 | pkt_info->vlan_ctag); | |
811 | ||
812 | /* Indicate this descriptor contains the VLAN tag */ | |
813 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
814 | dma_desc->desc3, | |
815 | TX_CONTEXT_DESC3_VLTV_POS, | |
816 | TX_CONTEXT_DESC3_VLTV_LEN, | |
817 | 1); | |
818 | ||
819 | ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; | |
820 | } | |
821 | ||
822 | cur_index++; | |
823 | desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); | |
824 | dma_desc = desc_data->dma_desc; | |
825 | } | |
826 | ||
827 | /* Update buffer address (for TSO this is the header) */ | |
828 | dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); | |
829 | dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); | |
830 | ||
831 | /* Update the buffer length */ | |
832 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
833 | dma_desc->desc2, | |
834 | TX_NORMAL_DESC2_HL_B1L_POS, | |
835 | TX_NORMAL_DESC2_HL_B1L_LEN, | |
836 | desc_data->skb_dma_len); | |
837 | ||
838 | /* VLAN tag insertion check */ | |
839 | if (vlan) | |
840 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
841 | dma_desc->desc2, | |
842 | TX_NORMAL_DESC2_VTIR_POS, | |
843 | TX_NORMAL_DESC2_VTIR_LEN, | |
844 | TX_NORMAL_DESC2_VLAN_INSERT); | |
845 | ||
846 | /* Timestamp enablement check */ | |
847 | if (XLGMAC_GET_REG_BITS(pkt_info->attributes, | |
848 | TX_PACKET_ATTRIBUTES_PTP_POS, | |
849 | TX_PACKET_ATTRIBUTES_PTP_LEN)) | |
850 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
851 | dma_desc->desc2, | |
852 | TX_NORMAL_DESC2_TTSE_POS, | |
853 | TX_NORMAL_DESC2_TTSE_LEN, | |
854 | 1); | |
855 | ||
856 | /* Mark it as First Descriptor */ | |
857 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
858 | dma_desc->desc3, | |
859 | TX_NORMAL_DESC3_FD_POS, | |
860 | TX_NORMAL_DESC3_FD_LEN, | |
861 | 1); | |
862 | ||
863 | /* Mark it as a NORMAL descriptor */ | |
864 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
865 | dma_desc->desc3, | |
866 | TX_NORMAL_DESC3_CTXT_POS, | |
867 | TX_NORMAL_DESC3_CTXT_LEN, | |
868 | 0); | |
869 | ||
870 | /* Set OWN bit if not the first descriptor */ | |
871 | if (cur_index != start_index) | |
872 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
873 | dma_desc->desc3, | |
874 | TX_NORMAL_DESC3_OWN_POS, | |
875 | TX_NORMAL_DESC3_OWN_LEN, | |
876 | 1); | |
877 | ||
878 | if (tso) { | |
879 | /* Enable TSO */ | |
880 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
881 | dma_desc->desc3, | |
882 | TX_NORMAL_DESC3_TSE_POS, | |
883 | TX_NORMAL_DESC3_TSE_LEN, 1); | |
884 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
885 | dma_desc->desc3, | |
886 | TX_NORMAL_DESC3_TCPPL_POS, | |
887 | TX_NORMAL_DESC3_TCPPL_LEN, | |
888 | pkt_info->tcp_payload_len); | |
889 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
890 | dma_desc->desc3, | |
891 | TX_NORMAL_DESC3_TCPHDRLEN_POS, | |
892 | TX_NORMAL_DESC3_TCPHDRLEN_LEN, | |
893 | pkt_info->tcp_header_len / 4); | |
894 | ||
895 | pdata->stats.tx_tso_packets++; | |
896 | } else { | |
897 | /* Enable CRC and Pad Insertion */ | |
898 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
899 | dma_desc->desc3, | |
900 | TX_NORMAL_DESC3_CPC_POS, | |
901 | TX_NORMAL_DESC3_CPC_LEN, 0); | |
902 | ||
903 | /* Enable HW CSUM */ | |
904 | if (csum) | |
905 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
906 | dma_desc->desc3, | |
907 | TX_NORMAL_DESC3_CIC_POS, | |
908 | TX_NORMAL_DESC3_CIC_LEN, | |
909 | 0x3); | |
910 | ||
911 | /* Set the total length to be transmitted */ | |
912 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
913 | dma_desc->desc3, | |
914 | TX_NORMAL_DESC3_FL_POS, | |
915 | TX_NORMAL_DESC3_FL_LEN, | |
916 | pkt_info->length); | |
917 | } | |
918 | ||
919 | for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) { | |
920 | cur_index++; | |
921 | desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); | |
922 | dma_desc = desc_data->dma_desc; | |
923 | ||
924 | /* Update buffer address */ | |
925 | dma_desc->desc0 = | |
926 | cpu_to_le32(lower_32_bits(desc_data->skb_dma)); | |
927 | dma_desc->desc1 = | |
928 | cpu_to_le32(upper_32_bits(desc_data->skb_dma)); | |
929 | ||
930 | /* Update the buffer length */ | |
931 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
932 | dma_desc->desc2, | |
933 | TX_NORMAL_DESC2_HL_B1L_POS, | |
934 | TX_NORMAL_DESC2_HL_B1L_LEN, | |
935 | desc_data->skb_dma_len); | |
936 | ||
937 | /* Set OWN bit */ | |
938 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
939 | dma_desc->desc3, | |
940 | TX_NORMAL_DESC3_OWN_POS, | |
941 | TX_NORMAL_DESC3_OWN_LEN, 1); | |
942 | ||
943 | /* Mark it as NORMAL descriptor */ | |
944 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
945 | dma_desc->desc3, | |
946 | TX_NORMAL_DESC3_CTXT_POS, | |
947 | TX_NORMAL_DESC3_CTXT_LEN, 0); | |
948 | ||
949 | /* Enable HW CSUM */ | |
950 | if (csum) | |
951 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
952 | dma_desc->desc3, | |
953 | TX_NORMAL_DESC3_CIC_POS, | |
954 | TX_NORMAL_DESC3_CIC_LEN, | |
955 | 0x3); | |
956 | } | |
957 | ||
958 | /* Set LAST bit for the last descriptor */ | |
959 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
960 | dma_desc->desc3, | |
961 | TX_NORMAL_DESC3_LD_POS, | |
962 | TX_NORMAL_DESC3_LD_LEN, 1); | |
963 | ||
964 | /* Set IC bit based on Tx coalescing settings */ | |
965 | if (tx_set_ic) | |
966 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( | |
967 | dma_desc->desc2, | |
968 | TX_NORMAL_DESC2_IC_POS, | |
969 | TX_NORMAL_DESC2_IC_LEN, 1); | |
970 | ||
971 | /* Save the Tx info to report back during cleanup */ | |
972 | desc_data->tx.packets = pkt_info->tx_packets; | |
973 | desc_data->tx.bytes = pkt_info->tx_bytes; | |
974 | ||
975 | /* In case the Tx DMA engine is running, make sure everything | |
976 | * is written to the descriptor(s) before setting the OWN bit | |
977 | * for the first descriptor | |
978 | */ | |
979 | dma_wmb(); | |
980 | ||
981 | /* Set OWN bit for the first descriptor */ | |
982 | desc_data = XLGMAC_GET_DESC_DATA(ring, start_index); | |
983 | dma_desc = desc_data->dma_desc; | |
984 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( | |
985 | dma_desc->desc3, | |
986 | TX_NORMAL_DESC3_OWN_POS, | |
987 | TX_NORMAL_DESC3_OWN_LEN, 1); | |
988 | ||
989 | if (netif_msg_tx_queued(pdata)) | |
990 | xlgmac_dump_tx_desc(pdata, ring, start_index, | |
991 | pkt_info->desc_count, 1); | |
992 | ||
993 | /* Make sure ownership is written to the descriptor */ | |
994 | smp_wmb(); | |
995 | ||
996 | ring->cur = cur_index + 1; | |
997 | if (!pkt_info->skb->xmit_more || | |
998 | netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, | |
999 | channel->queue_index))) | |
1000 | xlgmac_tx_start_xmit(channel, ring); | |
1001 | else | |
1002 | ring->tx.xmit_more = 1; | |
1003 | ||
1004 | XLGMAC_PR("%s: descriptors %u to %u written\n", | |
1005 | channel->name, start_index & (ring->dma_desc_count - 1), | |
1006 | (ring->cur - 1) & (ring->dma_desc_count - 1)); | |
1007 | } | |
1008 | ||
1009 | static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info, | |
1010 | struct xlgmac_dma_desc *dma_desc) | |
1011 | { | |
1012 | u32 tsa, tsd; | |
1013 | u64 nsec; | |
1014 | ||
1015 | tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, | |
1016 | RX_CONTEXT_DESC3_TSA_POS, | |
1017 | RX_CONTEXT_DESC3_TSA_LEN); | |
1018 | tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, | |
1019 | RX_CONTEXT_DESC3_TSD_POS, | |
1020 | RX_CONTEXT_DESC3_TSD_LEN); | |
1021 | if (tsa && !tsd) { | |
1022 | nsec = le32_to_cpu(dma_desc->desc1); | |
1023 | nsec <<= 32; | |
1024 | nsec |= le32_to_cpu(dma_desc->desc0); | |
1025 | if (nsec != 0xffffffffffffffffULL) { | |
1026 | pkt_info->rx_tstamp = nsec; | |
1027 | pkt_info->attributes = XLGMAC_SET_REG_BITS( | |
1028 | pkt_info->attributes, | |
1029 | RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS, | |
1030 | RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, | |
1031 | 1); | |
1032 | } | |
1033 | } | |
1034 | } | |
1035 | ||
/* Return a Tx descriptor to a known-clear, software-owned state by
 * zeroing all four descriptor words (clearing OWN among the rest).
 */
static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
{
	struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;

	/* Reset the Tx descriptor
	 * Set buffer 1 (lo) address to zero
	 * Set buffer 1 (hi) address to zero
	 * Reset all other control bits (IC, TTSE, B2L & B1L)
	 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	dma_desc->desc0 = 0;
	dma_desc->desc1 = 0;
	dma_desc->desc2 = 0;
	dma_desc->desc3 = 0;

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
1054 | ||
/* Reset every Tx descriptor in the channel's ring, then program the
 * Tx DMA ring length and ring base address registers.
 */
static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
{
	struct xlgmac_ring *ring = channel->tx_ring;
	struct xlgmac_desc_data *desc_data;
	int start_index = ring->cur;
	int i;

	/* Initialze all descriptors */
	for (i = 0; i < ring->dma_desc_count; i++) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xlgmac_tx_desc_reset(desc_data);
	}

	/* Update the total number of Tx descriptors */
	writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));

	/* Update the starting address of descriptor ring */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
	writel(upper_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
}
1080 | ||
/* Re-arm an Rx descriptor for the DMA engine: program the header and
 * buffer DMA addresses, set the interrupt-on-completion bit according
 * to the Rx coalescing settings, then hand ownership back to hardware.
 * The dma_wmb() before setting OWN is essential: the engine may already
 * be running and must not see OWN before the rest of the descriptor.
 */
static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
				 struct xlgmac_desc_data *desc_data,
				 unsigned int index)
{
	struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int rx_usecs = pdata->rx_usecs;
	dma_addr_t hdr_dma, buf_dma;
	unsigned int inte;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 * Set buffer 1 (lo) address to header dma address (lo)
	 * Set buffer 1 (hi) address to header dma address (hi)
	 * Set buffer 2 (lo) address to buffer dma address (lo)
	 * Set buffer 2 (hi) address to buffer dma address (hi) and
	 * set control bits OWN and INTE
	 */
	hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
	buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
	dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				RX_NORMAL_DESC3_INTE_POS,
				RX_NORMAL_DESC3_INTE_LEN,
				inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				RX_NORMAL_DESC3_OWN_POS,
				RX_NORMAL_DESC3_OWN_LEN,
				1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
1137 | ||
/* Reset every Rx descriptor in the channel's ring and program the Rx
 * DMA ring length, base address, and tail pointer registers.
 */
static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	unsigned int start_index = ring->cur;
	struct xlgmac_desc_data *desc_data;
	unsigned int i;

	/* Initialize all descriptors */
	for (i = 0; i < ring->dma_desc_count; i++) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xlgmac_rx_desc_reset(pdata, desc_data, i);
	}

	/* Update the total number of Rx descriptors */
	writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));

	/* Update the starting address of descriptor ring */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
	writel(upper_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));

	/* Update the Rx Descriptor Tail Pointer */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
					 ring->dma_desc_count - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}
1170 | ||
/* Return non-zero when the descriptor is a context descriptor. */
static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				      TX_NORMAL_DESC3_CTXT_POS,
				      TX_NORMAL_DESC3_CTXT_LEN);
}
1178 | ||
/* Return non-zero when the descriptor is the last of its packet. */
static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				      TX_NORMAL_DESC3_LD_POS,
				      TX_NORMAL_DESC3_LD_LEN);
}
1186 | ||
1187 | static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata) | |
1188 | { | |
1189 | unsigned int max_q_count, q_count; | |
1190 | unsigned int reg, regval; | |
1191 | unsigned int i; | |
1192 | ||
1193 | /* Clear MTL flow control */ | |
1194 | for (i = 0; i < pdata->rx_q_count; i++) { | |
1195 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1196 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, | |
1197 | MTL_Q_RQOMR_EHFC_LEN, 0); | |
1198 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1199 | } | |
1200 | ||
1201 | /* Clear MAC flow control */ | |
1202 | max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES; | |
1203 | q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); | |
1204 | reg = MAC_Q0TFCR; | |
1205 | for (i = 0; i < q_count; i++) { | |
1206 | regval = readl(pdata->mac_regs + reg); | |
1207 | regval = XLGMAC_SET_REG_BITS(regval, | |
1208 | MAC_Q0TFCR_TFE_POS, | |
1209 | MAC_Q0TFCR_TFE_LEN, | |
1210 | 0); | |
1211 | writel(regval, pdata->mac_regs + reg); | |
1212 | ||
1213 | reg += MAC_QTFCR_INC; | |
1214 | } | |
1215 | ||
1216 | return 0; | |
1217 | } | |
1218 | ||
1219 | static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata) | |
1220 | { | |
1221 | unsigned int max_q_count, q_count; | |
1222 | unsigned int reg, regval; | |
1223 | unsigned int i; | |
1224 | ||
1225 | /* Set MTL flow control */ | |
1226 | for (i = 0; i < pdata->rx_q_count; i++) { | |
1227 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1228 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, | |
1229 | MTL_Q_RQOMR_EHFC_LEN, 1); | |
1230 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); | |
1231 | } | |
1232 | ||
1233 | /* Set MAC flow control */ | |
1234 | max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES; | |
1235 | q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); | |
1236 | reg = MAC_Q0TFCR; | |
1237 | for (i = 0; i < q_count; i++) { | |
1238 | regval = readl(pdata->mac_regs + reg); | |
1239 | ||
1240 | /* Enable transmit flow control */ | |
1241 | regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS, | |
1242 | MAC_Q0TFCR_TFE_LEN, 1); | |
1243 | /* Set pause time */ | |
1244 | regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS, | |
1245 | MAC_Q0TFCR_PT_LEN, 0xffff); | |
1246 | ||
1247 | writel(regval, pdata->mac_regs + reg); | |
1248 | ||
1249 | reg += MAC_QTFCR_INC; | |
1250 | } | |
1251 | ||
1252 | return 0; | |
1253 | } | |
1254 | ||
1255 | static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata) | |
1256 | { | |
1257 | u32 regval; | |
1258 | ||
1259 | regval = readl(pdata->mac_regs + MAC_RFCR); | |
1260 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, | |
1261 | MAC_RFCR_RFE_LEN, 0); | |
1262 | writel(regval, pdata->mac_regs + MAC_RFCR); | |
1263 | ||
1264 | return 0; | |
1265 | } | |
1266 | ||
1267 | static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata) | |
1268 | { | |
1269 | u32 regval; | |
1270 | ||
1271 | regval = readl(pdata->mac_regs + MAC_RFCR); | |
1272 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, | |
1273 | MAC_RFCR_RFE_LEN, 1); | |
1274 | writel(regval, pdata->mac_regs + MAC_RFCR); | |
1275 | ||
1276 | return 0; | |
1277 | } | |
1278 | ||
1279 | static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata) | |
1280 | { | |
1281 | if (pdata->tx_pause) | |
1282 | xlgmac_enable_tx_flow_control(pdata); | |
1283 | else | |
1284 | xlgmac_disable_tx_flow_control(pdata); | |
1285 | ||
1286 | return 0; | |
1287 | } | |
1288 | ||
1289 | static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata) | |
1290 | { | |
1291 | if (pdata->rx_pause) | |
1292 | xlgmac_enable_rx_flow_control(pdata); | |
1293 | else | |
1294 | xlgmac_disable_rx_flow_control(pdata); | |
1295 | ||
1296 | return 0; | |
1297 | } | |
1298 | ||
/* Program the Rx interrupt watchdog timer (RIWT) of every channel with
 * the configured coalescing value (pdata->rx_riwt). Stops at the first
 * channel that has no Rx ring. Always returns 0.
 */
static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
					     DMA_CH_RIWT_RWT_LEN,
					     pdata->rx_riwt);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
	}

	return 0;
}
1319 | ||
/* Apply both the Tx and Rx pause configuration to the hardware. */
static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
{
	xlgmac_config_tx_flow_control(pdata);
	xlgmac_config_rx_flow_control(pdata);
}
1325 | ||
/* Set the Forward Error Packets (FEP) bit on every MTL Rx queue. */
static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
					     MTL_Q_RQOMR_FEP_LEN, 1);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}
}
1338 | ||
/* Set the Forward Undersized Packets (FUP) bit on every MTL Rx queue. */
static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
					     MTL_Q_RQOMR_FUP_LEN, 1);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}
}
1351 | ||
/* Tx interrupt coalescing: no registers are programmed here; this stub
 * exists to satisfy the hw_ops interface and always returns 0.
 */
static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
{
	return 0;
}
1356 | ||
/* Program the Rx buffer size (RBSZ) in each channel's Rx control
 * register. Stops at the first channel that has no Rx ring.
 */
static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
					     DMA_CH_RCR_RBSZ_LEN,
					     pdata->rx_buf_size);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
	}
}
1375 | ||
1376 | static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata) | |
1377 | { | |
1378 | struct xlgmac_channel *channel; | |
1379 | unsigned int i; | |
1380 | u32 regval; | |
1381 | ||
1382 | channel = pdata->channel_head; | |
1383 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1384 | if (!channel->tx_ring) | |
1385 | break; | |
1386 | ||
1387 | if (pdata->hw_feat.tso) { | |
1388 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1389 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS, | |
1390 | DMA_CH_TCR_TSE_LEN, 1); | |
1391 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1392 | } | |
1393 | } | |
1394 | } | |
1395 | ||
/* Enable split-header (SPH) mode on every Rx channel and program the
 * header split maximum size (HDSMS) in the MAC Rx configuration.
 */
static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
					     DMA_CH_CR_SPH_LEN, 1);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
	}

	regval = readl(pdata->mac_regs + MAC_RCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
				     MAC_RCR_HDSMS_LEN,
				     XLGMAC_SPH_HDSMS_SIZE);
	writel(regval, pdata->mac_regs + MAC_RCR);
}
1419 | ||
1420 | static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata, | |
1421 | unsigned int usec) | |
1422 | { | |
1423 | unsigned long rate; | |
1424 | unsigned int ret; | |
1425 | ||
1426 | rate = pdata->sysclk_rate; | |
1427 | ||
1428 | /* Convert the input usec value to the watchdog timer value. Each | |
1429 | * watchdog timer value is equivalent to 256 clock cycles. | |
1430 | * Calculate the required value as: | |
1431 | * ( usec * ( system_clock_mhz / 10^6 ) / 256 | |
1432 | */ | |
1433 | ret = (usec * (rate / 1000000)) / 256; | |
1434 | ||
1435 | return ret; | |
1436 | } | |
1437 | ||
1438 | static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata, | |
1439 | unsigned int riwt) | |
1440 | { | |
1441 | unsigned long rate; | |
1442 | unsigned int ret; | |
1443 | ||
1444 | rate = pdata->sysclk_rate; | |
1445 | ||
1446 | /* Convert the input watchdog timer value to the usec value. Each | |
1447 | * watchdog timer value is equivalent to 256 clock cycles. | |
1448 | * Calculate the required value as: | |
1449 | * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) | |
1450 | */ | |
1451 | ret = (riwt * 256) / (rate / 1000000); | |
1452 | ||
1453 | return ret; | |
1454 | } | |
1455 | ||
/* Program the Rx threshold control (RTC) value on every MTL Rx queue.
 * Always returns 0.
 */
static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata,
				      unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
					     MTL_Q_RQOMR_RTC_LEN, val);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	return 0;
}
1471 | ||
/* Configure the MTL arbitration: weighted round robin with equal
 * weights across Tx traffic classes, strict priority on the Rx side.
 */
static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	/* Set Tx to weighted round robin scheduling algorithm */
	regval = readl(pdata->mac_regs + MTL_OMR);
	regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
				     MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
	writel(regval, pdata->mac_regs + MTL_OMR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
					     MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));

		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
					     MTL_TC_QWR_QW_LEN, 1);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
	}

	/* Set Rx to strict priority algorithm */
	regval = readl(pdata->mac_regs + MTL_OMR);
	regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
				     MTL_OMR_RAA_LEN, MTL_RAA_SP);
	writel(regval, pdata->mac_regs + MTL_OMR);
}
1502 | ||
/* Build the three hardware queue mappings:
 *   1. MTL Tx queues -> traffic classes (extra queues spread over the
 *      first tc_cnt classes),
 *   2. the 8 VLAN priorities -> MTL Rx queues via the MAC_RQC2 packed
 *      registers (one byte of priority mask per queue),
 *   3. MTL Rx queues -> DMA Rx channels, one-to-one.
 */
static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata)
{
	unsigned int ppq, ppq_extra, prio, prio_queues;
	unsigned int qptc, qptc_extra, queue;
	unsigned int reg, regval;
	unsigned int mask;
	unsigned int i, j;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			regval = readl(XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			regval = XLGMAC_SET_REG_BITS(regval,
						     MTL_Q_TQOMR_Q2TCMAP_POS,
						     MTL_Q_TQOMR_Q2TCMAP_LEN,
						     i);
			writel(regval, XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			queue++;
		}

		/* Give one leftover queue each to the first qptc_extra
		 * traffic classes
		 */
		if (i < qptc_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			regval = readl(XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			regval = XLGMAC_SET_REG_BITS(regval,
						     MTL_Q_TQOMR_Q2TCMAP_POS,
						     MTL_Q_TQOMR_Q2TCMAP_LEN,
						     i);
			writel(regval, XLGMAC_MTL_REG(pdata, queue,
						      MTL_Q_TQOMR));
			queue++;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
			    pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	regval = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			prio++;
		}

		if (i < ppq_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			prio++;
		}

		/* Each RQC2 register packs the priority masks of
		 * MAC_RQC2_Q_PER_REG queues, 8 bits per queue
		 */
		regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		/* Flush the accumulated value once the register is full
		 * or the last queue has been handled
		 */
		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		writel(regval, pdata->mac_regs + reg);
		reg += MAC_RQC2_INC;
		regval = 0;
	}

	/* Configure one to one, MTL Rx queue to DMA Rx channel mapping
	 * ie Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
	 */
	reg = MTL_RQDCM0R;
	regval = readl(pdata->mac_regs + reg);
	regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
		   MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
	writel(regval, pdata->mac_regs + reg);

	reg += MTL_RQDCM_INC;
	regval = readl(pdata->mac_regs + reg);
	regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
		   MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
	writel(regval, pdata->mac_regs + reg);

	reg += MTL_RQDCM_INC;
	regval = readl(pdata->mac_regs + reg);
	regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH |
		   MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH);
	writel(regval, pdata->mac_regs + reg);
}
1602 | ||
1603 | static unsigned int xlgmac_calculate_per_queue_fifo( | |
1604 | unsigned int fifo_size, | |
1605 | unsigned int queue_count) | |
1606 | { | |
1607 | unsigned int q_fifo_size; | |
1608 | unsigned int p_fifo; | |
1609 | ||
1610 | /* Calculate the configured fifo size */ | |
1611 | q_fifo_size = 1 << (fifo_size + 7); | |
1612 | ||
1613 | /* The configured value may not be the actual amount of fifo RAM */ | |
1614 | q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size); | |
1615 | ||
1616 | q_fifo_size = q_fifo_size / queue_count; | |
1617 | ||
1618 | /* Each increment in the queue fifo size represents 256 bytes of | |
1619 | * fifo, with 0 representing 256 bytes. Distribute the fifo equally | |
1620 | * between the queues. | |
1621 | */ | |
1622 | p_fifo = q_fifo_size / 256; | |
1623 | if (p_fifo) | |
1624 | p_fifo--; | |
1625 | ||
1626 | return p_fifo; | |
1627 | } | |
1628 | ||
/* Split the Tx fifo RAM evenly across the Tx queues and program the
 * per-queue size (TQS) into each queue's operation mode register.
 */
static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
{
	unsigned int fifo_size;
	unsigned int i;
	u32 regval;

	fifo_size = xlgmac_calculate_per_queue_fifo(
				pdata->hw_feat.tx_fifo_size,
				pdata->tx_q_count);

	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
					     MTL_Q_TQOMR_TQS_LEN, fifo_size);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo_size + 1) * 256));
}
1650 | ||
/* Split the Rx fifo RAM evenly across the Rx queues and program the
 * per-queue size (RQS) into each queue's operation mode register.
 */
static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
{
	unsigned int fifo_size;
	unsigned int i;
	u32 regval;

	fifo_size = xlgmac_calculate_per_queue_fifo(
				pdata->hw_feat.rx_fifo_size,
				pdata->rx_q_count);

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
					     MTL_Q_RQOMR_RQS_LEN, fifo_size);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	netif_info(pdata, drv, pdata->netdev,
		   "%d Rx hardware queues, %d byte fifo per queue\n",
		   pdata->rx_q_count, ((fifo_size + 1) * 256));
}
1672 | ||
/* Program the fifo fill thresholds at which Rx flow control is
 * asserted (RFA) and de-asserted (RFD) for every Rx queue.
 */
static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
		/* Activate flow control when less than 4k left in fifo */
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
					     MTL_Q_RQFCR_RFA_LEN, 2);
		/* De-activate flow control when more than 6k left in fifo */
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
					     MTL_Q_RQFCR_RFD_LEN, 4);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
	}
}
1689 | ||
/* Program the Tx threshold control (TTC) value on every MTL Tx queue.
 * Always returns 0.
 */
static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
				      unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
					     MTL_Q_TQOMR_TTC_LEN, val);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	return 0;
}
1705 | ||
/* Set or clear Rx store-and-forward (RSF) mode on every MTL Rx queue.
 * Always returns 0.
 */
static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
				  unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
					     MTL_Q_RQOMR_RSF_LEN, val);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	return 0;
}
1721 | ||
/* Set or clear Tx store-and-forward (TSF) mode on every MTL Tx queue.
 * Always returns 0.
 */
static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
				  unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
					     MTL_Q_TQOMR_TSF_LEN, val);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	return 0;
}
1737 | ||
/* Program the operate-on-second-packet (OSP) setting into each Tx
 * channel's control register. Stops at the first channel without a
 * Tx ring. Always returns 0.
 */
static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
					     DMA_CH_TCR_OSP_LEN,
					     pdata->tx_osp_mode);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
	}

	return 0;
}
1758 | ||
/* Program the PBLx8 (burst length multiplied by 8) setting into every
 * channel's control register. Always returns 0.
 */
static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
					     DMA_CH_CR_PBLX8_LEN,
					     pdata->pblx8);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
	}

	return 0;
}
1776 | ||
1777 | static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata) | |
1778 | { | |
1779 | u32 regval; | |
1780 | ||
1781 | regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR)); | |
1782 | regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, | |
1783 | DMA_CH_TCR_PBL_LEN); | |
1784 | return regval; | |
1785 | } | |
1786 | ||
1787 | static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata) | |
1788 | { | |
1789 | struct xlgmac_channel *channel; | |
1790 | unsigned int i; | |
1791 | u32 regval; | |
1792 | ||
1793 | channel = pdata->channel_head; | |
1794 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1795 | if (!channel->tx_ring) | |
1796 | break; | |
1797 | ||
1798 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1799 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, | |
1800 | DMA_CH_TCR_PBL_LEN, | |
1801 | pdata->tx_pbl); | |
1802 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); | |
1803 | } | |
1804 | ||
1805 | return 0; | |
1806 | } | |
1807 | ||
1808 | static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata) | |
1809 | { | |
1810 | u32 regval; | |
1811 | ||
1812 | regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR)); | |
1813 | regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, | |
1814 | DMA_CH_RCR_PBL_LEN); | |
1815 | return regval; | |
1816 | } | |
1817 | ||
1818 | static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata) | |
1819 | { | |
1820 | struct xlgmac_channel *channel; | |
1821 | unsigned int i; | |
1822 | u32 regval; | |
1823 | ||
1824 | channel = pdata->channel_head; | |
1825 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
1826 | if (!channel->rx_ring) | |
1827 | break; | |
1828 | ||
1829 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); | |
1830 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, | |
1831 | DMA_CH_RCR_PBL_LEN, | |
1832 | pdata->rx_pbl); | |
1833 | writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); | |
1834 | } | |
1835 | ||
1836 | return 0; | |
1837 | } | |
1838 | ||
1839 | static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo) | |
1840 | { | |
1841 | bool read_hi; | |
1842 | u64 val; | |
1843 | ||
1844 | switch (reg_lo) { | |
1845 | /* These registers are always 64 bit */ | |
1846 | case MMC_TXOCTETCOUNT_GB_LO: | |
1847 | case MMC_TXOCTETCOUNT_G_LO: | |
1848 | case MMC_RXOCTETCOUNT_GB_LO: | |
1849 | case MMC_RXOCTETCOUNT_G_LO: | |
1850 | read_hi = true; | |
1851 | break; | |
1852 | ||
1853 | default: | |
1854 | read_hi = false; | |
1855 | } | |
1856 | ||
1857 | val = (u64)readl(pdata->mac_regs + reg_lo); | |
1858 | ||
1859 | if (read_hi) | |
1860 | val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32); | |
1861 | ||
1862 | return val; | |
1863 | } | |
1864 | ||
1865 | static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata) | |
1866 | { | |
1867 | unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR); | |
1868 | struct xlgmac_stats *stats = &pdata->stats; | |
1869 | ||
1870 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1871 | MMC_TISR_TXOCTETCOUNT_GB_POS, | |
1872 | MMC_TISR_TXOCTETCOUNT_GB_LEN)) | |
1873 | stats->txoctetcount_gb += | |
1874 | xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); | |
1875 | ||
1876 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1877 | MMC_TISR_TXFRAMECOUNT_GB_POS, | |
1878 | MMC_TISR_TXFRAMECOUNT_GB_LEN)) | |
1879 | stats->txframecount_gb += | |
1880 | xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); | |
1881 | ||
1882 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1883 | MMC_TISR_TXBROADCASTFRAMES_G_POS, | |
1884 | MMC_TISR_TXBROADCASTFRAMES_G_LEN)) | |
1885 | stats->txbroadcastframes_g += | |
1886 | xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); | |
1887 | ||
1888 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1889 | MMC_TISR_TXMULTICASTFRAMES_G_POS, | |
1890 | MMC_TISR_TXMULTICASTFRAMES_G_LEN)) | |
1891 | stats->txmulticastframes_g += | |
1892 | xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); | |
1893 | ||
1894 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1895 | MMC_TISR_TX64OCTETS_GB_POS, | |
1896 | MMC_TISR_TX64OCTETS_GB_LEN)) | |
1897 | stats->tx64octets_gb += | |
1898 | xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); | |
1899 | ||
1900 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1901 | MMC_TISR_TX65TO127OCTETS_GB_POS, | |
1902 | MMC_TISR_TX65TO127OCTETS_GB_LEN)) | |
1903 | stats->tx65to127octets_gb += | |
1904 | xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); | |
1905 | ||
1906 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1907 | MMC_TISR_TX128TO255OCTETS_GB_POS, | |
1908 | MMC_TISR_TX128TO255OCTETS_GB_LEN)) | |
1909 | stats->tx128to255octets_gb += | |
1910 | xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); | |
1911 | ||
1912 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1913 | MMC_TISR_TX256TO511OCTETS_GB_POS, | |
1914 | MMC_TISR_TX256TO511OCTETS_GB_LEN)) | |
1915 | stats->tx256to511octets_gb += | |
1916 | xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); | |
1917 | ||
1918 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1919 | MMC_TISR_TX512TO1023OCTETS_GB_POS, | |
1920 | MMC_TISR_TX512TO1023OCTETS_GB_LEN)) | |
1921 | stats->tx512to1023octets_gb += | |
1922 | xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); | |
1923 | ||
1924 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1925 | MMC_TISR_TX1024TOMAXOCTETS_GB_POS, | |
1926 | MMC_TISR_TX1024TOMAXOCTETS_GB_LEN)) | |
1927 | stats->tx1024tomaxoctets_gb += | |
1928 | xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); | |
1929 | ||
1930 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1931 | MMC_TISR_TXUNICASTFRAMES_GB_POS, | |
1932 | MMC_TISR_TXUNICASTFRAMES_GB_LEN)) | |
1933 | stats->txunicastframes_gb += | |
1934 | xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); | |
1935 | ||
1936 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1937 | MMC_TISR_TXMULTICASTFRAMES_GB_POS, | |
1938 | MMC_TISR_TXMULTICASTFRAMES_GB_LEN)) | |
1939 | stats->txmulticastframes_gb += | |
1940 | xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); | |
1941 | ||
1942 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1943 | MMC_TISR_TXBROADCASTFRAMES_GB_POS, | |
1944 | MMC_TISR_TXBROADCASTFRAMES_GB_LEN)) | |
1945 | stats->txbroadcastframes_g += | |
1946 | xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); | |
1947 | ||
1948 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1949 | MMC_TISR_TXUNDERFLOWERROR_POS, | |
1950 | MMC_TISR_TXUNDERFLOWERROR_LEN)) | |
1951 | stats->txunderflowerror += | |
1952 | xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); | |
1953 | ||
1954 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1955 | MMC_TISR_TXOCTETCOUNT_G_POS, | |
1956 | MMC_TISR_TXOCTETCOUNT_G_LEN)) | |
1957 | stats->txoctetcount_g += | |
1958 | xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); | |
1959 | ||
1960 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1961 | MMC_TISR_TXFRAMECOUNT_G_POS, | |
1962 | MMC_TISR_TXFRAMECOUNT_G_LEN)) | |
1963 | stats->txframecount_g += | |
1964 | xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); | |
1965 | ||
1966 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1967 | MMC_TISR_TXPAUSEFRAMES_POS, | |
1968 | MMC_TISR_TXPAUSEFRAMES_LEN)) | |
1969 | stats->txpauseframes += | |
1970 | xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); | |
1971 | ||
1972 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1973 | MMC_TISR_TXVLANFRAMES_G_POS, | |
1974 | MMC_TISR_TXVLANFRAMES_G_LEN)) | |
1975 | stats->txvlanframes_g += | |
1976 | xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); | |
1977 | } | |
1978 | ||
1979 | static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata) | |
1980 | { | |
1981 | unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR); | |
1982 | struct xlgmac_stats *stats = &pdata->stats; | |
1983 | ||
1984 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1985 | MMC_RISR_RXFRAMECOUNT_GB_POS, | |
1986 | MMC_RISR_RXFRAMECOUNT_GB_LEN)) | |
1987 | stats->rxframecount_gb += | |
1988 | xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); | |
1989 | ||
1990 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1991 | MMC_RISR_RXOCTETCOUNT_GB_POS, | |
1992 | MMC_RISR_RXOCTETCOUNT_GB_LEN)) | |
1993 | stats->rxoctetcount_gb += | |
1994 | xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); | |
1995 | ||
1996 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
1997 | MMC_RISR_RXOCTETCOUNT_G_POS, | |
1998 | MMC_RISR_RXOCTETCOUNT_G_LEN)) | |
1999 | stats->rxoctetcount_g += | |
2000 | xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); | |
2001 | ||
2002 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2003 | MMC_RISR_RXBROADCASTFRAMES_G_POS, | |
2004 | MMC_RISR_RXBROADCASTFRAMES_G_LEN)) | |
2005 | stats->rxbroadcastframes_g += | |
2006 | xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); | |
2007 | ||
2008 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2009 | MMC_RISR_RXMULTICASTFRAMES_G_POS, | |
2010 | MMC_RISR_RXMULTICASTFRAMES_G_LEN)) | |
2011 | stats->rxmulticastframes_g += | |
2012 | xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); | |
2013 | ||
2014 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2015 | MMC_RISR_RXCRCERROR_POS, | |
2016 | MMC_RISR_RXCRCERROR_LEN)) | |
2017 | stats->rxcrcerror += | |
2018 | xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); | |
2019 | ||
2020 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2021 | MMC_RISR_RXRUNTERROR_POS, | |
2022 | MMC_RISR_RXRUNTERROR_LEN)) | |
2023 | stats->rxrunterror += | |
2024 | xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); | |
2025 | ||
2026 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2027 | MMC_RISR_RXJABBERERROR_POS, | |
2028 | MMC_RISR_RXJABBERERROR_LEN)) | |
2029 | stats->rxjabbererror += | |
2030 | xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); | |
2031 | ||
2032 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2033 | MMC_RISR_RXUNDERSIZE_G_POS, | |
2034 | MMC_RISR_RXUNDERSIZE_G_LEN)) | |
2035 | stats->rxundersize_g += | |
2036 | xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); | |
2037 | ||
2038 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2039 | MMC_RISR_RXOVERSIZE_G_POS, | |
2040 | MMC_RISR_RXOVERSIZE_G_LEN)) | |
2041 | stats->rxoversize_g += | |
2042 | xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); | |
2043 | ||
2044 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2045 | MMC_RISR_RX64OCTETS_GB_POS, | |
2046 | MMC_RISR_RX64OCTETS_GB_LEN)) | |
2047 | stats->rx64octets_gb += | |
2048 | xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); | |
2049 | ||
2050 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2051 | MMC_RISR_RX65TO127OCTETS_GB_POS, | |
2052 | MMC_RISR_RX65TO127OCTETS_GB_LEN)) | |
2053 | stats->rx65to127octets_gb += | |
2054 | xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); | |
2055 | ||
2056 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2057 | MMC_RISR_RX128TO255OCTETS_GB_POS, | |
2058 | MMC_RISR_RX128TO255OCTETS_GB_LEN)) | |
2059 | stats->rx128to255octets_gb += | |
2060 | xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); | |
2061 | ||
2062 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2063 | MMC_RISR_RX256TO511OCTETS_GB_POS, | |
2064 | MMC_RISR_RX256TO511OCTETS_GB_LEN)) | |
2065 | stats->rx256to511octets_gb += | |
2066 | xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); | |
2067 | ||
2068 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2069 | MMC_RISR_RX512TO1023OCTETS_GB_POS, | |
2070 | MMC_RISR_RX512TO1023OCTETS_GB_LEN)) | |
2071 | stats->rx512to1023octets_gb += | |
2072 | xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); | |
2073 | ||
2074 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2075 | MMC_RISR_RX1024TOMAXOCTETS_GB_POS, | |
2076 | MMC_RISR_RX1024TOMAXOCTETS_GB_LEN)) | |
2077 | stats->rx1024tomaxoctets_gb += | |
2078 | xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); | |
2079 | ||
2080 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2081 | MMC_RISR_RXUNICASTFRAMES_G_POS, | |
2082 | MMC_RISR_RXUNICASTFRAMES_G_LEN)) | |
2083 | stats->rxunicastframes_g += | |
2084 | xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); | |
2085 | ||
2086 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2087 | MMC_RISR_RXLENGTHERROR_POS, | |
2088 | MMC_RISR_RXLENGTHERROR_LEN)) | |
2089 | stats->rxlengtherror += | |
2090 | xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); | |
2091 | ||
2092 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2093 | MMC_RISR_RXOUTOFRANGETYPE_POS, | |
2094 | MMC_RISR_RXOUTOFRANGETYPE_LEN)) | |
2095 | stats->rxoutofrangetype += | |
2096 | xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); | |
2097 | ||
2098 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2099 | MMC_RISR_RXPAUSEFRAMES_POS, | |
2100 | MMC_RISR_RXPAUSEFRAMES_LEN)) | |
2101 | stats->rxpauseframes += | |
2102 | xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); | |
2103 | ||
2104 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2105 | MMC_RISR_RXFIFOOVERFLOW_POS, | |
2106 | MMC_RISR_RXFIFOOVERFLOW_LEN)) | |
2107 | stats->rxfifooverflow += | |
2108 | xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); | |
2109 | ||
2110 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2111 | MMC_RISR_RXVLANFRAMES_GB_POS, | |
2112 | MMC_RISR_RXVLANFRAMES_GB_LEN)) | |
2113 | stats->rxvlanframes_gb += | |
2114 | xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); | |
2115 | ||
2116 | if (XLGMAC_GET_REG_BITS(mmc_isr, | |
2117 | MMC_RISR_RXWATCHDOGERROR_POS, | |
2118 | MMC_RISR_RXWATCHDOGERROR_LEN)) | |
2119 | stats->rxwatchdogerror += | |
2120 | xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); | |
2121 | } | |
2122 | ||
2123 | static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata) | |
2124 | { | |
2125 | struct xlgmac_stats *stats = &pdata->stats; | |
2126 | u32 regval; | |
2127 | ||
2128 | /* Freeze counters */ | |
2129 | regval = readl(pdata->mac_regs + MMC_CR); | |
2130 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, | |
2131 | MMC_CR_MCF_LEN, 1); | |
2132 | writel(regval, pdata->mac_regs + MMC_CR); | |
2133 | ||
2134 | stats->txoctetcount_gb += | |
2135 | xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); | |
2136 | ||
2137 | stats->txframecount_gb += | |
2138 | xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); | |
2139 | ||
2140 | stats->txbroadcastframes_g += | |
2141 | xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); | |
2142 | ||
2143 | stats->txmulticastframes_g += | |
2144 | xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); | |
2145 | ||
2146 | stats->tx64octets_gb += | |
2147 | xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); | |
2148 | ||
2149 | stats->tx65to127octets_gb += | |
2150 | xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); | |
2151 | ||
2152 | stats->tx128to255octets_gb += | |
2153 | xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); | |
2154 | ||
2155 | stats->tx256to511octets_gb += | |
2156 | xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); | |
2157 | ||
2158 | stats->tx512to1023octets_gb += | |
2159 | xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); | |
2160 | ||
2161 | stats->tx1024tomaxoctets_gb += | |
2162 | xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); | |
2163 | ||
2164 | stats->txunicastframes_gb += | |
2165 | xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); | |
2166 | ||
2167 | stats->txmulticastframes_gb += | |
2168 | xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); | |
2169 | ||
2170 | stats->txbroadcastframes_g += | |
2171 | xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); | |
2172 | ||
2173 | stats->txunderflowerror += | |
2174 | xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); | |
2175 | ||
2176 | stats->txoctetcount_g += | |
2177 | xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); | |
2178 | ||
2179 | stats->txframecount_g += | |
2180 | xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); | |
2181 | ||
2182 | stats->txpauseframes += | |
2183 | xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); | |
2184 | ||
2185 | stats->txvlanframes_g += | |
2186 | xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); | |
2187 | ||
2188 | stats->rxframecount_gb += | |
2189 | xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); | |
2190 | ||
2191 | stats->rxoctetcount_gb += | |
2192 | xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); | |
2193 | ||
2194 | stats->rxoctetcount_g += | |
2195 | xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); | |
2196 | ||
2197 | stats->rxbroadcastframes_g += | |
2198 | xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); | |
2199 | ||
2200 | stats->rxmulticastframes_g += | |
2201 | xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); | |
2202 | ||
2203 | stats->rxcrcerror += | |
2204 | xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); | |
2205 | ||
2206 | stats->rxrunterror += | |
2207 | xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); | |
2208 | ||
2209 | stats->rxjabbererror += | |
2210 | xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); | |
2211 | ||
2212 | stats->rxundersize_g += | |
2213 | xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); | |
2214 | ||
2215 | stats->rxoversize_g += | |
2216 | xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); | |
2217 | ||
2218 | stats->rx64octets_gb += | |
2219 | xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); | |
2220 | ||
2221 | stats->rx65to127octets_gb += | |
2222 | xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); | |
2223 | ||
2224 | stats->rx128to255octets_gb += | |
2225 | xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); | |
2226 | ||
2227 | stats->rx256to511octets_gb += | |
2228 | xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); | |
2229 | ||
2230 | stats->rx512to1023octets_gb += | |
2231 | xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); | |
2232 | ||
2233 | stats->rx1024tomaxoctets_gb += | |
2234 | xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); | |
2235 | ||
2236 | stats->rxunicastframes_g += | |
2237 | xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); | |
2238 | ||
2239 | stats->rxlengtherror += | |
2240 | xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); | |
2241 | ||
2242 | stats->rxoutofrangetype += | |
2243 | xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); | |
2244 | ||
2245 | stats->rxpauseframes += | |
2246 | xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); | |
2247 | ||
2248 | stats->rxfifooverflow += | |
2249 | xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); | |
2250 | ||
2251 | stats->rxvlanframes_gb += | |
2252 | xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); | |
2253 | ||
2254 | stats->rxwatchdogerror += | |
2255 | xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); | |
2256 | ||
2257 | /* Un-freeze counters */ | |
2258 | regval = readl(pdata->mac_regs + MMC_CR); | |
2259 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, | |
2260 | MMC_CR_MCF_LEN, 0); | |
2261 | writel(regval, pdata->mac_regs + MMC_CR); | |
2262 | } | |
2263 | ||
2264 | static void xlgmac_config_mmc(struct xlgmac_pdata *pdata) | |
2265 | { | |
2266 | u32 regval; | |
2267 | ||
2268 | regval = readl(pdata->mac_regs + MMC_CR); | |
2269 | /* Set counters to reset on read */ | |
2270 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS, | |
2271 | MMC_CR_ROR_LEN, 1); | |
2272 | /* Reset the counters */ | |
2273 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, | |
2274 | MMC_CR_CR_LEN, 1); | |
2275 | writel(regval, pdata->mac_regs + MMC_CR); | |
2276 | } | |
2277 | ||
2278 | static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type, | |
2279 | unsigned int index, unsigned int val) | |
2280 | { | |
2281 | unsigned int wait; | |
2282 | int ret = 0; | |
2283 | u32 regval; | |
2284 | ||
2285 | mutex_lock(&pdata->rss_mutex); | |
2286 | ||
2287 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), | |
2288 | MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN); | |
2289 | if (regval) { | |
2290 | ret = -EBUSY; | |
2291 | goto unlock; | |
2292 | } | |
2293 | ||
2294 | writel(val, pdata->mac_regs + MAC_RSSDR); | |
2295 | ||
2296 | regval = readl(pdata->mac_regs + MAC_RSSAR); | |
2297 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS, | |
2298 | MAC_RSSAR_RSSIA_LEN, index); | |
2299 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS, | |
2300 | MAC_RSSAR_ADDRT_LEN, type); | |
2301 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS, | |
2302 | MAC_RSSAR_CT_LEN, 0); | |
2303 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS, | |
2304 | MAC_RSSAR_OB_LEN, 1); | |
2305 | writel(regval, pdata->mac_regs + MAC_RSSAR); | |
2306 | ||
2307 | wait = 1000; | |
2308 | while (wait--) { | |
2309 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), | |
2310 | MAC_RSSAR_OB_POS, | |
2311 | MAC_RSSAR_OB_LEN); | |
2312 | if (!regval) | |
2313 | goto unlock; | |
2314 | ||
2315 | usleep_range(1000, 1500); | |
2316 | } | |
2317 | ||
2318 | ret = -EBUSY; | |
2319 | ||
2320 | unlock: | |
2321 | mutex_unlock(&pdata->rss_mutex); | |
2322 | ||
2323 | return ret; | |
2324 | } | |
2325 | ||
2326 | static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata) | |
2327 | { | |
2328 | unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); | |
2329 | unsigned int *key = (unsigned int *)&pdata->rss_key; | |
2330 | int ret; | |
2331 | ||
2332 | while (key_regs--) { | |
2333 | ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE, | |
2334 | key_regs, *key++); | |
2335 | if (ret) | |
2336 | return ret; | |
2337 | } | |
2338 | ||
2339 | return 0; | |
2340 | } | |
2341 | ||
2342 | static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata) | |
2343 | { | |
2344 | unsigned int i; | |
2345 | int ret; | |
2346 | ||
2347 | for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { | |
2348 | ret = xlgmac_write_rss_reg(pdata, | |
2349 | XLGMAC_RSS_LOOKUP_TABLE_TYPE, i, | |
2350 | pdata->rss_table[i]); | |
2351 | if (ret) | |
2352 | return ret; | |
2353 | } | |
2354 | ||
2355 | return 0; | |
2356 | } | |
2357 | ||
2358 | static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key) | |
2359 | { | |
2360 | memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); | |
2361 | ||
2362 | return xlgmac_write_rss_hash_key(pdata); | |
2363 | } | |
2364 | ||
2365 | static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata, | |
2366 | const u32 *table) | |
2367 | { | |
2368 | unsigned int i; | |
2369 | u32 tval; | |
2370 | ||
2371 | for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { | |
2372 | tval = table[i]; | |
2373 | pdata->rss_table[i] = XLGMAC_SET_REG_BITS( | |
2374 | pdata->rss_table[i], | |
2375 | MAC_RSSDR_DMCH_POS, | |
2376 | MAC_RSSDR_DMCH_LEN, | |
2377 | tval); | |
2378 | } | |
2379 | ||
2380 | return xlgmac_write_rss_lookup_table(pdata); | |
2381 | } | |
2382 | ||
2383 | static int xlgmac_enable_rss(struct xlgmac_pdata *pdata) | |
2384 | { | |
2385 | u32 regval; | |
2386 | int ret; | |
2387 | ||
2388 | if (!pdata->hw_feat.rss) | |
2389 | return -EOPNOTSUPP; | |
2390 | ||
2391 | /* Program the hash key */ | |
2392 | ret = xlgmac_write_rss_hash_key(pdata); | |
2393 | if (ret) | |
2394 | return ret; | |
2395 | ||
2396 | /* Program the lookup table */ | |
2397 | ret = xlgmac_write_rss_lookup_table(pdata); | |
2398 | if (ret) | |
2399 | return ret; | |
2400 | ||
2401 | /* Set the RSS options */ | |
2402 | writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR); | |
2403 | ||
2404 | /* Enable RSS */ | |
2405 | regval = readl(pdata->mac_regs + MAC_RSSCR); | |
2406 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, | |
2407 | MAC_RSSCR_RSSE_LEN, 1); | |
2408 | writel(regval, pdata->mac_regs + MAC_RSSCR); | |
2409 | ||
2410 | return 0; | |
2411 | } | |
2412 | ||
2413 | static int xlgmac_disable_rss(struct xlgmac_pdata *pdata) | |
2414 | { | |
2415 | u32 regval; | |
2416 | ||
2417 | if (!pdata->hw_feat.rss) | |
2418 | return -EOPNOTSUPP; | |
2419 | ||
2420 | regval = readl(pdata->mac_regs + MAC_RSSCR); | |
2421 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, | |
2422 | MAC_RSSCR_RSSE_LEN, 0); | |
2423 | writel(regval, pdata->mac_regs + MAC_RSSCR); | |
2424 | ||
2425 | return 0; | |
2426 | } | |
2427 | ||
2428 | static void xlgmac_config_rss(struct xlgmac_pdata *pdata) | |
2429 | { | |
2430 | int ret; | |
2431 | ||
2432 | if (!pdata->hw_feat.rss) | |
2433 | return; | |
2434 | ||
2435 | if (pdata->netdev->features & NETIF_F_RXHASH) | |
2436 | ret = xlgmac_enable_rss(pdata); | |
2437 | else | |
2438 | ret = xlgmac_disable_rss(pdata); | |
2439 | ||
2440 | if (ret) | |
2441 | netdev_err(pdata->netdev, | |
2442 | "error configuring RSS, RSS disabled\n"); | |
2443 | } | |
2444 | ||
2445 | static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata) | |
2446 | { | |
2447 | unsigned int dma_ch_isr, dma_ch_ier; | |
2448 | struct xlgmac_channel *channel; | |
2449 | unsigned int i; | |
2450 | ||
2451 | channel = pdata->channel_head; | |
2452 | for (i = 0; i < pdata->channel_count; i++, channel++) { | |
2453 | /* Clear all the interrupts which are set */ | |
2454 | dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR)); | |
2455 | writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR)); | |
2456 | ||
2457 | /* Clear all interrupt enable bits */ | |
2458 | dma_ch_ier = 0; | |
2459 | ||
2460 | /* Enable following interrupts | |
2461 | * NIE - Normal Interrupt Summary Enable | |
2462 | * AIE - Abnormal Interrupt Summary Enable | |
2463 | * FBEE - Fatal Bus Error Enable | |
2464 | */ | |
2465 | dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, | |
2466 | DMA_CH_IER_NIE_POS, | |
2467 | DMA_CH_IER_NIE_LEN, 1); | |
2468 | dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, | |
2469 | DMA_CH_IER_AIE_POS, | |
2470 | DMA_CH_IER_AIE_LEN, 1); | |
2471 | dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, | |
2472 | DMA_CH_IER_FBEE_POS, | |
2473 | DMA_CH_IER_FBEE_LEN, 1); | |
2474 | ||
2475 | if (channel->tx_ring) { | |
2476 | /* Enable the following Tx interrupts | |
2477 | * TIE - Transmit Interrupt Enable (unless using | |
2478 | * per channel interrupts) | |
2479 | */ | |
2480 | if (!pdata->per_channel_irq) | |
2481 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2482 | dma_ch_ier, | |
2483 | DMA_CH_IER_TIE_POS, | |
2484 | DMA_CH_IER_TIE_LEN, | |
2485 | 1); | |
2486 | } | |
2487 | if (channel->rx_ring) { | |
2488 | /* Enable following Rx interrupts | |
2489 | * RBUE - Receive Buffer Unavailable Enable | |
2490 | * RIE - Receive Interrupt Enable (unless using | |
2491 | * per channel interrupts) | |
2492 | */ | |
2493 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2494 | dma_ch_ier, | |
2495 | DMA_CH_IER_RBUE_POS, | |
2496 | DMA_CH_IER_RBUE_LEN, | |
2497 | 1); | |
2498 | if (!pdata->per_channel_irq) | |
2499 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2500 | dma_ch_ier, | |
2501 | DMA_CH_IER_RIE_POS, | |
2502 | DMA_CH_IER_RIE_LEN, | |
2503 | 1); | |
2504 | } | |
2505 | ||
2506 | writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2507 | } | |
2508 | } | |
2509 | ||
2510 | static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata) | |
2511 | { | |
2512 | unsigned int q_count, i; | |
2513 | unsigned int mtl_q_isr; | |
2514 | ||
2515 | q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); | |
2516 | for (i = 0; i < q_count; i++) { | |
2517 | /* Clear all the interrupts which are set */ | |
2518 | mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); | |
2519 | writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); | |
2520 | ||
2521 | /* No MTL interrupts to be enabled */ | |
2522 | writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER)); | |
2523 | } | |
2524 | } | |
2525 | ||
2526 | static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata) | |
2527 | { | |
2528 | unsigned int mac_ier = 0; | |
2529 | u32 regval; | |
2530 | ||
2531 | /* Enable Timestamp interrupt */ | |
2532 | mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS, | |
2533 | MAC_IER_TSIE_LEN, 1); | |
2534 | ||
2535 | writel(mac_ier, pdata->mac_regs + MAC_IER); | |
2536 | ||
2537 | /* Enable all counter interrupts */ | |
2538 | regval = readl(pdata->mac_regs + MMC_RIER); | |
2539 | regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS, | |
2540 | MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff); | |
2541 | writel(regval, pdata->mac_regs + MMC_RIER); | |
2542 | regval = readl(pdata->mac_regs + MMC_TIER); | |
2543 | regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS, | |
2544 | MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff); | |
2545 | writel(regval, pdata->mac_regs + MMC_TIER); | |
2546 | } | |
2547 | ||
2548 | static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata) | |
2549 | { | |
2550 | u32 regval; | |
2551 | ||
2552 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), | |
2553 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); | |
2554 | if (regval == 0x1) | |
2555 | return 0; | |
2556 | ||
2557 | regval = readl(pdata->mac_regs + MAC_TCR); | |
2558 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, | |
2559 | MAC_TCR_SS_LEN, 0x1); | |
2560 | writel(regval, pdata->mac_regs + MAC_TCR); | |
2561 | ||
2562 | return 0; | |
2563 | } | |
2564 | ||
2565 | static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata) | |
2566 | { | |
2567 | u32 regval; | |
2568 | ||
2569 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), | |
2570 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); | |
2571 | if (regval == 0) | |
2572 | return 0; | |
2573 | ||
2574 | regval = readl(pdata->mac_regs + MAC_TCR); | |
2575 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, | |
2576 | MAC_TCR_SS_LEN, 0); | |
2577 | writel(regval, pdata->mac_regs + MAC_TCR); | |
2578 | ||
2579 | return 0; | |
2580 | } | |
2581 | ||
2582 | static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata) | |
2583 | { | |
2584 | u32 regval; | |
2585 | ||
2586 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), | |
2587 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); | |
2588 | if (regval == 0x2) | |
2589 | return 0; | |
2590 | ||
2591 | regval = readl(pdata->mac_regs + MAC_TCR); | |
2592 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, | |
2593 | MAC_TCR_SS_LEN, 0x2); | |
2594 | writel(regval, pdata->mac_regs + MAC_TCR); | |
2595 | ||
2596 | return 0; | |
2597 | } | |
2598 | ||
2599 | static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata) | |
2600 | { | |
2601 | u32 regval; | |
2602 | ||
2603 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), | |
2604 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); | |
2605 | if (regval == 0x3) | |
2606 | return 0; | |
2607 | ||
2608 | regval = readl(pdata->mac_regs + MAC_TCR); | |
2609 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, | |
2610 | MAC_TCR_SS_LEN, 0x3); | |
2611 | writel(regval, pdata->mac_regs + MAC_TCR); | |
2612 | ||
2613 | return 0; | |
2614 | } | |
2615 | ||
2616 | static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata) | |
2617 | { | |
2618 | switch (pdata->phy_speed) { | |
2619 | case SPEED_100000: | |
2620 | xlgmac_set_xlgmii_100000_speed(pdata); | |
2621 | break; | |
2622 | ||
2623 | case SPEED_50000: | |
2624 | xlgmac_set_xlgmii_50000_speed(pdata); | |
2625 | break; | |
2626 | ||
2627 | case SPEED_40000: | |
2628 | xlgmac_set_xlgmii_40000_speed(pdata); | |
2629 | break; | |
2630 | ||
2631 | case SPEED_25000: | |
2632 | xlgmac_set_xlgmii_25000_speed(pdata); | |
2633 | break; | |
2634 | } | |
2635 | } | |
2636 | ||
/* Parse the Rx descriptor at ring->cur and fill in ring->pkt_info.
 *
 * Returns 1 if the descriptor is still owned by the DMA (nothing to do yet),
 * 0 once the descriptor has been consumed (either a context descriptor or a
 * normal data descriptor; the caller inspects pkt_info->attributes to tell
 * which, and whether more descriptors belong to this packet).
 */
static int xlgmac_dev_read(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int err, etlt, l34t;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	dma_desc = desc_data->dma_desc;
	pkt_info = &ring->pkt_info;

	/* Check for data availability */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_OWN_POS,
				   RX_NORMAL_DESC3_OWN_LEN))
		return 1;

	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();

	if (netif_msg_rx_status(pdata))
		xlgmac_dump_rx_desc(pdata, ring, ring->cur);

	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_CTXT_POS,
				   RX_NORMAL_DESC3_CTXT_LEN)) {
		/* Timestamp Context Descriptor */
		xlgmac_get_rx_tstamp(pkt_info, dma_desc);

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
				0);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
				0);

	/* Indicate if a Context Descriptor is next */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_CDA_POS,
				   RX_NORMAL_DESC3_CDA_LEN))
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
				RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
				1);

	/* Get the header length (only valid on the first descriptor,
	 * signalled by the FD bit; a nonzero length means the hardware
	 * split the header into its own buffer)
	 */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_FD_POS,
				   RX_NORMAL_DESC3_FD_LEN)) {
		desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2,
							RX_NORMAL_DESC2_HL_POS,
							RX_NORMAL_DESC2_HL_LEN);
		if (desc_data->rx.hdr_len)
			pdata->stats.rx_split_header_packets++;
	}

	/* Get the RSS hash */
	if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				   RX_NORMAL_DESC3_RSV_POS,
				   RX_NORMAL_DESC3_RSV_LEN)) {
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
				RX_PACKET_ATTRIBUTES_RSS_HASH_LEN,
				1);

		pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1);

		/* L3/L4 type tells whether the hash covered the L4 header */
		l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
					      RX_NORMAL_DESC3_L34T_POS,
					      RX_NORMAL_DESC3_L34T_LEN);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
		case RX_DESC3_L34T_IPV4_UDP:
		case RX_DESC3_L34T_IPV6_TCP:
		case RX_DESC3_L34T_IPV6_UDP:
			pkt_info->rss_hash_type = PKT_HASH_TYPE_L4;
			break;
		default:
			pkt_info->rss_hash_type = PKT_HASH_TYPE_L3;
		}
	}

	/* Get the pkt_info length */
	desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
					RX_NORMAL_DESC3_PL_POS,
					RX_NORMAL_DESC3_PL_LEN);

	if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				    RX_NORMAL_DESC3_LD_POS,
				    RX_NORMAL_DESC3_LD_LEN)) {
		/* Not all the data has been transferred for this pkt_info */
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
				RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
				1);
		return 0;
	}

	/* This is the last of the data for this pkt_info */
	pkt_info->attributes = XLGMAC_SET_REG_BITS(
			pkt_info->attributes,
			RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
			RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
			0);

	/* Set checksum done indicator as appropriate */
	if (netdev->features & NETIF_F_RXCSUM)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
				RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
				1);

	/* Check for errors (only valid in last descriptor) */
	err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				     RX_NORMAL_DESC3_ES_POS,
				     RX_NORMAL_DESC3_ES_LEN);
	etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				      RX_NORMAL_DESC3_ETLT_POS,
				      RX_NORMAL_DESC3_ETLT_LEN);
	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		/* etlt 0x09: VLAN-tagged frame, outer tag in desc0
		 * (NOTE(review): value per DesignWare ETLT encoding —
		 * confirm against the databook)
		 */
		if ((etlt == 0x09) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
			pkt_info->vlan_ctag =
				XLGMAC_GET_REG_BITS_LE(dma_desc->desc0,
						       RX_NORMAL_DESC0_OVT_POS,
						       RX_NORMAL_DESC0_OVT_LEN);
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
				  pkt_info->vlan_ctag);
		}
	} else {
		/* etlt 0x05/0x06: checksum error — clear CSUM_DONE so the
		 * stack re-verifies; anything else is a frame error
		 */
		if ((etlt == 0x05) || (etlt == 0x06))
			pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
					0);
		else
			pkt_info->errors = XLGMAC_SET_REG_BITS(
					pkt_info->errors,
					RX_PACKET_ERRORS_FRAME_POS,
					RX_PACKET_ERRORS_FRAME_LEN,
					1);
	}

	XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name,
		  ring->cur & (ring->dma_desc_count - 1), ring->cur);

	return 0;
}
2814 | ||
2815 | static int xlgmac_enable_int(struct xlgmac_channel *channel, | |
2816 | enum xlgmac_int int_id) | |
2817 | { | |
2818 | unsigned int dma_ch_ier; | |
2819 | ||
2820 | dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2821 | ||
2822 | switch (int_id) { | |
2823 | case XLGMAC_INT_DMA_CH_SR_TI: | |
2824 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2825 | dma_ch_ier, DMA_CH_IER_TIE_POS, | |
2826 | DMA_CH_IER_TIE_LEN, 1); | |
2827 | break; | |
2828 | case XLGMAC_INT_DMA_CH_SR_TPS: | |
2829 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2830 | dma_ch_ier, DMA_CH_IER_TXSE_POS, | |
2831 | DMA_CH_IER_TXSE_LEN, 1); | |
2832 | break; | |
2833 | case XLGMAC_INT_DMA_CH_SR_TBU: | |
2834 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2835 | dma_ch_ier, DMA_CH_IER_TBUE_POS, | |
2836 | DMA_CH_IER_TBUE_LEN, 1); | |
2837 | break; | |
2838 | case XLGMAC_INT_DMA_CH_SR_RI: | |
2839 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2840 | dma_ch_ier, DMA_CH_IER_RIE_POS, | |
2841 | DMA_CH_IER_RIE_LEN, 1); | |
2842 | break; | |
2843 | case XLGMAC_INT_DMA_CH_SR_RBU: | |
2844 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2845 | dma_ch_ier, DMA_CH_IER_RBUE_POS, | |
2846 | DMA_CH_IER_RBUE_LEN, 1); | |
2847 | break; | |
2848 | case XLGMAC_INT_DMA_CH_SR_RPS: | |
2849 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2850 | dma_ch_ier, DMA_CH_IER_RSE_POS, | |
2851 | DMA_CH_IER_RSE_LEN, 1); | |
2852 | break; | |
2853 | case XLGMAC_INT_DMA_CH_SR_TI_RI: | |
2854 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2855 | dma_ch_ier, DMA_CH_IER_TIE_POS, | |
2856 | DMA_CH_IER_TIE_LEN, 1); | |
2857 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2858 | dma_ch_ier, DMA_CH_IER_RIE_POS, | |
2859 | DMA_CH_IER_RIE_LEN, 1); | |
2860 | break; | |
2861 | case XLGMAC_INT_DMA_CH_SR_FBE: | |
2862 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2863 | dma_ch_ier, DMA_CH_IER_FBEE_POS, | |
2864 | DMA_CH_IER_FBEE_LEN, 1); | |
2865 | break; | |
2866 | case XLGMAC_INT_DMA_ALL: | |
2867 | dma_ch_ier |= channel->saved_ier; | |
2868 | break; | |
2869 | default: | |
2870 | return -1; | |
2871 | } | |
2872 | ||
2873 | writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2874 | ||
2875 | return 0; | |
2876 | } | |
2877 | ||
2878 | static int xlgmac_disable_int(struct xlgmac_channel *channel, | |
2879 | enum xlgmac_int int_id) | |
2880 | { | |
2881 | unsigned int dma_ch_ier; | |
2882 | ||
2883 | dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2884 | ||
2885 | switch (int_id) { | |
2886 | case XLGMAC_INT_DMA_CH_SR_TI: | |
2887 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2888 | dma_ch_ier, DMA_CH_IER_TIE_POS, | |
2889 | DMA_CH_IER_TIE_LEN, 0); | |
2890 | break; | |
2891 | case XLGMAC_INT_DMA_CH_SR_TPS: | |
2892 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2893 | dma_ch_ier, DMA_CH_IER_TXSE_POS, | |
2894 | DMA_CH_IER_TXSE_LEN, 0); | |
2895 | break; | |
2896 | case XLGMAC_INT_DMA_CH_SR_TBU: | |
2897 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2898 | dma_ch_ier, DMA_CH_IER_TBUE_POS, | |
2899 | DMA_CH_IER_TBUE_LEN, 0); | |
2900 | break; | |
2901 | case XLGMAC_INT_DMA_CH_SR_RI: | |
2902 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2903 | dma_ch_ier, DMA_CH_IER_RIE_POS, | |
2904 | DMA_CH_IER_RIE_LEN, 0); | |
2905 | break; | |
2906 | case XLGMAC_INT_DMA_CH_SR_RBU: | |
2907 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2908 | dma_ch_ier, DMA_CH_IER_RBUE_POS, | |
2909 | DMA_CH_IER_RBUE_LEN, 0); | |
2910 | break; | |
2911 | case XLGMAC_INT_DMA_CH_SR_RPS: | |
2912 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2913 | dma_ch_ier, DMA_CH_IER_RSE_POS, | |
2914 | DMA_CH_IER_RSE_LEN, 0); | |
2915 | break; | |
2916 | case XLGMAC_INT_DMA_CH_SR_TI_RI: | |
2917 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2918 | dma_ch_ier, DMA_CH_IER_TIE_POS, | |
2919 | DMA_CH_IER_TIE_LEN, 0); | |
2920 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2921 | dma_ch_ier, DMA_CH_IER_RIE_POS, | |
2922 | DMA_CH_IER_RIE_LEN, 0); | |
2923 | break; | |
2924 | case XLGMAC_INT_DMA_CH_SR_FBE: | |
2925 | dma_ch_ier = XLGMAC_SET_REG_BITS( | |
2926 | dma_ch_ier, DMA_CH_IER_FBEE_POS, | |
2927 | DMA_CH_IER_FBEE_LEN, 0); | |
2928 | break; | |
2929 | case XLGMAC_INT_DMA_ALL: | |
2930 | channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK; | |
2931 | dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK; | |
2932 | break; | |
2933 | default: | |
2934 | return -1; | |
2935 | } | |
2936 | ||
2937 | writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); | |
2938 | ||
2939 | return 0; | |
2940 | } | |
2941 | ||
2942 | static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata) | |
2943 | { | |
2944 | unsigned int i, count; | |
2945 | u32 regval; | |
2946 | ||
2947 | for (i = 0; i < pdata->tx_q_count; i++) { | |
2948 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
2949 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, | |
2950 | MTL_Q_TQOMR_FTQ_LEN, 1); | |
2951 | writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
2952 | } | |
2953 | ||
2954 | /* Poll Until Poll Condition */ | |
2955 | for (i = 0; i < pdata->tx_q_count; i++) { | |
2956 | count = 2000; | |
2957 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); | |
2958 | regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, | |
2959 | MTL_Q_TQOMR_FTQ_LEN); | |
2960 | while (--count && regval) | |
2961 | usleep_range(500, 600); | |
2962 | ||
2963 | if (!count) | |
2964 | return -EBUSY; | |
2965 | } | |
2966 | ||
2967 | return 0; | |
2968 | } | |
2969 | ||
2970 | static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata) | |
2971 | { | |
2972 | u32 regval; | |
2973 | ||
2974 | regval = readl(pdata->mac_regs + DMA_SBMR); | |
2975 | /* Set enhanced addressing mode */ | |
2976 | regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS, | |
2977 | DMA_SBMR_EAME_LEN, 1); | |
2978 | /* Set the System Bus mode */ | |
2979 | regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS, | |
2980 | DMA_SBMR_UNDEF_LEN, 1); | |
2981 | regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS, | |
2982 | DMA_SBMR_BLEN_256_LEN, 1); | |
2983 | writel(regval, pdata->mac_regs + DMA_SBMR); | |
2984 | } | |
2985 | ||
/* Bring the hardware to a fully configured state: flush the Tx queues,
 * then initialize the DMA, MTL and MAC layers in that order. The sequence
 * below is order-sensitive (e.g. descriptors are initialized before DMA
 * interrupts are enabled); do not reorder casually.
 *
 * Returns 0 on success or the error from xlgmac_flush_tx_queues().
 */
static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	int ret;

	/* Flush Tx queues */
	ret = xlgmac_flush_tx_queues(pdata);
	if (ret)
		return ret;

	/* Initialize DMA related features */
	xlgmac_config_dma_bus(pdata);
	xlgmac_config_osp_mode(pdata);
	xlgmac_config_pblx8(pdata);
	xlgmac_config_tx_pbl_val(pdata);
	xlgmac_config_rx_pbl_val(pdata);
	xlgmac_config_rx_coalesce(pdata);
	xlgmac_config_tx_coalesce(pdata);
	xlgmac_config_rx_buffer_size(pdata);
	xlgmac_config_tso_mode(pdata);
	xlgmac_config_sph_mode(pdata);
	xlgmac_config_rss(pdata);
	desc_ops->tx_desc_init(pdata);
	desc_ops->rx_desc_init(pdata);
	xlgmac_enable_dma_interrupts(pdata);

	/* Initialize MTL related features */
	xlgmac_config_mtl_mode(pdata);
	xlgmac_config_queue_mapping(pdata);
	xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
	xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
	xlgmac_config_tx_fifo_size(pdata);
	xlgmac_config_rx_fifo_size(pdata);
	xlgmac_config_flow_control_threshold(pdata);
	xlgmac_config_rx_fep_enable(pdata);
	xlgmac_config_rx_fup_enable(pdata);
	xlgmac_enable_mtl_interrupts(pdata);

	/* Initialize MAC related features */
	xlgmac_config_mac_address(pdata);
	xlgmac_config_rx_mode(pdata);
	xlgmac_config_jumbo_enable(pdata);
	xlgmac_config_flow_control(pdata);
	xlgmac_config_mac_speed(pdata);
	xlgmac_config_checksum_offload(pdata);
	xlgmac_config_vlan_support(pdata);
	xlgmac_config_mmc(pdata);
	xlgmac_enable_mac_interrupts(pdata);

	return 0;
}
3039 | ||
3040 | static int xlgmac_hw_exit(struct xlgmac_pdata *pdata) | |
3041 | { | |
3042 | unsigned int count = 2000; | |
3043 | u32 regval; | |
3044 | ||
3045 | /* Issue a software reset */ | |
3046 | regval = readl(pdata->mac_regs + DMA_MR); | |
3047 | regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS, | |
3048 | DMA_MR_SWR_LEN, 1); | |
3049 | writel(regval, pdata->mac_regs + DMA_MR); | |
3050 | usleep_range(10, 15); | |
3051 | ||
3052 | /* Poll Until Poll Condition */ | |
3053 | while (--count && | |
3054 | XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR), | |
3055 | DMA_MR_SWR_POS, DMA_MR_SWR_LEN)) | |
3056 | usleep_range(500, 600); | |
3057 | ||
3058 | if (!count) | |
3059 | return -EBUSY; | |
3060 | ||
3061 | return 0; | |
3062 | } | |
3063 | ||
/* Populate the driver's hardware-ops dispatch table with the XLGMAC
 * implementations defined in this file. Called once at probe time; the
 * rest of the driver invokes hardware functionality only through
 * @hw_ops, never by calling these functions directly.
 */
void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops)
{
	hw_ops->init = xlgmac_hw_init;
	hw_ops->exit = xlgmac_hw_exit;

	hw_ops->tx_complete = xlgmac_tx_complete;

	hw_ops->enable_tx = xlgmac_enable_tx;
	hw_ops->disable_tx = xlgmac_disable_tx;
	hw_ops->enable_rx = xlgmac_enable_rx;
	hw_ops->disable_rx = xlgmac_disable_rx;

	hw_ops->dev_xmit = xlgmac_dev_xmit;
	hw_ops->dev_read = xlgmac_dev_read;
	hw_ops->enable_int = xlgmac_enable_int;
	hw_ops->disable_int = xlgmac_disable_int;

	hw_ops->set_mac_address = xlgmac_set_mac_address;
	hw_ops->config_rx_mode = xlgmac_config_rx_mode;
	hw_ops->enable_rx_csum = xlgmac_enable_rx_csum;
	hw_ops->disable_rx_csum = xlgmac_disable_rx_csum;

	/* For MII speed configuration */
	hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed;
	hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed;
	hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed;
	hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed;

	/* For descriptor related operation */
	hw_ops->tx_desc_init = xlgmac_tx_desc_init;
	hw_ops->rx_desc_init = xlgmac_rx_desc_init;
	hw_ops->tx_desc_reset = xlgmac_tx_desc_reset;
	hw_ops->rx_desc_reset = xlgmac_rx_desc_reset;
	hw_ops->is_last_desc = xlgmac_is_last_desc;
	hw_ops->is_context_desc = xlgmac_is_context_desc;
	hw_ops->tx_start_xmit = xlgmac_tx_start_xmit;

	/* For Flow Control */
	hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control;
	hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control;

	/* For Vlan related config */
	hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping;
	hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping;
	hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering;
	hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering;
	hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table;

	/* For RX coalescing */
	hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce;
	hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce;
	hw_ops->usec_to_riwt = xlgmac_usec_to_riwt;
	hw_ops->riwt_to_usec = xlgmac_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_ops->config_rx_threshold = xlgmac_config_rx_threshold;
	hw_ops->config_tx_threshold = xlgmac_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_ops->config_rsf_mode = xlgmac_config_rsf_mode;
	hw_ops->config_tsf_mode = xlgmac_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_ops->config_osp_mode = xlgmac_config_osp_mode;

	/* For RX and TX PBL config */
	hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val;
	hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val;
	hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val;
	hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val;
	hw_ops->config_pblx8 = xlgmac_config_pblx8;

	/* For MMC statistics support */
	hw_ops->tx_mmc_int = xlgmac_tx_mmc_int;
	hw_ops->rx_mmc_int = xlgmac_rx_mmc_int;
	hw_ops->read_mmc_stats = xlgmac_read_mmc_stats;

	/* For Receive Side Scaling */
	hw_ops->enable_rss = xlgmac_enable_rss;
	hw_ops->disable_rss = xlgmac_disable_rss;
	hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key;
	hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table;
}