/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

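/* Example (illustrative values only): when the driver is built as a module,
 * the parameters above can be given at load time, and the S_IWUSR ones can
 * also be changed at run time through the standard sysfs interface:
 *
 *	modprobe stmmac watchdog=10000 buf_sz=4096
 *	echo 8000 > /sys/module/stmmac/parameters/eee_timer
 */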
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and, if any of them is
 * invalid, sets it back to a default value.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_queues_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		napi_disable(&rx_q->napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_queues_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		napi_enable(&rx_q->napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider because the frequency
	 * of clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}
}

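/* Worked example for the range selection above: with a 50 MHz csr clock
 * (clk_rate = 50000000) the 35-60 MHz branch is taken, so priv->clk_csr
 * becomes STMMAC_CSR_35_60M and the MDC clock is then derived from that
 * range's fixed divider as defined by the IP.
 */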
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

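/* Worked example of the wrap-around arithmetic above, assuming the usual
 * DMA_TX_SIZE of 512: with cur_tx = 510 and dirty_tx = 5 the producer has
 * wrapped, so avail = 512 - 510 + 5 - 1 = 6 descriptors. One slot is always
 * kept free so that cur_tx == dirty_tx unambiguously means "ring empty".
 */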
/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all TX queues are idle and, if
 * so, puts the MAC in LPI mode (used when EEE is supported).
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits the LPI state, if it is
 * active. It is called by the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	unsigned long flags;
	bool ret = false;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

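/* Note on the SKBTX_IN_PROGRESS check above: that flag is set in the xmit
 * path only for skbs whose senders actually asked for a hardware TX
 * timestamp (e.g. through the standard SO_TIMESTAMPING socket option with
 * SOF_TIMESTAMPING_TX_HARDWARE), so all other traffic bails out early here.
 */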
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
		/* For GMAC4, the valid timestamp is from CTX next desc. */
		if (priv->plat->has_gmac4)
			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
		else
			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = clk_ptp_rate/(1e9ns/sec_inc)
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

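/* Worked example of the addend formula above, with illustrative numbers:
 * if config_sub_second_increment() programmed sec_inc = 20 ns, the target
 * resolution corresponds to 1e9 / 20 = 50 MHz. With clk_ptp_rate = 100 MHz
 * the accumulator must overflow once every two PTP clock cycles, i.e.
 * addend = 2^32 * 50000000 / 100000000 = 2^31 = 0x80000000.
 */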
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex mode negotiated by the PHY
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
				 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because it could happen when we
 * switch on different networks (that are EEE capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	bool new_state = false;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = true;
			if (!phydev->duplex)
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = true;
			ctrl &= ~priv->hw->link.speed_mask;
			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= priv->hw->link.speed1000;
				break;
			case SPEED_100:
				ctrl |= priv->hw->link.speed100;
				break;
			case SPEED_10:
				ctrl |= priv->hw->link.speed10;
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the hook to adjust the
		 * link in case a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

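/* For the of_phy_connect() path above, priv->plat->phy_node is the phandle
 * the platform glue parsed from the device tree. A minimal, illustrative
 * binding sketch (node and label names are examples only):
 *
 *	&gmac {
 *		phy-handle = <&phy0>;
 *		phy-mode = "rgmii";
 *	};
 */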
static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

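/* Worked examples for the MTU to buffer-size mapping above (assuming the
 * usual BUF_SIZE_2KiB/4KiB/8KiB values of 2048/4096/8192 bytes): an MTU of
 * 1500 stays on DEFAULT_BUFSIZE (1536), 1600 needs BUF_SIZE_2KiB, 3000
 * needs BUF_SIZE_4KiB and 5000 needs BUF_SIZE_8KiB.
 */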
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors, whether
 * basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors, whether
 * basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	if (rx_q->rx_skbuff[i]) {
		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	unsigned int bfsize = 0;
	int ret = -ENOMEM;
	int queue;
	int i;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;

			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
				  (unsigned int)rx_q->rx_skbuff_dma[i]);
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				priv->hw->mode->init(rx_q->dma_erx,
						     rx_q->dma_rx_phy,
						     DMA_RX_SIZE, 1);
			else
				priv->hw->mode->init(rx_q->dma_rx,
						     rx_q->dma_rx_phy,
						     DMA_RX_SIZE, 0);
		}
	}

	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}

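/* Note on the err_init_rx_buffers unwind above: on failure, i indexes the
 * first buffer of the current queue that was never allocated, so the inner
 * loop first frees buffers 0..i-1 of that queue; i is then reset to
 * DMA_RX_SIZE so that every earlier queue is freed in full.
 */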
/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			  (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				priv->hw->mode->init(tx_q->dma_etx,
						     tx_q->dma_tx_phy,
						     DMA_TX_SIZE, 1);
			else
				priv->hw->mode->init(tx_q->dma_tx,
						     tx_q->dma_tx_phy,
						     DMA_TX_SIZE, 0);
		}

		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
				p->des0 = 0;
				p->des1 = 0;
				p->des2 = 0;
				p->des3 = 0;
			} else {
				p->des2 = 0;
			}

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

71fedb01 JP |
1388 | /** |
1389 | * dma_free_rx_skbufs - free RX dma buffers | |
1390 | * @priv: private structure | |
54139cf3 | 1391 | * @queue: RX queue index |
71fedb01 | 1392 | */ |
54139cf3 | 1393 | static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) |
47dd7a54 GC |
1394 | { |
1395 | int i; | |
1396 | ||
e3ad57c9 | 1397 | for (i = 0; i < DMA_RX_SIZE; i++) |
54139cf3 | 1398 | stmmac_free_rx_buffer(priv, queue, i); |
47dd7a54 GC |
1399 | } |
1400 | ||
71fedb01 JP |
1401 | /** |
1402 | * dma_free_tx_skbufs - free TX dma buffers | |
1403 | * @priv: private structure | |
ce736788 | 1404 | * @queue: TX queue index |
71fedb01 | 1405 | */ |
ce736788 | 1406 | static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) |
47dd7a54 GC |
1407 | { |
1408 | int i; | |
1409 | ||
71fedb01 | 1410 | for (i = 0; i < DMA_TX_SIZE; i++) |
ce736788 | 1411 | stmmac_free_tx_buffer(priv, queue, i); |
47dd7a54 GC |
1412 | } |
1413 | ||
54139cf3 JP |
1414 | /** |
1415 | * free_dma_rx_desc_resources - free RX dma desc resources | |
1416 | * @priv: private structure | |
1417 | */ | |
1418 | static void free_dma_rx_desc_resources(struct stmmac_priv *priv) | |
1419 | { | |
1420 | u32 rx_count = priv->plat->rx_queues_to_use; | |
1421 | u32 queue; | |
1422 | ||
1423 | /* Free RX queue resources */ | |
1424 | for (queue = 0; queue < rx_count; queue++) { | |
1425 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
1426 | ||
1427 | /* Release the DMA RX socket buffers */ | |
1428 | dma_free_rx_skbufs(priv, queue); | |
1429 | ||
1430 | /* Free DMA regions of consistent memory previously allocated */ | |
1431 | if (!priv->extend_desc) | |
1432 | dma_free_coherent(priv->device, | |
1433 | DMA_RX_SIZE * sizeof(struct dma_desc), | |
1434 | rx_q->dma_rx, rx_q->dma_rx_phy); | |
1435 | else | |
1436 | dma_free_coherent(priv->device, DMA_RX_SIZE * | |
1437 | sizeof(struct dma_extended_desc), | |
1438 | rx_q->dma_erx, rx_q->dma_rx_phy); | |
1439 | ||
1440 | kfree(rx_q->rx_skbuff_dma); | |
1441 | kfree(rx_q->rx_skbuff); | |
1442 | } | |
1443 | } | |
1444 | ||
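/*
 * Note the release order used above (mirrored for TX below): the socket
 * buffers and their DMA mappings go first, the coherent descriptor area
 * second, and the bookkeeping arrays last.
 */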
ce736788 JP |
1445 | /** |
1446 | * free_dma_tx_desc_resources - free TX dma desc resources | |
1447 | * @priv: private structure | |
1448 | */ | |
1449 | static void free_dma_tx_desc_resources(struct stmmac_priv *priv) | |
1450 | { | |
1451 | u32 tx_count = priv->plat->tx_queues_to_use; | |
62242260 | 1452 | u32 queue; |
ce736788 JP |
1453 | |
1454 | /* Free TX queue resources */ | |
1455 | for (queue = 0; queue < tx_count; queue++) { | |
1456 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
1457 | ||
1458 | /* Release the DMA TX socket buffers */ | |
1459 | dma_free_tx_skbufs(priv, queue); | |
1460 | ||
1461 | /* Free DMA regions of consistent memory previously allocated */ | |
1462 | if (!priv->extend_desc) | |
1463 | dma_free_coherent(priv->device, | |
1464 | DMA_TX_SIZE * sizeof(struct dma_desc), | |
1465 | tx_q->dma_tx, tx_q->dma_tx_phy); | |
1466 | else | |
1467 | dma_free_coherent(priv->device, DMA_TX_SIZE * | |
1468 | sizeof(struct dma_extended_desc), | |
1469 | tx_q->dma_etx, tx_q->dma_tx_phy); | |
1470 | ||
1471 | kfree(tx_q->tx_skbuff_dma); | |
1472 | kfree(tx_q->tx_skbuff); | |
1473 | } | |
1474 | } | |
1475 | ||
732fdf0e | 1476 | /** |
71fedb01 | 1477 | * alloc_dma_rx_desc_resources - alloc RX resources. |
732fdf0e GC |
1478 | * @priv: private structure |
1479 | * Description: according to which descriptor can be used (extended or basic) |
5bacd778 LC |
1480 | * this function allocates the resources for the RX path. For reception it |
1481 | * pre-allocates the RX socket buffers in order to allow the zero-copy |
1482 | * mechanism. |
732fdf0e | 1483 | */ |
71fedb01 | 1484 | static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) |
09f8d696 | 1485 | { |
54139cf3 | 1486 | u32 rx_count = priv->plat->rx_queues_to_use; |
09f8d696 | 1487 | int ret = -ENOMEM; |
54139cf3 | 1488 | u32 queue; |
09f8d696 | 1489 | |
54139cf3 JP |
1490 | /* RX queues buffers and DMA */ |
1491 | for (queue = 0; queue < rx_count; queue++) { | |
1492 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
09f8d696 | 1493 | |
54139cf3 JP |
1494 | rx_q->queue_index = queue; |
1495 | rx_q->priv_data = priv; | |
5bacd778 | 1496 | |
54139cf3 JP |
1497 | rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, |
1498 | sizeof(dma_addr_t), | |
71fedb01 | 1499 | GFP_KERNEL); |
54139cf3 | 1500 | if (!rx_q->rx_skbuff_dma) |
63c3aa6b | 1501 | goto err_dma; |
71fedb01 | 1502 | |
54139cf3 JP |
1503 | rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, |
1504 | sizeof(struct sk_buff *), | |
1505 | GFP_KERNEL); | |
1506 | if (!rx_q->rx_skbuff) | |
71fedb01 | 1507 | goto err_dma; |
54139cf3 JP |
1508 | |
1509 | if (priv->extend_desc) { | |
1510 | rx_q->dma_erx = dma_zalloc_coherent(priv->device, | |
1511 | DMA_RX_SIZE * | |
1512 | sizeof(struct | |
1513 | dma_extended_desc), | |
1514 | &rx_q->dma_rx_phy, | |
1515 | GFP_KERNEL); | |
1516 | if (!rx_q->dma_erx) | |
1517 | goto err_dma; | |
1518 | ||
1519 | } else { | |
1520 | rx_q->dma_rx = dma_zalloc_coherent(priv->device, | |
1521 | DMA_RX_SIZE * | |
1522 | sizeof(struct | |
1523 | dma_desc), | |
1524 | &rx_q->dma_rx_phy, | |
1525 | GFP_KERNEL); | |
1526 | if (!rx_q->dma_rx) | |
1527 | goto err_dma; | |
1528 | } | |
71fedb01 JP |
1529 | } |
1530 | ||
1531 | return 0; | |
1532 | ||
1533 | err_dma: | |
54139cf3 JP |
1534 | free_dma_rx_desc_resources(priv); |
1535 | ||
71fedb01 JP |
1536 | return ret; |
1537 | } | |
1538 | ||
1539 | /** | |
1540 | * alloc_dma_tx_desc_resources - alloc TX resources. | |
1541 | * @priv: private structure | |
1542 | * Description: according to which descriptor can be used (extended or basic) |
1543 | * this function allocates the resources for the TX path: the per-queue |
1544 | * socket-buffer bookkeeping arrays and the coherent memory used for the |
1545 | * DMA descriptors. |
1546 | */ | |
1547 | static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) | |
1548 | { | |
ce736788 | 1549 | u32 tx_count = priv->plat->tx_queues_to_use; |
71fedb01 | 1550 | int ret = -ENOMEM; |
ce736788 | 1551 | u32 queue; |
71fedb01 | 1552 | |
ce736788 JP |
1553 | /* TX queues buffers and DMA */ |
1554 | for (queue = 0; queue < tx_count; queue++) { | |
1555 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
5bacd778 | 1556 | |
ce736788 JP |
1557 | tx_q->queue_index = queue; |
1558 | tx_q->priv_data = priv; | |
5bacd778 | 1559 | |
ce736788 JP |
1560 | tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, |
1561 | sizeof(*tx_q->tx_skbuff_dma), | |
5bacd778 | 1562 | GFP_KERNEL); |
ce736788 | 1563 | if (!tx_q->tx_skbuff_dma) |
62242260 | 1564 | goto err_dma; |
ce736788 JP |
1565 | |
1566 | tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, | |
1567 | sizeof(struct sk_buff *), | |
1568 | GFP_KERNEL); | |
1569 | if (!tx_q->tx_skbuff) | |
62242260 | 1570 | goto err_dma; |
ce736788 JP |
1571 | |
1572 | if (priv->extend_desc) { | |
1573 | tx_q->dma_etx = dma_zalloc_coherent(priv->device, | |
1574 | DMA_TX_SIZE * | |
1575 | sizeof(struct | |
1576 | dma_extended_desc), | |
1577 | &tx_q->dma_tx_phy, | |
1578 | GFP_KERNEL); | |
1579 | if (!tx_q->dma_etx) | |
62242260 | 1580 | goto err_dma; |
ce736788 JP |
1581 | } else { |
1582 | tx_q->dma_tx = dma_zalloc_coherent(priv->device, | |
1583 | DMA_TX_SIZE * | |
1584 | sizeof(struct | |
1585 | dma_desc), | |
1586 | &tx_q->dma_tx_phy, | |
1587 | GFP_KERNEL); | |
1588 | if (!tx_q->dma_tx) | |
62242260 | 1589 | goto err_dma; |
ce736788 | 1590 | } |
09f8d696 SK |
1591 | } |
1592 | ||
1593 | return 0; | |
1594 | ||
62242260 | 1595 | err_dma: |
ce736788 JP |
1596 | free_dma_tx_desc_resources(priv); |
1597 | ||
09f8d696 SK |
1598 | return ret; |
1599 | } | |
1600 | ||
71fedb01 JP |
1601 | /** |
1602 | * alloc_dma_desc_resources - alloc TX/RX resources. | |
1603 | * @priv: private structure | |
1604 | * Description: according to which descriptor can be used (extended or basic) |
1605 | * this function allocates the resources for the TX and RX paths. In case of |
1606 | * reception, for example, it pre-allocates the RX socket buffers in order to |
1607 | * allow the zero-copy mechanism. |
1608 | */ | |
1609 | static int alloc_dma_desc_resources(struct stmmac_priv *priv) | |
1610 | { | |
54139cf3 | 1611 | /* RX Allocation */ |
71fedb01 JP |
1612 | int ret = alloc_dma_rx_desc_resources(priv); |
1613 | ||
1614 | if (ret) | |
1615 | return ret; | |
1616 | ||
1617 | ret = alloc_dma_tx_desc_resources(priv); | |
1618 | ||
1619 | return ret; | |
1620 | } | |
1621 | ||
71fedb01 JP |
1622 | /** |
1623 | * free_dma_desc_resources - free dma desc resources | |
1624 | * @priv: private structure | |
1625 | */ | |
1626 | static void free_dma_desc_resources(struct stmmac_priv *priv) | |
1627 | { | |
1628 | /* Release the DMA RX socket buffers */ | |
1629 | free_dma_rx_desc_resources(priv); | |
1630 | ||
1631 | /* Release the DMA TX socket buffers */ | |
1632 | free_dma_tx_desc_resources(priv); | |
1633 | } | |
1634 | ||
9eb12474 | 1635 | /** |
1636 | * stmmac_mac_enable_rx_queues - Enable MAC rx queues | |
1637 | * @priv: driver private structure | |
1638 | * Description: It is used for enabling the rx queues in the MAC | |
1639 | */ | |
1640 | static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) | |
1641 | { | |
4f6046f5 JP |
1642 | u32 rx_queues_count = priv->plat->rx_queues_to_use; |
1643 | int queue; | |
1644 | u8 mode; | |
9eb12474 | 1645 | |
4f6046f5 JP |
1646 | for (queue = 0; queue < rx_queues_count; queue++) { |
1647 | mode = priv->plat->rx_queues_cfg[queue].mode_to_use; | |
1648 | priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); | |
1649 | } | |
9eb12474 | 1650 | } |
1651 | ||
ae4f0d46 JP |
1652 | /** |
1653 | * stmmac_start_rx_dma - start RX DMA channel | |
1654 | * @priv: driver private structure | |
1655 | * @chan: RX channel index | |
1656 | * Description: | |
1657 | * This starts a RX DMA channel | |
1658 | */ | |
1659 | static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) | |
1660 | { | |
1661 | netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); | |
1662 | priv->hw->dma->start_rx(priv->ioaddr, chan); | |
1663 | } | |
1664 | ||
1665 | /** | |
1666 | * stmmac_start_tx_dma - start TX DMA channel | |
1667 | * @priv: driver private structure | |
1668 | * @chan: TX channel index | |
1669 | * Description: | |
1670 | * This starts a TX DMA channel | |
1671 | */ | |
1672 | static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) | |
1673 | { | |
1674 | netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); | |
1675 | priv->hw->dma->start_tx(priv->ioaddr, chan); | |
1676 | } | |
1677 | ||
1678 | /** | |
1679 | * stmmac_stop_rx_dma - stop RX DMA channel | |
1680 | * @priv: driver private structure | |
1681 | * @chan: RX channel index | |
1682 | * Description: | |
1683 | * This stops a RX DMA channel | |
1684 | */ | |
1685 | static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) | |
1686 | { | |
1687 | netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); | |
1688 | priv->hw->dma->stop_rx(priv->ioaddr, chan); | |
1689 | } | |
1690 | ||
1691 | /** | |
1692 | * stmmac_stop_tx_dma - stop TX DMA channel | |
1693 | * @priv: driver private structure | |
1694 | * @chan: TX channel index | |
1695 | * Description: | |
1696 | * This stops a TX DMA channel | |
1697 | */ | |
1698 | static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) | |
1699 | { | |
1700 | netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); | |
1701 | priv->hw->dma->stop_tx(priv->ioaddr, chan); | |
1702 | } | |
1703 | ||
1704 | /** | |
1705 | * stmmac_start_all_dma - start all RX and TX DMA channels | |
1706 | * @priv: driver private structure | |
1707 | * Description: | |
1708 | * This starts all the RX and TX DMA channels | |
1709 | */ | |
1710 | static void stmmac_start_all_dma(struct stmmac_priv *priv) | |
1711 | { | |
1712 | u32 rx_channels_count = priv->plat->rx_queues_to_use; | |
1713 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
1714 | u32 chan = 0; | |
1715 | ||
1716 | for (chan = 0; chan < rx_channels_count; chan++) | |
1717 | stmmac_start_rx_dma(priv, chan); | |
1718 | ||
1719 | for (chan = 0; chan < tx_channels_count; chan++) | |
1720 | stmmac_start_tx_dma(priv, chan); | |
1721 | } | |
1722 | ||
1723 | /** | |
1724 | * stmmac_stop_all_dma - stop all RX and TX DMA channels | |
1725 | * @priv: driver private structure | |
1726 | * Description: | |
1727 | * This stops the RX and TX DMA channels | |
1728 | */ | |
1729 | static void stmmac_stop_all_dma(struct stmmac_priv *priv) | |
1730 | { | |
1731 | u32 rx_channels_count = priv->plat->rx_queues_to_use; | |
1732 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
1733 | u32 chan = 0; | |
1734 | ||
1735 | for (chan = 0; chan < rx_channels_count; chan++) | |
1736 | stmmac_stop_rx_dma(priv, chan); | |
1737 | ||
1738 | for (chan = 0; chan < tx_channels_count; chan++) | |
1739 | stmmac_stop_tx_dma(priv, chan); | |
1740 | } | |
1741 | ||
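/*
 * A minimal usage sketch of the helpers above (hypothetical, not part of
 * the driver): restarting a single RX/TX channel pair.
 *
 *	static void example_restart_dma_chan(struct stmmac_priv *priv, u32 chan)
 *	{
 *		stmmac_stop_tx_dma(priv, chan);
 *		stmmac_stop_rx_dma(priv, chan);
 *		stmmac_start_rx_dma(priv, chan);
 *		stmmac_start_tx_dma(priv, chan);
 *	}
 */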
47dd7a54 GC |
1742 | /** |
1743 | * stmmac_dma_operation_mode - HW DMA operation mode | |
32ceabca | 1744 | * @priv: driver private structure |
732fdf0e GC |
1745 | * Description: it is used for configuring the DMA operation mode register in |
1746 | * order to program the tx/rx DMA thresholds or Store-And-Forward mode. | |
47dd7a54 GC |
1747 | */ |
1748 | static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | |
1749 | { | |
6deee222 JP |
1750 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
1751 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
f88203a2 | 1752 | int rxfifosz = priv->plat->rx_fifo_size; |
52a76235 | 1753 | int txfifosz = priv->plat->tx_fifo_size; |
6deee222 JP |
1754 | u32 txmode = 0; |
1755 | u32 rxmode = 0; | |
1756 | u32 chan = 0; | |
f88203a2 | 1757 | |
11fbf811 TR |
1758 | if (rxfifosz == 0) |
1759 | rxfifosz = priv->dma_cap.rx_fifo_size; | |
52a76235 JA |
1760 | if (txfifosz == 0) |
1761 | txfifosz = priv->dma_cap.tx_fifo_size; | |
1762 | ||
1763 | /* Adjust for real per queue fifo size */ | |
1764 | rxfifosz /= rx_channels_count; | |
1765 | txfifosz /= tx_channels_count; | |
11fbf811 | 1766 | |
6deee222 JP |
1767 | if (priv->plat->force_thresh_dma_mode) { |
1768 | txmode = tc; | |
1769 | rxmode = tc; | |
1770 | } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { | |
61b8013a SK |
1771 | /* |
1772 | * In case of GMAC, SF mode can be enabled | |
1773 | * to perform the TX COE in HW. This depends on: | |
ebbb293f GC |
1774 | * 1) TX COE being actually supported; |
1775 | * 2) there being no buggy Jumbo frame support |
1776 | * that requires not inserting the csum in the TDES. |
1777 | */ | |
6deee222 JP |
1778 | txmode = SF_DMA_MODE; |
1779 | rxmode = SF_DMA_MODE; | |
b2dec116 | 1780 | priv->xstats.threshold = SF_DMA_MODE; |
6deee222 JP |
1781 | } else { |
1782 | txmode = tc; | |
1783 | rxmode = SF_DMA_MODE; | |
1784 | } | |
1785 | ||
1786 | /* configure all channels */ | |
1787 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { | |
1788 | for (chan = 0; chan < rx_channels_count; chan++) | |
1789 | priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, | |
1790 | rxfifosz); | |
1791 | ||
1792 | for (chan = 0; chan < tx_channels_count; chan++) | |
52a76235 JA |
1793 | priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, |
1794 | txfifosz); | |
6deee222 JP |
1795 | } else { |
1796 | priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, | |
f88203a2 | 1797 | rxfifosz); |
6deee222 | 1798 | } |
47dd7a54 GC |
1799 | } |
1800 | ||
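/*
 * Worked example for the per-queue fifo adjustment above (numbers are
 * illustrative only): with a 16384-byte RX fifo and four RX queues in
 * use, each queue is sized for 16384 / 4 = 4096 bytes before the mode
 * is programmed per channel.
 */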
47dd7a54 | 1801 | /** |
732fdf0e | 1802 | * stmmac_tx_clean - to manage the transmission completion |
32ceabca | 1803 | * @priv: driver private structure |
ce736788 | 1804 | * @queue: TX queue index |
732fdf0e | 1805 | * Description: it reclaims the transmit resources after transmission completes. |
47dd7a54 | 1806 | */ |
ce736788 | 1807 | static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) |
47dd7a54 | 1808 | { |
ce736788 | 1809 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
38979574 | 1810 | unsigned int bytes_compl = 0, pkts_compl = 0; |
ce736788 | 1811 | unsigned int entry = tx_q->dirty_tx; |
47dd7a54 | 1812 | |
739c8e14 | 1813 | netif_tx_lock(priv->dev); |
a9097a96 | 1814 | |
9125cdd1 GC |
1815 | priv->xstats.tx_clean++; |
1816 | ||
ce736788 JP |
1817 | while (entry != tx_q->cur_tx) { |
1818 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; | |
c24602ef | 1819 | struct dma_desc *p; |
c363b658 | 1820 | int status; |
c24602ef GC |
1821 | |
1822 | if (priv->extend_desc) | |
ce736788 | 1823 | p = (struct dma_desc *)(tx_q->dma_etx + entry); |
c24602ef | 1824 | else |
ce736788 | 1825 | p = tx_q->dma_tx + entry; |
47dd7a54 | 1826 | |
c363b658 | 1827 | status = priv->hw->desc->tx_status(&priv->dev->stats, |
ceb69499 GC |
1828 | &priv->xstats, p, |
1829 | priv->ioaddr); | |
c363b658 FG |
1830 | /* Check if the descriptor is owned by the DMA */ |
1831 | if (unlikely(status & tx_dma_own)) | |
1832 | break; | |
1833 | ||
1834 | /* Just consider the last segment and ...*/ | |
1835 | if (likely(!(status & tx_not_ls))) { | |
1836 | /* ... verify the status error condition */ | |
1837 | if (unlikely(status & tx_err)) { | |
1838 | priv->dev->stats.tx_errors++; | |
1839 | } else { | |
47dd7a54 GC |
1840 | priv->dev->stats.tx_packets++; |
1841 | priv->xstats.tx_pkt_n++; | |
c363b658 | 1842 | } |
ba1ffd74 | 1843 | stmmac_get_tx_hwtstamp(priv, p, skb); |
47dd7a54 | 1844 | } |
47dd7a54 | 1845 | |
ce736788 JP |
1846 | if (likely(tx_q->tx_skbuff_dma[entry].buf)) { |
1847 | if (tx_q->tx_skbuff_dma[entry].map_as_page) | |
362b37be | 1848 | dma_unmap_page(priv->device, |
ce736788 JP |
1849 | tx_q->tx_skbuff_dma[entry].buf, |
1850 | tx_q->tx_skbuff_dma[entry].len, | |
362b37be GC |
1851 | DMA_TO_DEVICE); |
1852 | else | |
1853 | dma_unmap_single(priv->device, | |
ce736788 JP |
1854 | tx_q->tx_skbuff_dma[entry].buf, |
1855 | tx_q->tx_skbuff_dma[entry].len, | |
362b37be | 1856 | DMA_TO_DEVICE); |
ce736788 JP |
1857 | tx_q->tx_skbuff_dma[entry].buf = 0; |
1858 | tx_q->tx_skbuff_dma[entry].len = 0; | |
1859 | tx_q->tx_skbuff_dma[entry].map_as_page = false; | |
cf32deec | 1860 | } |
f748be53 AT |
1861 | |
1862 | if (priv->hw->mode->clean_desc3) | |
ce736788 | 1863 | priv->hw->mode->clean_desc3(tx_q, p); |
f748be53 | 1864 | |
ce736788 JP |
1865 | tx_q->tx_skbuff_dma[entry].last_segment = false; |
1866 | tx_q->tx_skbuff_dma[entry].is_jumbo = false; | |
47dd7a54 GC |
1867 | |
1868 | if (likely(skb != NULL)) { | |
38979574 BG |
1869 | pkts_compl++; |
1870 | bytes_compl += skb->len; | |
7c565c33 | 1871 | dev_consume_skb_any(skb); |
ce736788 | 1872 | tx_q->tx_skbuff[entry] = NULL; |
47dd7a54 GC |
1873 | } |
1874 | ||
4a7d666a | 1875 | priv->hw->desc->release_tx_desc(p, priv->mode); |
47dd7a54 | 1876 | |
e3ad57c9 | 1877 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
47dd7a54 | 1878 | } |
ce736788 | 1879 | tx_q->dirty_tx = entry; |
38979574 | 1880 | |
c22a3f48 JP |
1881 | netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), |
1882 | pkts_compl, bytes_compl); | |
1883 | ||
1884 | if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, | |
1885 | queue))) && | |
1886 | stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { | |
38979574 | 1887 | |
739c8e14 LS |
1888 | netif_dbg(priv, tx_done, priv->dev, |
1889 | "%s: restart transmit\n", __func__); | |
c22a3f48 | 1890 | netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); |
47dd7a54 | 1891 | } |
d765955d GC |
1892 | |
1893 | if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { | |
1894 | stmmac_enable_eee_mode(priv); | |
f5351ef7 | 1895 | mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
d765955d | 1896 | } |
739c8e14 | 1897 | netif_tx_unlock(priv->dev); |
47dd7a54 GC |
1898 | } |
1899 | ||
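/*
 * Ring bookkeeping sketch for the cleanup above: cur_tx is the slot the
 * next frame will use and dirty_tx the oldest slot not yet reclaimed.
 * The loop walks dirty_tx towards cur_tx, wrapping modulo DMA_TX_SIZE
 * via STMMAC_GET_ENTRY(), and stops at the first descriptor still owned
 * by the DMA.
 */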
4f513ecd | 1900 | static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan) |
47dd7a54 | 1901 | { |
4f513ecd | 1902 | priv->hw->dma->enable_dma_irq(priv->ioaddr, chan); |
47dd7a54 GC |
1903 | } |
1904 | ||
4f513ecd | 1905 | static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan) |
47dd7a54 | 1906 | { |
4f513ecd | 1907 | priv->hw->dma->disable_dma_irq(priv->ioaddr, chan); |
47dd7a54 GC |
1908 | } |
1909 | ||
47dd7a54 | 1910 | /** |
732fdf0e | 1911 | * stmmac_tx_err - to manage the tx error |
32ceabca | 1912 | * @priv: driver private structure |
5bacd778 | 1913 | * @chan: channel index |
47dd7a54 | 1914 | * Description: it cleans the descriptors and restarts the transmission |
732fdf0e | 1915 | * in case of transmission errors. |
47dd7a54 | 1916 | */ |
5bacd778 | 1917 | static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) |
47dd7a54 | 1918 | { |
ce736788 | 1919 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
c24602ef | 1920 | int i; |
ce736788 | 1921 | |
c22a3f48 | 1922 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); |
47dd7a54 | 1923 | |
ae4f0d46 | 1924 | stmmac_stop_tx_dma(priv, chan); |
ce736788 | 1925 | dma_free_tx_skbufs(priv, chan); |
e3ad57c9 | 1926 | for (i = 0; i < DMA_TX_SIZE; i++) |
c24602ef | 1927 | if (priv->extend_desc) |
ce736788 | 1928 | priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, |
c24602ef | 1929 | priv->mode, |
e3ad57c9 | 1930 | (i == DMA_TX_SIZE - 1)); |
c24602ef | 1931 | else |
ce736788 | 1932 | priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], |
c24602ef | 1933 | priv->mode, |
e3ad57c9 | 1934 | (i == DMA_TX_SIZE - 1)); |
ce736788 JP |
1935 | tx_q->dirty_tx = 0; |
1936 | tx_q->cur_tx = 0; | |
c22a3f48 | 1937 | netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); |
ae4f0d46 | 1938 | stmmac_start_tx_dma(priv, chan); |
47dd7a54 GC |
1939 | |
1940 | priv->dev->stats.tx_errors++; | |
c22a3f48 | 1941 | netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); |
47dd7a54 GC |
1942 | } |
1943 | ||
6deee222 JP |
1944 | /** |
1945 | * stmmac_set_dma_operation_mode - Set DMA operation mode by channel | |
1946 | * @priv: driver private structure | |
1947 | * @txmode: TX operating mode | |
1948 | * @rxmode: RX operating mode | |
1949 | * @chan: channel index | |
1950 | * Description: it is used for configuring the DMA operation mode in |
1951 | * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward | |
1952 | * mode. | |
1953 | */ | |
1954 | static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, | |
1955 | u32 rxmode, u32 chan) | |
1956 | { | |
52a76235 JA |
1957 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
1958 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
6deee222 | 1959 | int rxfifosz = priv->plat->rx_fifo_size; |
52a76235 | 1960 | int txfifosz = priv->plat->tx_fifo_size; |
6deee222 JP |
1961 | |
1962 | if (rxfifosz == 0) | |
1963 | rxfifosz = priv->dma_cap.rx_fifo_size; | |
52a76235 JA |
1964 | if (txfifosz == 0) |
1965 | txfifosz = priv->dma_cap.tx_fifo_size; | |
1966 | ||
1967 | /* Adjust for real per queue fifo size */ | |
1968 | rxfifosz /= rx_channels_count; | |
1969 | txfifosz /= tx_channels_count; | |
6deee222 JP |
1970 | |
1971 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { | |
1972 | priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, | |
1973 | rxfifosz); | |
52a76235 JA |
1974 | priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, |
1975 | txfifosz); | |
6deee222 JP |
1976 | } else { |
1977 | priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, | |
1978 | rxfifosz); | |
1979 | } | |
1980 | } | |
1981 | ||
32ceabca | 1982 | /** |
732fdf0e | 1983 | * stmmac_dma_interrupt - DMA ISR |
32ceabca GC |
1984 | * @priv: driver private structure |
1985 | * Description: this is the DMA ISR. It is called by the main ISR. | |
732fdf0e GC |
1986 | * It calls the dwmac dma routine and schedules the poll method when |
1987 | * there is work to be done. |
32ceabca | 1988 | */ |
aec7ff27 GC |
1989 | static void stmmac_dma_interrupt(struct stmmac_priv *priv) |
1990 | { | |
d62a107a | 1991 | u32 tx_channel_count = priv->plat->tx_queues_to_use; |
aec7ff27 | 1992 | int status; |
d62a107a JP |
1993 | u32 chan; |
1994 | ||
1995 | for (chan = 0; chan < tx_channel_count; chan++) { | |
c22a3f48 JP |
1996 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; |
1997 | ||
d62a107a JP |
1998 | status = priv->hw->dma->dma_interrupt(priv->ioaddr, |
1999 | &priv->xstats, chan); | |
2000 | if (likely((status & handle_rx)) || (status & handle_tx)) { | |
c22a3f48 | 2001 | if (likely(napi_schedule_prep(&rx_q->napi))) { |
d62a107a | 2002 | stmmac_disable_dma_irq(priv, chan); |
c22a3f48 | 2003 | __napi_schedule(&rx_q->napi); |
d62a107a | 2004 | } |
9125cdd1 | 2005 | } |
6deee222 | 2006 | |
d62a107a JP |
2007 | if (unlikely(status & tx_hard_error_bump_tc)) { |
2008 | /* Try to bump up the dma threshold on this failure */ | |
2009 | if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && | |
2010 | (tc <= 256)) { | |
2011 | tc += 64; | |
2012 | if (priv->plat->force_thresh_dma_mode) | |
2013 | stmmac_set_dma_operation_mode(priv, | |
2014 | tc, | |
2015 | tc, | |
2016 | chan); | |
2017 | else | |
2018 | stmmac_set_dma_operation_mode(priv, | |
2019 | tc, | |
2020 | SF_DMA_MODE, | |
2021 | chan); | |
2022 | priv->xstats.threshold = tc; | |
2023 | } | |
2024 | } else if (unlikely(status == tx_hard_error)) { | |
2025 | stmmac_tx_err(priv, chan); | |
47dd7a54 | 2026 | } |
d62a107a | 2027 | } |
47dd7a54 GC |
2028 | } |
2029 | ||
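/*
 * Threshold bump example for the handler above: on tx_hard_error_bump_tc
 * the threshold grows in steps of 64 (e.g. 64 -> 128 -> 192 -> 256); no
 * further bump is applied once tc exceeds 256 or when SF mode is in use.
 */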
32ceabca GC |
2030 | /** |
2031 | * stmmac_mmc_setup - setup the Mac Management Counters (MMC) |
2032 | * @priv: driver private structure | |
2033 | * Description: this masks the MMC irq since the counters are managed in SW. |
2034 | */ | |
1c901a46 GC |
2035 | static void stmmac_mmc_setup(struct stmmac_priv *priv) |
2036 | { | |
2037 | unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | | |
36ff7c1e | 2038 | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; |
1c901a46 | 2039 | |
ba1ffd74 GC |
2040 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
2041 | priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; | |
f748be53 | 2042 | priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; |
ba1ffd74 GC |
2043 | } else { |
2044 | priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; | |
f748be53 | 2045 | priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; |
ba1ffd74 | 2046 | } |
36ff7c1e AT |
2047 | |
2048 | dwmac_mmc_intr_all_mask(priv->mmcaddr); | |
4f795b25 GC |
2049 | |
2050 | if (priv->dma_cap.rmon) { | |
36ff7c1e | 2051 | dwmac_mmc_ctrl(priv->mmcaddr, mode); |
4f795b25 GC |
2052 | memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); |
2053 | } else | |
38ddc59d | 2054 | netdev_info(priv->dev, "No MAC Management Counters available\n"); |
1c901a46 GC |
2055 | } |
2056 | ||
19e30c14 | 2057 | /** |
732fdf0e | 2058 | * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors |
32ceabca GC |
2059 | * @priv: driver private structure |
2060 | * Description: select the Enhanced/Alternate or Normal descriptors. | |
732fdf0e GC |
2061 | * In case of Enhanced/Alternate, it checks if the extended descriptors are |
2062 | * supported by the HW capability register. | |
ff3dd78c | 2063 | */ |
19e30c14 GC |
2064 | static void stmmac_selec_desc_mode(struct stmmac_priv *priv) |
2065 | { | |
2066 | if (priv->plat->enh_desc) { | |
38ddc59d | 2067 | dev_info(priv->device, "Enhanced/Alternate descriptors\n"); |
c24602ef GC |
2068 | |
2069 | /* GMAC older than 3.50 has no extended descriptors */ | |
2070 | if (priv->synopsys_id >= DWMAC_CORE_3_50) { | |
38ddc59d | 2071 | dev_info(priv->device, "Enabled extended descriptors\n"); |
c24602ef GC |
2072 | priv->extend_desc = 1; |
2073 | } else | |
38ddc59d | 2074 | dev_warn(priv->device, "Extended descriptors not supported\n"); |
c24602ef | 2075 | |
19e30c14 GC |
2076 | priv->hw->desc = &enh_desc_ops; |
2077 | } else { | |
38ddc59d | 2078 | dev_info(priv->device, "Normal descriptors\n"); |
19e30c14 GC |
2079 | priv->hw->desc = &ndesc_ops; |
2080 | } | |
2081 | } | |
2082 | ||
2083 | /** | |
732fdf0e | 2084 | * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. |
32ceabca | 2085 | * @priv: driver private structure |
19e30c14 GC |
2086 | * Description: |
2087 | * new GMAC chip generations have a new register to indicate the | |
2088 | * presence of the optional feature/functions. | |
2089 | * This can also be used to override the value passed through the |
2090 | * platform and necessary for old MAC10/100 and GMAC chips. | |
e7434821 GC |
2091 | */ |
2092 | static int stmmac_get_hw_features(struct stmmac_priv *priv) | |
2093 | { | |
f10a6a35 | 2094 | u32 ret = 0; |
3c20f72f | 2095 | |
5e6efe88 | 2096 | if (priv->hw->dma->get_hw_feature) { |
f10a6a35 AT |
2097 | priv->hw->dma->get_hw_feature(priv->ioaddr, |
2098 | &priv->dma_cap); | |
2099 | ret = 1; | |
19e30c14 | 2100 | } |
e7434821 | 2101 | |
f10a6a35 | 2102 | return ret; |
e7434821 GC |
2103 | } |
2104 | ||
32ceabca | 2105 | /** |
732fdf0e | 2106 | * stmmac_check_ether_addr - check if the MAC addr is valid |
32ceabca GC |
2107 | * @priv: driver private structure |
2108 | * Description: | |
2109 | * it verifies that the MAC address is valid; in case of failure it |
2110 | * generates a random MAC address. |
2111 | */ | |
bfab27a1 GC |
2112 | static void stmmac_check_ether_addr(struct stmmac_priv *priv) |
2113 | { | |
bfab27a1 | 2114 | if (!is_valid_ether_addr(priv->dev->dev_addr)) { |
7ed24bbe | 2115 | priv->hw->mac->get_umac_addr(priv->hw, |
bfab27a1 | 2116 | priv->dev->dev_addr, 0); |
ceb69499 | 2117 | if (!is_valid_ether_addr(priv->dev->dev_addr)) |
f2cedb63 | 2118 | eth_hw_addr_random(priv->dev); |
38ddc59d LC |
2119 | netdev_info(priv->dev, "device MAC address %pM\n", |
2120 | priv->dev->dev_addr); | |
bfab27a1 | 2121 | } |
bfab27a1 GC |
2122 | } |
2123 | ||
32ceabca | 2124 | /** |
732fdf0e | 2125 | * stmmac_init_dma_engine - DMA init. |
32ceabca GC |
2126 | * @priv: driver private structure |
2127 | * Description: | |
2128 | * It inits the DMA invoking the specific MAC/GMAC callback. | |
2129 | * Some DMA parameters can be passed from the platform; | |
2130 | * in case they are not passed, a default is kept for the MAC or GMAC. |
2131 | */ | |
0f1f88a8 GC |
2132 | static int stmmac_init_dma_engine(struct stmmac_priv *priv) |
2133 | { | |
47f2a9ce JP |
2134 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
2135 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
54139cf3 | 2136 | struct stmmac_rx_queue *rx_q; |
ce736788 | 2137 | struct stmmac_tx_queue *tx_q; |
47f2a9ce JP |
2138 | u32 dummy_dma_rx_phy = 0; |
2139 | u32 dummy_dma_tx_phy = 0; | |
2140 | u32 chan = 0; | |
c24602ef | 2141 | int atds = 0; |
495db273 | 2142 | int ret = 0; |
0f1f88a8 | 2143 | |
a332e2fa NC |
2144 | if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { |
2145 | dev_err(priv->device, "Invalid DMA configuration\n"); | |
89ab75bf | 2146 | return -EINVAL; |
0f1f88a8 GC |
2147 | } |
2148 | ||
c24602ef GC |
2149 | if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) |
2150 | atds = 1; | |
2151 | ||
495db273 GC |
2152 | ret = priv->hw->dma->reset(priv->ioaddr); |
2153 | if (ret) { | |
2154 | dev_err(priv->device, "Failed to reset the dma\n"); | |
2155 | return ret; | |
2156 | } | |
2157 | ||
f748be53 | 2158 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
47f2a9ce JP |
2159 | /* DMA Configuration */ |
2160 | priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, | |
2161 | dummy_dma_tx_phy, dummy_dma_rx_phy, atds); | |
2162 | ||
2163 | /* DMA RX Channel Configuration */ | |
2164 | for (chan = 0; chan < rx_channels_count; chan++) { | |
54139cf3 JP |
2165 | rx_q = &priv->rx_queue[chan]; |
2166 | ||
47f2a9ce JP |
2167 | priv->hw->dma->init_rx_chan(priv->ioaddr, |
2168 | priv->plat->dma_cfg, | |
54139cf3 | 2169 | rx_q->dma_rx_phy, chan); |
47f2a9ce | 2170 | |
54139cf3 | 2171 | rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
47f2a9ce JP |
2172 | (DMA_RX_SIZE * sizeof(struct dma_desc)); |
2173 | priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, | |
54139cf3 | 2174 | rx_q->rx_tail_addr, |
47f2a9ce JP |
2175 | chan); |
2176 | } | |
2177 | ||
2178 | /* DMA TX Channel Configuration */ | |
2179 | for (chan = 0; chan < tx_channels_count; chan++) { | |
ce736788 JP |
2180 | tx_q = &priv->tx_queue[chan]; |
2181 | ||
47f2a9ce | 2182 | priv->hw->dma->init_chan(priv->ioaddr, |
ce736788 JP |
2183 | priv->plat->dma_cfg, |
2184 | chan); | |
47f2a9ce JP |
2185 | |
2186 | priv->hw->dma->init_tx_chan(priv->ioaddr, | |
2187 | priv->plat->dma_cfg, | |
ce736788 | 2188 | tx_q->dma_tx_phy, chan); |
47f2a9ce | 2189 | |
ce736788 | 2190 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + |
47f2a9ce JP |
2191 | (DMA_TX_SIZE * sizeof(struct dma_desc)); |
2192 | priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, | |
ce736788 | 2193 | tx_q->tx_tail_addr, |
47f2a9ce JP |
2194 | chan); |
2195 | } | |
2196 | } else { | |
54139cf3 | 2197 | rx_q = &priv->rx_queue[chan]; |
ce736788 | 2198 | tx_q = &priv->tx_queue[chan]; |
47f2a9ce | 2199 | priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, |
ce736788 | 2200 | tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); |
f748be53 AT |
2201 | } |
2202 | ||
2203 | if (priv->plat->axi && priv->hw->dma->axi) | |
afea0365 GC |
2204 | priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); |
2205 | ||
495db273 | 2206 | return ret; |
0f1f88a8 GC |
2207 | } |
2208 | ||
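/*
 * Tail pointer arithmetic used above for GMAC4 and newer: the tail is
 * placed one descriptor past the ring, i.e. base + DMA_RX_SIZE *
 * sizeof(struct dma_desc).  For example, a 512-entry ring of 16-byte
 * descriptors gives base + 0x2000 (sizes illustrative).
 */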
9125cdd1 | 2209 | /** |
732fdf0e | 2210 | * stmmac_tx_timer - mitigation sw timer for tx. |
9125cdd1 GC |
2211 | * @data: data pointer |
2212 | * Description: | |
2213 | * This is the timer handler to directly invoke the stmmac_tx_clean. | |
2214 | */ | |
2215 | static void stmmac_tx_timer(unsigned long data) | |
2216 | { | |
2217 | struct stmmac_priv *priv = (struct stmmac_priv *)data; | |
ce736788 JP |
2218 | u32 tx_queues_count = priv->plat->tx_queues_to_use; |
2219 | u32 queue; | |
9125cdd1 | 2220 | |
ce736788 JP |
2221 | /* let's scan all the tx queues */ |
2222 | for (queue = 0; queue < tx_queues_count; queue++) | |
2223 | stmmac_tx_clean(priv, queue); | |
9125cdd1 GC |
2224 | } |
2225 | ||
2226 | /** | |
732fdf0e | 2227 | * stmmac_init_tx_coalesce - init tx mitigation options. |
32ceabca | 2228 | * @priv: driver private structure |
9125cdd1 GC |
2229 | * Description: |
2230 | * This inits the transmit coalesce parameters: i.e. timer rate, | |
2231 | * timer handler and default threshold used for enabling the | |
2232 | * interrupt on completion bit. | |
2233 | */ | |
2234 | static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) | |
2235 | { | |
2236 | priv->tx_coal_frames = STMMAC_TX_FRAMES; | |
2237 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; | |
997decfb | 2238 | setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv); |
9125cdd1 | 2239 | priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); |
9125cdd1 GC |
2240 | add_timer(&priv->txtimer); |
2241 | } | |
2242 | ||
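/*
 * With the defaults set above, the interrupt-on-completion bit is only
 * requested about every tx_coal_frames frames; the periodic txtimer then
 * calls stmmac_tx_timer() -> stmmac_tx_clean() so that descriptors sent
 * without that bit still get reclaimed.
 */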
4854ab99 JP |
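/**
 * stmmac_set_rings_length - set the TX/RX DMA ring lengths
 * @priv: driver private structure
 * Description: it programs the TX and RX ring length registers for every
 * channel, when the corresponding callbacks are provided.
 */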
2243 | static void stmmac_set_rings_length(struct stmmac_priv *priv) |
2244 | { | |
2245 | u32 rx_channels_count = priv->plat->rx_queues_to_use; | |
2246 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
2247 | u32 chan; | |
2248 | ||
2249 | /* set TX ring length */ | |
2250 | if (priv->hw->dma->set_tx_ring_len) { | |
2251 | for (chan = 0; chan < tx_channels_count; chan++) | |
2252 | priv->hw->dma->set_tx_ring_len(priv->ioaddr, | |
2253 | (DMA_TX_SIZE - 1), chan); | |
2254 | } | |
2255 | ||
2256 | /* set RX ring length */ | |
2257 | if (priv->hw->dma->set_rx_ring_len) { | |
2258 | for (chan = 0; chan < rx_channels_count; chan++) | |
2259 | priv->hw->dma->set_rx_ring_len(priv->ioaddr, | |
2260 | (DMA_RX_SIZE - 1), chan); | |
2261 | } | |
2262 | } | |
2263 | ||
6a3a7193 JP |
2264 | /** |
2265 | * stmmac_set_tx_queue_weight - Set TX queue weight | |
2266 | * @priv: driver private structure | |
2267 | * Description: It is used for setting TX queues weight | |
2268 | */ | |
2269 | static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) | |
2270 | { | |
2271 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | |
2272 | u32 weight; | |
2273 | u32 queue; | |
2274 | ||
2275 | for (queue = 0; queue < tx_queues_count; queue++) { | |
2276 | weight = priv->plat->tx_queues_cfg[queue].weight; | |
2277 | priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue); | |
2278 | } | |
2279 | } | |
2280 | ||
19d91873 JP |
2281 | /** |
2282 | * stmmac_configure_cbs - Configure CBS in TX queue | |
2283 | * @priv: driver private structure | |
2284 | * Description: It is used for configuring CBS in AVB TX queues | |
2285 | */ | |
2286 | static void stmmac_configure_cbs(struct stmmac_priv *priv) | |
2287 | { | |
2288 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | |
2289 | u32 mode_to_use; | |
2290 | u32 queue; | |
2291 | ||
44781fef JP |
2292 | /* queue 0 is reserved for legacy traffic */ |
2293 | for (queue = 1; queue < tx_queues_count; queue++) { | |
19d91873 JP |
2294 | mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; |
2295 | if (mode_to_use == MTL_QUEUE_DCB) | |
2296 | continue; | |
2297 | ||
2298 | priv->hw->mac->config_cbs(priv->hw, | |
2299 | priv->plat->tx_queues_cfg[queue].send_slope, | |
2300 | priv->plat->tx_queues_cfg[queue].idle_slope, | |
2301 | priv->plat->tx_queues_cfg[queue].high_credit, | |
2302 | priv->plat->tx_queues_cfg[queue].low_credit, | |
2303 | queue); | |
2304 | } | |
2305 | } | |
2306 | ||
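/*
 * Note on the loop above: queue 0 always carries legacy traffic and is
 * never shaped, and queues left in MTL_QUEUE_DCB mode are skipped, so
 * the credit-based shaper parameters (send/idle slope, high/low credit)
 * are only programmed on the queues flagged as AVB in the platform data.
 */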
d43042f4 JP |
2307 | /** |
2308 | * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel | |
2309 | * @priv: driver private structure | |
2310 | * Description: It is used for mapping RX queues to RX dma channels | |
2311 | */ | |
2312 | static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) | |
2313 | { | |
2314 | u32 rx_queues_count = priv->plat->rx_queues_to_use; | |
2315 | u32 queue; | |
2316 | u32 chan; | |
2317 | ||
2318 | for (queue = 0; queue < rx_queues_count; queue++) { | |
2319 | chan = priv->plat->rx_queues_cfg[queue].chan; | |
2320 | priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan); | |
2321 | } | |
2322 | } | |
2323 | ||
a8f5102a JP |
2324 | /** |
2325 | * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority | |
2326 | * @priv: driver private structure | |
2327 | * Description: It is used for configuring the RX Queue Priority | |
2328 | */ | |
2329 | static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) | |
2330 | { | |
2331 | u32 rx_queues_count = priv->plat->rx_queues_to_use; | |
2332 | u32 queue; | |
2333 | u32 prio; | |
2334 | ||
2335 | for (queue = 0; queue < rx_queues_count; queue++) { | |
2336 | if (!priv->plat->rx_queues_cfg[queue].use_prio) | |
2337 | continue; | |
2338 | ||
2339 | prio = priv->plat->rx_queues_cfg[queue].prio; | |
2340 | priv->hw->mac->rx_queue_prio(priv->hw, prio, queue); | |
2341 | } | |
2342 | } | |
2343 | ||
2344 | /** | |
2345 | * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority | |
2346 | * @priv: driver private structure | |
2347 | * Description: It is used for configuring the TX Queue Priority | |
2348 | */ | |
2349 | static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) | |
2350 | { | |
2351 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | |
2352 | u32 queue; | |
2353 | u32 prio; | |
2354 | ||
2355 | for (queue = 0; queue < tx_queues_count; queue++) { | |
2356 | if (!priv->plat->tx_queues_cfg[queue].use_prio) | |
2357 | continue; | |
2358 | ||
2359 | prio = priv->plat->tx_queues_cfg[queue].prio; | |
2360 | priv->hw->mac->tx_queue_prio(priv->hw, prio, queue); | |
2361 | } | |
2362 | } | |
2363 | ||
abe80fdc JP |
2364 | /** |
2365 | * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing | |
2366 | * @priv: driver private structure | |
2367 | * Description: It is used for configuring the RX queue routing | |
2368 | */ | |
2369 | static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) | |
2370 | { | |
2371 | u32 rx_queues_count = priv->plat->rx_queues_to_use; | |
2372 | u32 queue; | |
2373 | u8 packet; | |
2374 | ||
2375 | for (queue = 0; queue < rx_queues_count; queue++) { | |
2376 | /* no specific packet type routing specified for the queue */ | |
2377 | if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) | |
2378 | continue; | |
2379 | ||
2380 | packet = priv->plat->rx_queues_cfg[queue].pkt_route; | |
2381 | priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); |
2382 | } | |
2383 | } | |
2384 | ||
d0a9c9f9 JP |
2385 | /** |
2386 | * stmmac_mtl_configuration - Configure MTL | |
2387 | * @priv: driver private structure | |
2388 | * Description: It is used for configuring the MTL |
2389 | */ | |
2390 | static void stmmac_mtl_configuration(struct stmmac_priv *priv) | |
2391 | { | |
2392 | u32 rx_queues_count = priv->plat->rx_queues_to_use; | |
2393 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | |
2394 | ||
6a3a7193 JP |
2395 | if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight) |
2396 | stmmac_set_tx_queue_weight(priv); | |
2397 | ||
d0a9c9f9 JP |
2398 | /* Configure MTL RX algorithms */ |
2399 | if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms) | |
2400 | priv->hw->mac->prog_mtl_rx_algorithms(priv->hw, | |
2401 | priv->plat->rx_sched_algorithm); | |
2402 | ||
2403 | /* Configure MTL TX algorithms */ | |
2404 | if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms) | |
2405 | priv->hw->mac->prog_mtl_tx_algorithms(priv->hw, | |
2406 | priv->plat->tx_sched_algorithm); | |
2407 | ||
19d91873 JP |
2408 | /* Configure CBS in AVB TX queues */ |
2409 | if (tx_queues_count > 1 && priv->hw->mac->config_cbs) | |
2410 | stmmac_configure_cbs(priv); | |
2411 | ||
d43042f4 | 2412 | /* Map RX MTL to DMA channels */ |
03cf65a9 | 2413 | if (priv->hw->mac->map_mtl_to_dma) |
d43042f4 JP |
2414 | stmmac_rx_queue_dma_chan_map(priv); |
2415 | ||
d0a9c9f9 | 2416 | /* Enable MAC RX Queues */ |
f3976874 | 2417 | if (priv->hw->mac->rx_queue_enable) |
d0a9c9f9 | 2418 | stmmac_mac_enable_rx_queues(priv); |
6deee222 | 2419 | |
a8f5102a JP |
2420 | /* Set RX priorities */ |
2421 | if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio) | |
2422 | stmmac_mac_config_rx_queues_prio(priv); | |
2423 | ||
2424 | /* Set TX priorities */ | |
2425 | if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio) | |
2426 | stmmac_mac_config_tx_queues_prio(priv); | |
abe80fdc JP |
2427 | |
2428 | /* Set RX routing */ | |
2429 | if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing) | |
2430 | stmmac_mac_config_rx_queues_routing(priv); | |
d0a9c9f9 JP |
2431 | } |
2432 | ||
523f11b5 | 2433 | /** |
732fdf0e | 2434 | * stmmac_hw_setup - setup mac in a usable state. |
523f11b5 SK |
2435 | * @dev : pointer to the device structure. |
2436 | * Description: | |
732fdf0e GC |
2437 | * this is the main function to set up the HW in a usable state: the |
2438 | * dma engine is reset, the core registers are configured (e.g. AXI, |
2439 | * Checksum features, timers) and the DMA is made ready to start receiving |
2440 | * and transmitting. |
523f11b5 SK |
2441 | * Return value: |
2442 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | |
2443 | * file on failure. | |
2444 | */ | |
fe131929 | 2445 | static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) |
523f11b5 SK |
2446 | { |
2447 | struct stmmac_priv *priv = netdev_priv(dev); | |
3c55d4d0 | 2448 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
146617b8 JP |
2449 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
2450 | u32 chan; | |
523f11b5 SK |
2451 | int ret; |
2452 | ||
523f11b5 SK |
2453 | /* DMA initialization and SW reset */ |
2454 | ret = stmmac_init_dma_engine(priv); | |
2455 | if (ret < 0) { | |
38ddc59d LC |
2456 | netdev_err(priv->dev, "%s: DMA engine initialization failed\n", |
2457 | __func__); | |
523f11b5 SK |
2458 | return ret; |
2459 | } | |
2460 | ||
2461 | /* Copy the MAC addr into the HW */ | |
7ed24bbe | 2462 | priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); |
523f11b5 | 2463 | |
02e57b9d GC |
2464 | /* PS and related bits will be programmed according to the speed */ |
2465 | if (priv->hw->pcs) { | |
2466 | int speed = priv->plat->mac_port_sel_speed; | |
2467 | ||
2468 | if ((speed == SPEED_10) || (speed == SPEED_100) || | |
2469 | (speed == SPEED_1000)) { | |
2470 | priv->hw->ps = speed; | |
2471 | } else { | |
2472 | dev_warn(priv->device, "invalid port speed\n"); | |
2473 | priv->hw->ps = 0; | |
2474 | } | |
2475 | } | |
2476 | ||
523f11b5 | 2477 | /* Initialize the MAC Core */ |
7ed24bbe | 2478 | priv->hw->mac->core_init(priv->hw, dev->mtu); |
523f11b5 | 2479 | |
d0a9c9f9 JP |
2480 | /* Initialize MTL */ |
2481 | if (priv->synopsys_id >= DWMAC_CORE_4_00) | |
2482 | stmmac_mtl_configuration(priv); | |
9eb12474 | 2483 | |
978aded4 GC |
2484 | ret = priv->hw->mac->rx_ipc(priv->hw); |
2485 | if (!ret) { | |
38ddc59d | 2486 | netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); |
978aded4 | 2487 | priv->plat->rx_coe = STMMAC_RX_COE_NONE; |
d2afb5bd | 2488 | priv->hw->rx_csum = 0; |
978aded4 GC |
2489 | } |
2490 | ||
523f11b5 | 2491 | /* Enable the MAC Rx/Tx */ |
270c7759 | 2492 | priv->hw->mac->set_mac(priv->ioaddr, true); |
523f11b5 | 2493 | |
b4f0a661 JP |
2494 | /* Set the HW DMA mode and the COE */ |
2495 | stmmac_dma_operation_mode(priv); | |
2496 | ||
523f11b5 SK |
2497 | stmmac_mmc_setup(priv); |
2498 | ||
fe131929 | 2499 | if (init_ptp) { |
0ad2be79 TR |
2500 | ret = clk_prepare_enable(priv->plat->clk_ptp_ref); |
2501 | if (ret < 0) | |
2502 | netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); | |
2503 | ||
fe131929 | 2504 | ret = stmmac_init_ptp(priv); |
722eef28 HK |
2505 | if (ret == -EOPNOTSUPP) |
2506 | netdev_warn(priv->dev, "PTP not supported by HW\n"); | |
2507 | else if (ret) | |
2508 | netdev_warn(priv->dev, "PTP init failed\n"); | |
fe131929 | 2509 | } |
523f11b5 | 2510 | |
50fb4f74 | 2511 | #ifdef CONFIG_DEBUG_FS |
523f11b5 SK |
2512 | ret = stmmac_init_fs(dev); |
2513 | if (ret < 0) | |
38ddc59d LC |
2514 | netdev_warn(priv->dev, "%s: failed debugFS registration\n", |
2515 | __func__); | |
523f11b5 SK |
2516 | #endif |
2517 | /* Start the ball rolling... */ | |
ae4f0d46 | 2518 | stmmac_start_all_dma(priv); |
523f11b5 | 2519 | |
523f11b5 SK |
2520 | priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; |
2521 | ||
523f11b5 SK |
2522 | if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { |
2523 | priv->rx_riwt = MAX_DMA_RIWT; | |
3c55d4d0 | 2524 | priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt); |
523f11b5 SK |
2525 | } |
2526 | ||
3fe5cadb | 2527 | if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) |
02e57b9d | 2528 | priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); |
523f11b5 | 2529 | |
4854ab99 JP |
2530 | /* set TX and RX rings length */ |
2531 | stmmac_set_rings_length(priv); | |
2532 | ||
f748be53 | 2533 | /* Enable TSO */ |
146617b8 JP |
2534 | if (priv->tso) { |
2535 | for (chan = 0; chan < tx_cnt; chan++) | |
2536 | priv->hw->dma->enable_tso(priv->ioaddr, 1, chan); | |
2537 | } | |
f748be53 | 2538 | |
523f11b5 SK |
2539 | return 0; |
2540 | } | |
2541 | ||
c66f6c37 TR |
2542 | static void stmmac_hw_teardown(struct net_device *dev) |
2543 | { | |
2544 | struct stmmac_priv *priv = netdev_priv(dev); | |
2545 | ||
2546 | clk_disable_unprepare(priv->plat->clk_ptp_ref); | |
2547 | } | |
2548 | ||
47dd7a54 GC |
2549 | /** |
2550 | * stmmac_open - open entry point of the driver | |
2551 | * @dev : pointer to the device structure. | |
2552 | * Description: | |
2553 | * This function is the open entry point of the driver. | |
2554 | * Return value: | |
2555 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | |
2556 | * file on failure. | |
2557 | */ | |
2558 | static int stmmac_open(struct net_device *dev) | |
2559 | { | |
2560 | struct stmmac_priv *priv = netdev_priv(dev); | |
47dd7a54 GC |
2561 | int ret; |
2562 | ||
4bfcbd7a FV |
2563 | stmmac_check_ether_addr(priv); |
2564 | ||
3fe5cadb GC |
2565 | if (priv->hw->pcs != STMMAC_PCS_RGMII && |
2566 | priv->hw->pcs != STMMAC_PCS_TBI && | |
2567 | priv->hw->pcs != STMMAC_PCS_RTBI) { | |
e58bb43f GC |
2568 | ret = stmmac_init_phy(dev); |
2569 | if (ret) { | |
38ddc59d LC |
2570 | netdev_err(priv->dev, |
2571 | "%s: Cannot attach to PHY (error: %d)\n", | |
2572 | __func__, ret); | |
89df20d9 | 2573 | return ret; |
e58bb43f | 2574 | } |
f66ffe28 | 2575 | } |
47dd7a54 | 2576 | |
523f11b5 SK |
2577 | /* Extra statistics */ |
2578 | memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); | |
2579 | priv->xstats.threshold = tc; | |
2580 | ||
5bacd778 | 2581 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
22ad3838 | 2582 | priv->rx_copybreak = STMMAC_RX_COPYBREAK; |
56329137 | 2583 | |
5bacd778 LC |
2584 | ret = alloc_dma_desc_resources(priv); |
2585 | if (ret < 0) { | |
2586 | netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", | |
2587 | __func__); | |
2588 | goto dma_desc_error; | |
2589 | } | |
2590 | ||
2591 | ret = init_dma_desc_rings(dev, GFP_KERNEL); | |
2592 | if (ret < 0) { | |
2593 | netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", | |
2594 | __func__); | |
2595 | goto init_error; | |
2596 | } | |
2597 | ||
fe131929 | 2598 | ret = stmmac_hw_setup(dev, true); |
56329137 | 2599 | if (ret < 0) { |
38ddc59d | 2600 | netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); |
c9324d18 | 2601 | goto init_error; |
47dd7a54 GC |
2602 | } |
2603 | ||
777da230 GC |
2604 | stmmac_init_tx_coalesce(priv); |
2605 | ||
d6d50c7e PR |
2606 | if (dev->phydev) |
2607 | phy_start(dev->phydev); | |
47dd7a54 | 2608 | |
f66ffe28 GC |
2609 | /* Request the IRQ lines */ |
2610 | ret = request_irq(dev->irq, stmmac_interrupt, | |
ceb69499 | 2611 | IRQF_SHARED, dev->name, dev); |
f66ffe28 | 2612 | if (unlikely(ret < 0)) { |
38ddc59d LC |
2613 | netdev_err(priv->dev, |
2614 | "%s: ERROR: allocating the IRQ %d (error: %d)\n", | |
2615 | __func__, dev->irq, ret); | |
6c1e5abe | 2616 | goto irq_error; |
f66ffe28 GC |
2617 | } |
2618 | ||
7a13f8f5 FV |
2619 | /* Request the Wake IRQ in case another line is used for WoL */ |
2620 | if (priv->wol_irq != dev->irq) { | |
2621 | ret = request_irq(priv->wol_irq, stmmac_interrupt, | |
2622 | IRQF_SHARED, dev->name, dev); | |
2623 | if (unlikely(ret < 0)) { | |
38ddc59d LC |
2624 | netdev_err(priv->dev, |
2625 | "%s: ERROR: allocating the WoL IRQ %d (%d)\n", | |
2626 | __func__, priv->wol_irq, ret); | |
c9324d18 | 2627 | goto wolirq_error; |
7a13f8f5 FV |
2628 | } |
2629 | } | |
2630 | ||
d765955d | 2631 | /* Request the IRQ lines */ |
d7ec8584 | 2632 | if (priv->lpi_irq > 0) { |
d765955d GC |
2633 | ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, |
2634 | dev->name, dev); | |
2635 | if (unlikely(ret < 0)) { | |
38ddc59d LC |
2636 | netdev_err(priv->dev, |
2637 | "%s: ERROR: allocating the LPI IRQ %d (%d)\n", | |
2638 | __func__, priv->lpi_irq, ret); | |
c9324d18 | 2639 | goto lpiirq_error; |
d765955d GC |
2640 | } |
2641 | } | |
2642 | ||
c22a3f48 JP |
2643 | stmmac_enable_all_queues(priv); |
2644 | stmmac_start_all_queues(priv); | |
f66ffe28 | 2645 | |
47dd7a54 | 2646 | return 0; |
f66ffe28 | 2647 | |
c9324d18 | 2648 | lpiirq_error: |
d765955d GC |
2649 | if (priv->wol_irq != dev->irq) |
2650 | free_irq(priv->wol_irq, dev); | |
c9324d18 | 2651 | wolirq_error: |
7a13f8f5 | 2652 | free_irq(dev->irq, dev); |
6c1e5abe TR |
2653 | irq_error: |
2654 | if (dev->phydev) | |
2655 | phy_stop(dev->phydev); | |
7a13f8f5 | 2656 | |
6c1e5abe | 2657 | del_timer_sync(&priv->txtimer); |
c66f6c37 | 2658 | stmmac_hw_teardown(dev); |
c9324d18 GC |
2659 | init_error: |
2660 | free_dma_desc_resources(priv); | |
5bacd778 | 2661 | dma_desc_error: |
d6d50c7e PR |
2662 | if (dev->phydev) |
2663 | phy_disconnect(dev->phydev); | |
4bfcbd7a | 2664 | |
f66ffe28 | 2665 | return ret; |
47dd7a54 GC |
2666 | } |
2667 | ||
2668 | /** | |
2669 | * stmmac_release - close entry point of the driver | |
2670 | * @dev : device pointer. | |
2671 | * Description: | |
2672 | * This is the stop entry point of the driver. | |
2673 | */ | |
2674 | static int stmmac_release(struct net_device *dev) | |
2675 | { | |
2676 | struct stmmac_priv *priv = netdev_priv(dev); | |
2677 | ||
d765955d GC |
2678 | if (priv->eee_enabled) |
2679 | del_timer_sync(&priv->eee_ctrl_timer); | |
2680 | ||
47dd7a54 | 2681 | /* Stop and disconnect the PHY */ |
d6d50c7e PR |
2682 | if (dev->phydev) { |
2683 | phy_stop(dev->phydev); | |
2684 | phy_disconnect(dev->phydev); | |
47dd7a54 GC |
2685 | } |
2686 | ||
c22a3f48 | 2687 | stmmac_stop_all_queues(priv); |
47dd7a54 | 2688 | |
c22a3f48 | 2689 | stmmac_disable_all_queues(priv); |
47dd7a54 | 2690 | |
9125cdd1 GC |
2691 | del_timer_sync(&priv->txtimer); |
2692 | ||
47dd7a54 GC |
2693 | /* Free the IRQ lines */ |
2694 | free_irq(dev->irq, dev); | |
7a13f8f5 FV |
2695 | if (priv->wol_irq != dev->irq) |
2696 | free_irq(priv->wol_irq, dev); | |
d7ec8584 | 2697 | if (priv->lpi_irq > 0) |
d765955d | 2698 | free_irq(priv->lpi_irq, dev); |
47dd7a54 GC |
2699 | |
2700 | /* Stop TX/RX DMA and clear the descriptors */ | |
ae4f0d46 | 2701 | stmmac_stop_all_dma(priv); |
47dd7a54 GC |
2702 | |
2703 | /* Release and free the Rx/Tx resources */ | |
2704 | free_dma_desc_resources(priv); | |
2705 | ||
19449bfc | 2706 | /* Disable the MAC Rx/Tx */ |
270c7759 | 2707 | priv->hw->mac->set_mac(priv->ioaddr, false); |
47dd7a54 GC |
2708 | |
2709 | netif_carrier_off(dev); | |
2710 | ||
50fb4f74 | 2711 | #ifdef CONFIG_DEBUG_FS |
466c5ac8 | 2712 | stmmac_exit_fs(dev); |
bfab27a1 | 2713 | #endif |
bfab27a1 | 2714 | |
92ba6888 RK |
2715 | stmmac_release_ptp(priv); |
2716 | ||
47dd7a54 GC |
2717 | return 0; |
2718 | } | |
2719 | ||
f748be53 AT |
2720 | /** |
2721 | * stmmac_tso_allocator - fill the TSO payload descriptors |
2722 | * @priv: driver private structure | |
2723 | * @des: buffer start address | |
2724 | * @total_len: total length to fill in descriptors | |
2725 | * @last_segment: condition for the last descriptor |
ce736788 | 2726 | * @queue: TX queue index |
f748be53 AT |
2727 | * Description: |
2728 | * This function fills descriptors and requests new descriptors according to |
2729 | * the buffer length to fill. |
2730 | */ | |
2731 | static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, | |
ce736788 | 2732 | int total_len, bool last_segment, u32 queue) |
f748be53 | 2733 | { |
ce736788 | 2734 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
f748be53 | 2735 | struct dma_desc *desc; |
5bacd778 | 2736 | u32 buff_size; |
ce736788 | 2737 | int tmp_len; |
f748be53 AT |
2738 | |
2739 | tmp_len = total_len; | |
2740 | ||
2741 | while (tmp_len > 0) { | |
ce736788 JP |
2742 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
2743 | desc = tx_q->dma_tx + tx_q->cur_tx; | |
f748be53 | 2744 | |
f8be0d78 | 2745 | desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); |
f748be53 AT |
2746 | buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? |
2747 | TSO_MAX_BUFF_SIZE : tmp_len; | |
2748 | ||
2749 | priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, | |
2750 | 0, 1, | |
426849e6 | 2751 | (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), |
f748be53 AT |
2752 | 0, 0); |
2753 | ||
2754 | tmp_len -= TSO_MAX_BUFF_SIZE; | |
2755 | } | |
2756 | } | |
2757 | ||
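/*
 * Worked example for the allocator above: with total_len = 40000 and
 * TSO_MAX_BUFF_SIZE = 16383 (SZ_16K - 1), three descriptors are filled
 * with 16383, 16383 and 7234 bytes.  Since each buffer address is
 * des + (total_len - tmp_len), they point at des, des + 16383 and
 * des + 32766 respectively.
 */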
2758 | /** | |
2759 | * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) | |
2760 | * @skb : the socket buffer | |
2761 | * @dev : device pointer | |
2762 | * Description: this is the transmit function that is called on TSO frames | |
2763 | * (support available on GMAC4 and newer chips). | |
2764 | * The diagram below shows the ring programming in case of TSO frames: |
2765 | * | |
2766 | * First Descriptor | |
2767 | * -------- | |
2768 | * | DES0 |---> buffer1 = L2/L3/L4 header | |
2769 | * | DES1 |---> TCP Payload (can continue on next descr...) | |
2770 | * | DES2 |---> buffer 1 and 2 len | |
2771 | * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] | |
2772 | * -------- | |
2773 | * | | |
2774 | * ... | |
2775 | * | | |
2776 | * -------- | |
2777 | * | DES0 | --| Split TCP Payload on Buffers 1 and 2 | |
2778 | * | DES1 | --| | |
2779 | * | DES2 | --> buffer 1 and 2 len | |
2780 | * | DES3 | | |
2781 | * -------- | |
2782 | * | |
2783 | * MSS is fixed when TSO is enabled, so the TDES3 ctx field is programmed only when it changes. |
2784 | */ | |
2785 | static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |
2786 | { | |
ce736788 | 2787 | struct dma_desc *desc, *first, *mss_desc = NULL; |
f748be53 AT |
2788 | struct stmmac_priv *priv = netdev_priv(dev); |
2789 | int nfrags = skb_shinfo(skb)->nr_frags; | |
ce736788 | 2790 | u32 queue = skb_get_queue_mapping(skb); |
f748be53 | 2791 | unsigned int first_entry, des; |
ce736788 JP |
2792 | struct stmmac_tx_queue *tx_q; |
2793 | int tmp_pay_len = 0; | |
2794 | u32 pay_len, mss; | |
f748be53 AT |
2795 | u8 proto_hdr_len; |
2796 | int i; | |
2797 | ||
ce736788 JP |
2798 | tx_q = &priv->tx_queue[queue]; |
2799 | ||
f748be53 AT |
2800 | /* Compute header lengths */ |
2801 | proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
2802 | ||
2803 | /* Desc availability based on threshold should be safe enough */ | |
ce736788 | 2804 | if (unlikely(stmmac_tx_avail(priv, queue) < |
f748be53 | 2805 | (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { |
c22a3f48 JP |
2806 | if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { |
2807 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, | |
2808 | queue)); | |
f748be53 | 2809 | /* This is a hard error, log it. */ |
38ddc59d LC |
2810 | netdev_err(priv->dev, |
2811 | "%s: Tx Ring full when queue awake\n", | |
2812 | __func__); | |
f748be53 | 2813 | } |
f748be53 AT |
2814 | return NETDEV_TX_BUSY; |
2815 | } | |
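/* Worked example of the check above (sizes hypothetical): with
 * skb->len = 65226 and proto_hdr_len = 66, the TSO payload is 65160
 * bytes, so at least 65160 / 16383 + 1 = 4 descriptors must be free
 * before the frame is accepted; otherwise the queue is stopped and
 * NETDEV_TX_BUSY is returned.
 */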
2816 | ||
2817 | pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ | |
2818 | ||
2819 | mss = skb_shinfo(skb)->gso_size; | |
2820 | ||
2821 | /* set new MSS value if needed */ | |
2822 | if (mss != priv->mss) { | |
ce736788 | 2823 | mss_desc = tx_q->dma_tx + tx_q->cur_tx; |
f748be53 AT |
2824 | priv->hw->desc->set_mss(mss_desc, mss); |
2825 | priv->mss = mss; | |
ce736788 | 2826 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
f748be53 AT |
2827 | } |
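/* Note: the context descriptor above is only emitted when gso_size
 * differs from the cached priv->mss, so consecutive frames of the same
 * flow reuse the MSS already programmed and cost no extra descriptor.
 */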
2828 | ||
2829 | if (netif_msg_tx_queued(priv)) { | |
2830 | pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n", | |
2831 | __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss); | |
2832 | pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, | |
2833 | skb->data_len); | |
2834 | } | |
2835 | ||
ce736788 | 2836 | first_entry = tx_q->cur_tx; |
f748be53 | 2837 | |
ce736788 | 2838 | desc = tx_q->dma_tx + first_entry; |
f748be53 AT |
2839 | first = desc; |
2840 | ||
2841 | /* first descriptor: fill Headers on Buf1 */ | |
2842 | des = dma_map_single(priv->device, skb->data, skb_headlen(skb), | |
2843 | DMA_TO_DEVICE); | |
2844 | if (dma_mapping_error(priv->device, des)) | |
2845 | goto dma_map_err; | |
2846 | ||
ce736788 JP |
2847 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
2848 | tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); | |
f748be53 | 2849 | |
f8be0d78 | 2850 | first->des0 = cpu_to_le32(des); |
f748be53 AT |
2851 | |
2852 | /* Fill start of payload in buff2 of first descriptor */ | |
2853 | if (pay_len) | |
f8be0d78 | 2854 | first->des1 = cpu_to_le32(des + proto_hdr_len); |
f748be53 AT |
2855 | |
2856 | /* If needed take extra descriptors to fill the remaining payload */ | |
2857 | tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; | |
2858 | ||
ce736788 | 2859 | stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); |
f748be53 AT |
2860 | |
2861 | /* Prepare fragments */ | |
2862 | for (i = 0; i < nfrags; i++) { | |
2863 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
2864 | ||
2865 | des = skb_frag_dma_map(priv->device, frag, 0, | |
2866 | skb_frag_size(frag), | |
2867 | DMA_TO_DEVICE); | |
937071c1 TR |
2868 | if (dma_mapping_error(priv->device, des)) |
2869 | goto dma_map_err; | |
f748be53 AT |
2870 | |
2871 | stmmac_tso_allocator(priv, des, skb_frag_size(frag), | |
ce736788 | 2872 | (i == nfrags - 1), queue); |
f748be53 | 2873 | |
ce736788 JP |
2874 | tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; |
2875 | tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); | |
2876 | tx_q->tx_skbuff[tx_q->cur_tx] = NULL; | |
2877 | tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; | |
f748be53 AT |
2878 | } |
2879 | ||
ce736788 | 2880 | tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; |
f748be53 | 2881 | |
05cf0d1b NC |
2882 | /* Only the last descriptor gets to point to the skb. */ |
2883 | tx_q->tx_skbuff[tx_q->cur_tx] = skb; | |
2884 | ||
2885 | /* We've used all descriptors we need for this skb, however, | |
2886 | * advance cur_tx so that it references a fresh descriptor. | |
2887 | * ndo_start_xmit will fill this descriptor the next time it's | |
2888 | * called and stmmac_tx_clean may clean up to this descriptor. | |
2889 | */ | |
ce736788 | 2890 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
f748be53 | 2891 | |
ce736788 | 2892 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
b3e51069 LC |
2893 | netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", |
2894 | __func__); | |
c22a3f48 | 2895 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); |
f748be53 AT |
2896 | } |
2897 | ||
2898 | dev->stats.tx_bytes += skb->len; | |
2899 | priv->xstats.tx_tso_frames++; | |
2900 | priv->xstats.tx_tso_nfrags += nfrags; | |
2901 | ||
2902 | /* Manage tx mitigation */ | |
2903 | priv->tx_count_frames += nfrags + 1; | |
2904 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { | |
2905 | mod_timer(&priv->txtimer, | |
2906 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | |
2907 | } else { | |
2908 | priv->tx_count_frames = 0; | |
2909 | priv->hw->desc->set_tx_ic(desc); | |
2910 | priv->xstats.tx_set_ic_bit++; | |
2911 | } | |
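/* Coalescing sketch (tx_coal_frames value hypothetical): the counter
 * advances by nfrags + 1 per frame; with tx_coal_frames = 25, a head
 * plus 3 fragments adds 4 and merely re-arms the timer, and only once
 * the threshold is crossed is the IC bit set, so that one Tx-complete
 * interrupt covers the whole batch.
 */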
2912 | ||
74abc9b1 | 2913 | skb_tx_timestamp(skb); |
f748be53 AT |
2914 | |
2915 | if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | |
2916 | priv->hwts_tx_en)) { | |
2917 | /* declare that device is doing timestamping */ | |
2918 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | |
2919 | priv->hw->desc->enable_tx_timestamp(first); | |
2920 | } | |
2921 | ||
2922 | /* Complete the first descriptor before granting the DMA */ | |
2923 | priv->hw->desc->prepare_tso_tx_desc(first, 1, | |
2924 | proto_hdr_len, | |
2925 | pay_len, | |
ce736788 | 2926 | 1, tx_q->tx_skbuff_dma[first_entry].last_segment, |
f748be53 AT |
2927 | tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); |
2928 | ||
2929 | /* If context desc is used to change MSS */ | |
2930 | if (mss_desc) | |
2931 | priv->hw->desc->set_tx_owner(mss_desc); | |
2932 | ||
2933 | /* The own bit must be the latest setting done when prepare the | |
2934 | * descriptor and then barrier is needed to make sure that | |
2935 | * all is coherent before granting the DMA engine. | |
2936 | */ | |
ad688cdb | 2937 | dma_wmb(); |
f748be53 AT |
2938 | |
2939 | if (netif_msg_pktdata(priv)) { | |
2940 | pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", | |
ce736788 JP |
2941 | __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
2942 | tx_q->cur_tx, first, nfrags); | |
f748be53 | 2943 | |
ce736788 | 2944 | priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE, |
f748be53 AT |
2945 | 0); |
2946 | ||
2947 | pr_info(">>> frame to be transmitted: "); | |
2948 | print_pkt(skb->data, skb_headlen(skb)); | |
2949 | } | |
2950 | ||
c22a3f48 | 2951 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
f748be53 | 2952 | |
ce736788 JP |
2953 | priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, |
2954 | queue); | |
f748be53 | 2955 | |
f748be53 AT |
2956 | return NETDEV_TX_OK; |
2957 | ||
2958 | dma_map_err: | |
f748be53 AT |
2959 | dev_err(priv->device, "Tx dma map failed\n"); |
2960 | dev_kfree_skb(skb); | |
2961 | priv->dev->stats.tx_dropped++; | |
2962 | return NETDEV_TX_OK; | |
2963 | } | |
2964 | ||
47dd7a54 | 2965 | /** |
732fdf0e | 2966 | * stmmac_xmit - Tx entry point of the driver |
47dd7a54 GC |
2967 | * @skb : the socket buffer |
2968 | * @dev : device pointer | |
32ceabca GC |
2969 | * Description: this is the Tx entry point of the driver. | |
2970 | * It programs the chain or the ring and supports oversized frames | |
2971 | * and the SG feature. | |
47dd7a54 GC |
2972 | */ |
2973 | static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |
2974 | { | |
2975 | struct stmmac_priv *priv = netdev_priv(dev); | |
0e80bdc9 | 2976 | unsigned int nopaged_len = skb_headlen(skb); |
4a7d666a | 2977 | int i, csum_insertion = 0, is_jumbo = 0; |
ce736788 | 2978 | u32 queue = skb_get_queue_mapping(skb); |
47dd7a54 | 2979 | int nfrags = skb_shinfo(skb)->nr_frags; |
59423815 CIK |
2980 | int entry; |
2981 | unsigned int first_entry; | |
47dd7a54 | 2982 | struct dma_desc *desc, *first; |
ce736788 | 2983 | struct stmmac_tx_queue *tx_q; |
0e80bdc9 | 2984 | unsigned int enh_desc; |
f748be53 AT |
2985 | unsigned int des; |
2986 | ||
ce736788 JP |
2987 | tx_q = &priv->tx_queue[queue]; |
2988 | ||
f748be53 AT |
2989 | /* Manage oversized TCP frames for GMAC4 device */ |
2990 | if (skb_is_gso(skb) && priv->tso) { | |
9edfa7da | 2991 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) |
f748be53 AT |
2992 | return stmmac_tso_xmit(skb, dev); |
2993 | } | |
47dd7a54 | 2994 | |
ce736788 | 2995 | if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { |
c22a3f48 JP |
2996 | if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { |
2997 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, | |
2998 | queue)); | |
47dd7a54 | 2999 | /* This is a hard error, log it. */ |
38ddc59d LC |
3000 | netdev_err(priv->dev, |
3001 | "%s: Tx Ring full when queue awake\n", | |
3002 | __func__); | |
47dd7a54 GC |
3003 | } |
3004 | return NETDEV_TX_BUSY; | |
3005 | } | |
3006 | ||
d765955d GC |
3007 | if (priv->tx_path_in_lpi_mode) |
3008 | stmmac_disable_eee_mode(priv); | |
3009 | ||
ce736788 | 3010 | entry = tx_q->cur_tx; |
0e80bdc9 | 3011 | first_entry = entry; |
47dd7a54 | 3012 | |
5e982f3b | 3013 | csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); |
47dd7a54 | 3014 | |
0e80bdc9 | 3015 | if (likely(priv->extend_desc)) |
ce736788 | 3016 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
c24602ef | 3017 | else |
ce736788 | 3018 | desc = tx_q->dma_tx + entry; |
c24602ef | 3019 | |
47dd7a54 GC |
3020 | first = desc; |
3021 | ||
0e80bdc9 | 3022 | enh_desc = priv->plat->enh_desc; |
4a7d666a | 3023 | /* To program the descriptors according to the size of the frame */ |
29896a67 GC |
3024 | if (enh_desc) |
3025 | is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); | |
3026 | ||
f748be53 AT |
3027 | if (unlikely(is_jumbo) && likely(priv->synopsys_id < |
3028 | DWMAC_CORE_4_00)) { | |
ce736788 | 3029 | entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion); |
362b37be GC |
3030 | if (unlikely(entry < 0)) |
3031 | goto dma_map_err; | |
29896a67 | 3032 | } |
47dd7a54 GC |
3033 | |
3034 | for (i = 0; i < nfrags; i++) { | |
9e903e08 ED |
3035 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
3036 | int len = skb_frag_size(frag); | |
be434d50 | 3037 | bool last_segment = (i == (nfrags - 1)); |
47dd7a54 | 3038 | |
e3ad57c9 GC |
3039 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
3040 | ||
0e80bdc9 | 3041 | if (likely(priv->extend_desc)) |
ce736788 | 3042 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
c24602ef | 3043 | else |
ce736788 | 3044 | desc = tx_q->dma_tx + entry; |
47dd7a54 | 3045 | |
f748be53 AT |
3046 | des = skb_frag_dma_map(priv->device, frag, 0, len, |
3047 | DMA_TO_DEVICE); | |
3048 | if (dma_mapping_error(priv->device, des)) | |
362b37be GC |
3049 | goto dma_map_err; /* should reuse desc w/o issues */ |
3050 | ||
ce736788 | 3051 | tx_q->tx_skbuff[entry] = NULL; |
f748be53 | 3052 | |
ce736788 | 3053 | tx_q->tx_skbuff_dma[entry].buf = des; |
f8be0d78 MW |
3054 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) |
3055 | desc->des0 = cpu_to_le32(des); | |
3056 | else | |
3057 | desc->des2 = cpu_to_le32(des); | |
f748be53 | 3058 | |
ce736788 JP |
3059 | tx_q->tx_skbuff_dma[entry].map_as_page = true; |
3060 | tx_q->tx_skbuff_dma[entry].len = len; | |
3061 | tx_q->tx_skbuff_dma[entry].last_segment = last_segment; | |
0e80bdc9 GC |
3062 | |
3063 | /* Prepare the descriptor and set the own bit too */ | |
4a7d666a | 3064 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, |
fe6af0e1 NC |
3065 | priv->mode, 1, last_segment, |
3066 | skb->len); | |
47dd7a54 GC |
3067 | } |
3068 | ||
05cf0d1b NC |
3069 | /* Only the last descriptor gets to point to the skb. */ |
3070 | tx_q->tx_skbuff[entry] = skb; | |
e3ad57c9 | 3071 | |
05cf0d1b NC |
3072 | /* We've used all descriptors we need for this skb, however, |
3073 | * advance cur_tx so that it references a fresh descriptor. | |
3074 | * ndo_start_xmit will fill this descriptor the next time it's | |
3075 | * called and stmmac_tx_clean may clean up to this descriptor. | |
3076 | */ | |
3077 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); | |
ce736788 | 3078 | tx_q->cur_tx = entry; |
47dd7a54 | 3079 | |
47dd7a54 | 3080 | if (netif_msg_pktdata(priv)) { |
d0225e7d AT |
3081 | void *tx_head; |
3082 | ||
38ddc59d LC |
3083 | netdev_dbg(priv->dev, |
3084 | "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", | |
ce736788 | 3085 | __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
38ddc59d | 3086 | entry, first, nfrags); |
83d7af64 | 3087 | |
c24602ef | 3088 | if (priv->extend_desc) |
ce736788 | 3089 | tx_head = (void *)tx_q->dma_etx; |
c24602ef | 3090 | else |
ce736788 | 3091 | tx_head = (void *)tx_q->dma_tx; |
d0225e7d AT |
3092 | |
3093 | priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); | |
c24602ef | 3094 | |
38ddc59d | 3095 | netdev_dbg(priv->dev, ">>> frame to be transmitted: "); |
47dd7a54 GC |
3096 | print_pkt(skb->data, skb->len); |
3097 | } | |
0e80bdc9 | 3098 | |
ce736788 | 3099 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
b3e51069 LC |
3100 | netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", |
3101 | __func__); | |
c22a3f48 | 3102 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); |
47dd7a54 GC |
3103 | } |
3104 | ||
3105 | dev->stats.tx_bytes += skb->len; | |
3106 | ||
0e80bdc9 GC |
3107 | /* According to the coalesce parameter the IC bit for the latest |
3108 | * segment is reset and the timer re-started to clean the tx status. | |
3109 | * This approach takes care about the fragments: desc is the first | |
3110 | * element in case of no SG. | |
3111 | */ | |
3112 | priv->tx_count_frames += nfrags + 1; | |
3113 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { | |
3114 | mod_timer(&priv->txtimer, | |
3115 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | |
3116 | } else { | |
3117 | priv->tx_count_frames = 0; | |
3118 | priv->hw->desc->set_tx_ic(desc); | |
3119 | priv->xstats.tx_set_ic_bit++; | |
891434b1 RK |
3120 | } |
3121 | ||
74abc9b1 | 3122 | skb_tx_timestamp(skb); |
3e82ce12 | 3123 | |
0e80bdc9 GC |
3124 | /* Ready to fill the first descriptor and set the OWN bit w/o any |
3125 | * problems because all the descriptors are actually ready to be | |
3126 | * passed to the DMA engine. | |
3127 | */ | |
3128 | if (likely(!is_jumbo)) { | |
3129 | bool last_segment = (nfrags == 0); | |
3130 | ||
f748be53 AT |
3131 | des = dma_map_single(priv->device, skb->data, |
3132 | nopaged_len, DMA_TO_DEVICE); | |
3133 | if (dma_mapping_error(priv->device, des)) | |
0e80bdc9 GC |
3134 | goto dma_map_err; |
3135 | ||
ce736788 | 3136 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
f8be0d78 MW |
3137 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) |
3138 | first->des0 = cpu_to_le32(des); | |
3139 | else | |
3140 | first->des2 = cpu_to_le32(des); | |
f748be53 | 3141 | |
ce736788 JP |
3142 | tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; |
3143 | tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; | |
0e80bdc9 GC |
3144 | |
3145 | if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | |
3146 | priv->hwts_tx_en)) { | |
3147 | /* declare that device is doing timestamping */ | |
3148 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | |
3149 | priv->hw->desc->enable_tx_timestamp(first); | |
3150 | } | |
3151 | ||
3152 | /* Prepare the first descriptor setting the OWN bit too */ | |
3153 | priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, | |
3154 | csum_insertion, priv->mode, 1, | |
fe6af0e1 | 3155 | last_segment, skb->len); |
0e80bdc9 GC |
3156 | |
3157 | /* The own bit must be the latest setting done when prepare the | |
3158 | * descriptor and then barrier is needed to make sure that | |
3159 | * all is coherent before granting the DMA engine. | |
3160 | */ | |
ad688cdb | 3161 | dma_wmb(); |
0e80bdc9 GC |
3162 | } |
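/* Ordering sketch: every fragment descriptor is published before the
 * head descriptor receives its OWN bit, and dma_wmb() separates the
 * two steps; a DMA engine polling the ring in between still finds the
 * head owned by the CPU and does not start on a half-built chain.
 */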
3163 | ||
c22a3f48 | 3164 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
f748be53 AT |
3165 | |
3166 | if (priv->synopsys_id < DWMAC_CORE_4_00) | |
3167 | priv->hw->dma->enable_dma_transmission(priv->ioaddr); | |
3168 | else | |
ce736788 JP |
3169 | priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, |
3170 | queue); | |
52f64fae | 3171 | |
362b37be | 3172 | return NETDEV_TX_OK; |
a9097a96 | 3173 | |
362b37be | 3174 | dma_map_err: |
38ddc59d | 3175 | netdev_err(priv->dev, "Tx DMA map failed\n"); |
362b37be GC |
3176 | dev_kfree_skb(skb); |
3177 | priv->dev->stats.tx_dropped++; | |
47dd7a54 GC |
3178 | return NETDEV_TX_OK; |
3179 | } | |
3180 | ||
b9381985 VB |
3181 | static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) |
3182 | { | |
3183 | struct ethhdr *ehdr; | |
3184 | u16 vlanid; | |
3185 | ||
3186 | if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) == | |
3187 | NETIF_F_HW_VLAN_CTAG_RX && | |
3188 | !__vlan_get_tag(skb, &vlanid)) { | |
3189 | /* pop the vlan tag */ | |
3190 | ehdr = (struct ethhdr *)skb->data; | |
3191 | memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2); | |
3192 | skb_pull(skb, VLAN_HLEN); | |
3193 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); | |
3194 | } | |
3195 | } | |
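/* Worked example of the tag pop above: an incoming frame laid out as
 *
 *	dst[6] src[6] 0x8100 tci[2] type[2] payload ...
 *
 * has its 12 address bytes moved up by VLAN_HLEN (4) and the head
 * pulled by 4, leaving dst[6] src[6] type[2] payload ... while the TCI
 * is kept out of band via __vlan_hwaccel_put_tag().
 */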
3196 | ||
3197 | ||
54139cf3 | 3198 | static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) |
120e87f9 | 3199 | { |
54139cf3 | 3200 | if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) |
120e87f9 GC |
3201 | return 0; |
3202 | ||
3203 | return 1; | |
3204 | } | |
3205 | ||
32ceabca | 3206 | /** |
732fdf0e | 3207 | * stmmac_rx_refill - refill used skb preallocated buffers |
32ceabca | 3208 | * @priv: driver private structure |
54139cf3 | 3209 | * @queue: RX queue index |
32ceabca GC |
3210 | * Description: this reallocates the skbs for the zero-copy reception | |
3211 | * process. | |
3212 | */ | |
54139cf3 | 3213 | static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) |
47dd7a54 | 3214 | { |
54139cf3 JP |
3215 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
3216 | int dirty = stmmac_rx_dirty(priv, queue); | |
3217 | unsigned int entry = rx_q->dirty_rx; | |
3218 | ||
47dd7a54 | 3219 | int bfsize = priv->dma_buf_sz; |
47dd7a54 | 3220 | |
e3ad57c9 | 3221 | while (dirty-- > 0) { |
c24602ef GC |
3222 | struct dma_desc *p; |
3223 | ||
3224 | if (priv->extend_desc) | |
54139cf3 | 3225 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
c24602ef | 3226 | else |
54139cf3 | 3227 | p = rx_q->dma_rx + entry; |
c24602ef | 3228 | |
54139cf3 | 3229 | if (likely(!rx_q->rx_skbuff[entry])) { |
47dd7a54 GC |
3230 | struct sk_buff *skb; |
3231 | ||
acb600de | 3232 | skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); |
120e87f9 GC |
3233 | if (unlikely(!skb)) { |
3234 | /* so for a while no zero-copy! */ | |
54139cf3 | 3235 | rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; |
120e87f9 GC |
3236 | if (unlikely(net_ratelimit())) |
3237 | dev_err(priv->device, | |
3238 | "fail to alloc skb entry %d\n", | |
3239 | entry); | |
47dd7a54 | 3240 | break; |
120e87f9 | 3241 | } |
47dd7a54 | 3242 | |
54139cf3 JP |
3243 | rx_q->rx_skbuff[entry] = skb; |
3244 | rx_q->rx_skbuff_dma[entry] = | |
47dd7a54 GC |
3245 | dma_map_single(priv->device, skb->data, bfsize, |
3246 | DMA_FROM_DEVICE); | |
362b37be | 3247 | if (dma_mapping_error(priv->device, |
54139cf3 | 3248 | rx_q->rx_skbuff_dma[entry])) { |
38ddc59d | 3249 | netdev_err(priv->dev, "Rx DMA map failed\n"); |
362b37be GC |
3250 | dev_kfree_skb(skb); |
3251 | break; | |
3252 | } | |
286a8372 | 3253 | |
f748be53 | 3254 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { |
54139cf3 | 3255 | p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); |
f748be53 AT |
3256 | p->des1 = 0; |
3257 | } else { | |
54139cf3 | 3258 | p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); |
f748be53 AT |
3259 | } |
3260 | if (priv->hw->mode->refill_desc3) | |
54139cf3 | 3261 | priv->hw->mode->refill_desc3(rx_q, p); |
286a8372 | 3262 | |
54139cf3 JP |
3263 | if (rx_q->rx_zeroc_thresh > 0) |
3264 | rx_q->rx_zeroc_thresh--; | |
120e87f9 | 3265 | |
b3e51069 LC |
3266 | netif_dbg(priv, rx_status, priv->dev, |
3267 | "refill entry #%d\n", entry); | |
47dd7a54 | 3268 | } |
ad688cdb | 3269 | dma_wmb(); |
f748be53 AT |
3270 | |
3271 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) | |
3272 | priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0); | |
3273 | else | |
3274 | priv->hw->desc->set_rx_owner(p); | |
3275 | ||
ad688cdb | 3276 | dma_wmb(); |
e3ad57c9 GC |
3277 | |
3278 | entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); | |
47dd7a54 | 3279 | } |
54139cf3 | 3280 | rx_q->dirty_rx = entry; |
47dd7a54 GC |
3281 | } |
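/* Bookkeeping sketch (indexes illustrative): dirty_rx chases cur_rx;
 * e.g. cur_rx = 37 and dirty_rx = 33 means entries 33..36 were consumed
 * by stmmac_rx() and are re-armed here with fresh skbs before the own
 * bit hands them back to the DMA.
 */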
3282 | ||
32ceabca | 3283 | /** |
732fdf0e | 3284 | * stmmac_rx - manage the receive process |
32ceabca | 3285 | * @priv: driver private structure |
54139cf3 JP |
3286 | * @limit: napi budget | |
3287 | * @queue: RX queue index. | |
32ceabca GC |
3288 | * Description: this is the function called by the NAPI poll method. | |
3289 | * It gets all the frames inside the ring. | |
3290 | */ | |
54139cf3 | 3291 | static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) |
47dd7a54 | 3292 | { |
54139cf3 JP |
3293 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
3294 | unsigned int entry = rx_q->cur_rx; | |
3295 | int coe = priv->hw->rx_csum; | |
47dd7a54 GC |
3296 | unsigned int next_entry; |
3297 | unsigned int count = 0; | |
47dd7a54 | 3298 | |
83d7af64 | 3299 | if (netif_msg_rx_status(priv)) { |
d0225e7d AT |
3300 | void *rx_head; |
3301 | ||
38ddc59d | 3302 | netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); |
c24602ef | 3303 | if (priv->extend_desc) |
54139cf3 | 3304 | rx_head = (void *)rx_q->dma_erx; |
c24602ef | 3305 | else |
54139cf3 | 3306 | rx_head = (void *)rx_q->dma_rx; |
d0225e7d AT |
3307 | |
3308 | priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); | |
47dd7a54 | 3309 | } |
c24602ef | 3310 | while (count < limit) { |
47dd7a54 | 3311 | int status; |
9401bb5c | 3312 | struct dma_desc *p; |
ba1ffd74 | 3313 | struct dma_desc *np; |
47dd7a54 | 3314 | |
c24602ef | 3315 | if (priv->extend_desc) |
54139cf3 | 3316 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
c24602ef | 3317 | else |
54139cf3 | 3318 | p = rx_q->dma_rx + entry; |
c24602ef | 3319 | |
c1fa3212 FG |
3320 | /* read the status of the incoming frame */ |
3321 | status = priv->hw->desc->rx_status(&priv->dev->stats, | |
3322 | &priv->xstats, p); | |
3323 | /* check if managed by the DMA otherwise go ahead */ | |
3324 | if (unlikely(status & dma_own)) | |
47dd7a54 GC |
3325 | break; |
3326 | ||
3327 | count++; | |
3328 | ||
54139cf3 JP |
3329 | rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); |
3330 | next_entry = rx_q->cur_rx; | |
e3ad57c9 | 3331 | |
c24602ef | 3332 | if (priv->extend_desc) |
54139cf3 | 3333 | np = (struct dma_desc *)(rx_q->dma_erx + next_entry); |
c24602ef | 3334 | else |
54139cf3 | 3335 | np = rx_q->dma_rx + next_entry; |
ba1ffd74 GC |
3336 | |
3337 | prefetch(np); | |
47dd7a54 | 3338 | |
c24602ef GC |
3339 | if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) |
3340 | priv->hw->desc->rx_extended_status(&priv->dev->stats, | |
3341 | &priv->xstats, | |
54139cf3 | 3342 | rx_q->dma_erx + |
c24602ef | 3343 | entry); |
891434b1 | 3344 | if (unlikely(status == discard_frame)) { |
47dd7a54 | 3345 | priv->dev->stats.rx_errors++; |
891434b1 | 3346 | if (priv->hwts_rx_en && !priv->extend_desc) { |
8d45e42b | 3347 | /* DESC2 & DESC3 will be overwritten by device |
891434b1 RK |
3348 | * with timestamp value, hence reinitialize |
3349 | * them in the stmmac_rx_refill() function so that | |
3350 | * the device can reuse them. | |
3351 | */ | |
54139cf3 | 3352 | rx_q->rx_skbuff[entry] = NULL; |
891434b1 | 3353 | dma_unmap_single(priv->device, |
54139cf3 | 3354 | rx_q->rx_skbuff_dma[entry], |
ceb69499 GC |
3355 | priv->dma_buf_sz, |
3356 | DMA_FROM_DEVICE); | |
891434b1 RK |
3357 | } |
3358 | } else { | |
47dd7a54 | 3359 | struct sk_buff *skb; |
3eeb2997 | 3360 | int frame_len; |
f748be53 AT |
3361 | unsigned int des; |
3362 | ||
3363 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) | |
f8be0d78 | 3364 | des = le32_to_cpu(p->des0); |
f748be53 | 3365 | else |
f8be0d78 | 3366 | des = le32_to_cpu(p->des2); |
47dd7a54 | 3367 | |
ceb69499 GC |
3368 | frame_len = priv->hw->desc->get_rx_frame_len(p, coe); |
3369 | ||
8d45e42b | 3370 | /* If frame length is greater than skb buffer size |
f748be53 AT |
3371 | * (preallocated during init) then the packet is |
3372 | * ignored | |
3373 | */ | |
e527c4a7 | 3374 | if (frame_len > priv->dma_buf_sz) { |
38ddc59d LC |
3375 | netdev_err(priv->dev, |
3376 | "len %d larger than size (%d)\n", | |
3377 | frame_len, priv->dma_buf_sz); | |
e527c4a7 GC |
3378 | priv->dev->stats.rx_length_errors++; |
3379 | break; | |
3380 | } | |
3381 | ||
3eeb2997 | 3382 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
ceb69499 GC |
3383 | * Type frames (LLC/LLC-SNAP) |
3384 | */ | |
3eeb2997 GC |
3385 | if (unlikely(status != llc_snap)) |
3386 | frame_len -= ETH_FCS_LEN; | |
47dd7a54 | 3387 | |
83d7af64 | 3388 | if (netif_msg_rx_status(priv)) { |
38ddc59d LC |
3389 | netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", |
3390 | p, entry, des); | |
83d7af64 | 3391 | if (frame_len > ETH_FRAME_LEN) |
38ddc59d LC |
3392 | netdev_dbg(priv->dev, "frame size %d, COE: %d\n", |
3393 | frame_len, status); | |
83d7af64 | 3394 | } |
22ad3838 | 3395 | |
f748be53 AT |
3396 | /* The zero-copy is always used for all the sizes |
3397 | * in case of GMAC4 because it needs | |
3398 | * to refill the used descriptors, always. | |
3399 | */ | |
3400 | if (unlikely(!priv->plat->has_gmac4 && | |
3401 | ((frame_len < priv->rx_copybreak) || | |
54139cf3 | 3402 | stmmac_rx_threshold_count(rx_q)))) { |
22ad3838 GC |
3403 | skb = netdev_alloc_skb_ip_align(priv->dev, |
3404 | frame_len); | |
3405 | if (unlikely(!skb)) { | |
3406 | if (net_ratelimit()) | |
3407 | dev_warn(priv->device, | |
3408 | "packet dropped\n"); | |
3409 | priv->dev->stats.rx_dropped++; | |
3410 | break; | |
3411 | } | |
3412 | ||
3413 | dma_sync_single_for_cpu(priv->device, | |
54139cf3 | 3414 | rx_q->rx_skbuff_dma |
22ad3838 GC |
3415 | [entry], frame_len, |
3416 | DMA_FROM_DEVICE); | |
3417 | skb_copy_to_linear_data(skb, | |
54139cf3 | 3418 | rx_q-> |
22ad3838 GC |
3419 | rx_skbuff[entry]->data, |
3420 | frame_len); | |
3421 | ||
3422 | skb_put(skb, frame_len); | |
3423 | dma_sync_single_for_device(priv->device, | |
54139cf3 | 3424 | rx_q->rx_skbuff_dma |
22ad3838 GC |
3425 | [entry], frame_len, |
3426 | DMA_FROM_DEVICE); | |
3427 | } else { | |
54139cf3 | 3428 | skb = rx_q->rx_skbuff[entry]; |
22ad3838 | 3429 | if (unlikely(!skb)) { |
38ddc59d LC |
3430 | netdev_err(priv->dev, |
3431 | "%s: Inconsistent Rx chain\n", | |
3432 | priv->dev->name); | |
22ad3838 GC |
3433 | priv->dev->stats.rx_dropped++; |
3434 | break; | |
3435 | } | |
3436 | prefetch(skb->data - NET_IP_ALIGN); | |
54139cf3 JP |
3437 | rx_q->rx_skbuff[entry] = NULL; |
3438 | rx_q->rx_zeroc_thresh++; | |
22ad3838 GC |
3439 | |
3440 | skb_put(skb, frame_len); | |
3441 | dma_unmap_single(priv->device, | |
54139cf3 | 3442 | rx_q->rx_skbuff_dma[entry], |
22ad3838 GC |
3443 | priv->dma_buf_sz, |
3444 | DMA_FROM_DEVICE); | |
47dd7a54 | 3445 | } |
47dd7a54 | 3446 | |
47dd7a54 | 3447 | if (netif_msg_pktdata(priv)) { |
38ddc59d LC |
3448 | netdev_dbg(priv->dev, "frame received (%dbytes)", |
3449 | frame_len); | |
47dd7a54 GC |
3450 | print_pkt(skb->data, frame_len); |
3451 | } | |
83d7af64 | 3452 | |
ba1ffd74 GC |
3453 | stmmac_get_rx_hwtstamp(priv, p, np, skb); |
3454 | ||
b9381985 VB |
3455 | stmmac_rx_vlan(priv->dev, skb); |
3456 | ||
47dd7a54 GC |
3457 | skb->protocol = eth_type_trans(skb, priv->dev); |
3458 | ||
ceb69499 | 3459 | if (unlikely(!coe)) |
bc8acf2c | 3460 | skb_checksum_none_assert(skb); |
62a2ab93 | 3461 | else |
47dd7a54 | 3462 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
62a2ab93 | 3463 | |
c22a3f48 | 3464 | napi_gro_receive(&rx_q->napi, skb); |
47dd7a54 GC |
3465 | |
3466 | priv->dev->stats.rx_packets++; | |
3467 | priv->dev->stats.rx_bytes += frame_len; | |
47dd7a54 GC |
3468 | } |
3469 | entry = next_entry; | |
47dd7a54 GC |
3470 | } |
3471 | ||
54139cf3 | 3472 | stmmac_rx_refill(priv, queue); |
47dd7a54 GC |
3473 | |
3474 | priv->xstats.rx_pkt_n += count; | |
3475 | ||
3476 | return count; | |
3477 | } | |
3478 | ||
3479 | /** | |
3480 | * stmmac_poll - stmmac poll method (NAPI) | |
3481 | * @napi : pointer to the napi structure. | |
3482 | * @budget : maximum number of packets that the current CPU can receive from | |
3483 | * all interfaces. | |
3484 | * Description : | |
9125cdd1 | 3485 | * To look at the incoming frames and clear the tx resources. |
47dd7a54 GC |
3486 | */ |
3487 | static int stmmac_poll(struct napi_struct *napi, int budget) | |
3488 | { | |
c22a3f48 JP |
3489 | struct stmmac_rx_queue *rx_q = |
3490 | container_of(napi, struct stmmac_rx_queue, napi); | |
3491 | struct stmmac_priv *priv = rx_q->priv_data; | |
ce736788 | 3492 | u32 tx_count = priv->plat->tx_queues_to_use; |
c22a3f48 | 3493 | u32 chan = rx_q->queue_index; |
54139cf3 | 3494 | int work_done = 0; |
c22a3f48 | 3495 | u32 queue; |
47dd7a54 | 3496 | |
9125cdd1 | 3497 | priv->xstats.napi_poll++; |
ce736788 JP |
3498 | |
3499 | /* check all the queues */ | |
3500 | for (queue = 0; queue < tx_count; queue++) | |
3501 | stmmac_tx_clean(priv, queue); | |
3502 | ||
c22a3f48 | 3503 | work_done = stmmac_rx(priv, budget, rx_q->queue_index); |
47dd7a54 | 3504 | if (work_done < budget) { |
6ad20165 | 3505 | napi_complete_done(napi, work_done); |
4f513ecd | 3506 | stmmac_enable_dma_irq(priv, chan); |
47dd7a54 GC |
3507 | } |
3508 | return work_done; | |
3509 | } | |
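/* NAPI contract sketch: when stmmac_rx() uses less than the budget the
 * poll completes, napi_complete_done() is called and the per-channel
 * DMA interrupt is unmasked again; returning work_done == budget keeps
 * this instance scheduled for another poll instead.
 */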
3510 | ||
3511 | /** | |
3512 | * stmmac_tx_timeout | |
3513 | * @dev : Pointer to net device structure | |
3514 | * Description: this function is called when a packet transmission fails to | |
7284a3f1 | 3515 | * complete within a reasonable time. The driver will mark the error in the |
47dd7a54 GC |
3516 | * netdev structure and arrange for the device to be reset to a sane state |
3517 | * in order to transmit a new packet. | |
3518 | */ | |
3519 | static void stmmac_tx_timeout(struct net_device *dev) | |
3520 | { | |
3521 | struct stmmac_priv *priv = netdev_priv(dev); | |
ce736788 JP |
3522 | u32 tx_count = priv->plat->tx_queues_to_use; |
3523 | u32 chan; | |
47dd7a54 GC |
3524 | |
3525 | /* Clear Tx resources and restart transmitting again */ | |
ce736788 JP |
3526 | for (chan = 0; chan < tx_count; chan++) |
3527 | stmmac_tx_err(priv, chan); | |
47dd7a54 GC |
3528 | } |
3529 | ||
47dd7a54 | 3530 | /** |
01789349 | 3531 | * stmmac_set_rx_mode - entry point for multicast addressing |
47dd7a54 GC |
3532 | * @dev : pointer to the device structure |
3533 | * Description: | |
3534 | * This function is a driver entry point which gets called by the kernel | |
3535 | * whenever multicast addresses must be enabled/disabled. | |
3536 | * Return value: | |
3537 | * void. | |
3538 | */ | |
01789349 | 3539 | static void stmmac_set_rx_mode(struct net_device *dev) |
47dd7a54 GC |
3540 | { |
3541 | struct stmmac_priv *priv = netdev_priv(dev); | |
3542 | ||
3b57de95 | 3543 | priv->hw->mac->set_filter(priv->hw, dev); |
47dd7a54 GC |
3544 | } |
3545 | ||
3546 | /** | |
3547 | * stmmac_change_mtu - entry point to change MTU size for the device. | |
3548 | * @dev : device pointer. | |
3549 | * @new_mtu : the new MTU size for the device. | |
3550 | * Description: the Maximum Transfer Unit (MTU) is used by the network layer | |
3551 | * to drive packet transmission. Ethernet has an MTU of 1500 octets | |
3552 | * (ETH_DATA_LEN). This value can be changed with ifconfig. | |
3553 | * Return value: | |
3554 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | |
3555 | * file on failure. | |
3556 | */ | |
3557 | static int stmmac_change_mtu(struct net_device *dev, int new_mtu) | |
3558 | { | |
38ddc59d LC |
3559 | struct stmmac_priv *priv = netdev_priv(dev); |
3560 | ||
47dd7a54 | 3561 | if (netif_running(dev)) { |
38ddc59d | 3562 | netdev_err(priv->dev, "must be stopped to change its MTU\n"); |
47dd7a54 GC |
3563 | return -EBUSY; |
3564 | } | |
3565 | ||
5e982f3b | 3566 | dev->mtu = new_mtu; |
f748be53 | 3567 | |
5e982f3b MM |
3568 | netdev_update_features(dev); |
3569 | ||
3570 | return 0; | |
3571 | } | |
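/* Usage sketch (interface name hypothetical); since the function
 * returns -EBUSY on a running interface, the MTU change goes through a
 * down/up cycle:
 *
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 7000
 *	ip link set dev eth0 up
 */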
3572 | ||
c8f44aff | 3573 | static netdev_features_t stmmac_fix_features(struct net_device *dev, |
ceb69499 | 3574 | netdev_features_t features) |
5e982f3b MM |
3575 | { |
3576 | struct stmmac_priv *priv = netdev_priv(dev); | |
3577 | ||
38912bdb | 3578 | if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) |
5e982f3b | 3579 | features &= ~NETIF_F_RXCSUM; |
d2afb5bd | 3580 | |
5e982f3b | 3581 | if (!priv->plat->tx_coe) |
a188222b | 3582 | features &= ~NETIF_F_CSUM_MASK; |
5e982f3b | 3583 | |
ebbb293f GC |
3584 | /* Some GMAC devices have buggy Jumbo frame support that | |
3585 | * needs to have the Tx COE disabled for oversized frames | |
3586 | * (due to limited buffer sizes). In this case we disable | |
8d45e42b | 3587 | * the TX csum insertion in the TDES and do not use SF. |
ceb69499 | 3588 | */ |
5e982f3b | 3589 | if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) |
a188222b | 3590 | features &= ~NETIF_F_CSUM_MASK; |
ebbb293f | 3591 | |
f748be53 AT |
3592 | /* Disable tso if asked by ethtool */ |
3593 | if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { | |
3594 | if (features & NETIF_F_TSO) | |
3595 | priv->tso = true; | |
3596 | else | |
3597 | priv->tso = false; | |
3598 | } | |
3599 | ||
5e982f3b | 3600 | return features; |
47dd7a54 GC |
3601 | } |
3602 | ||
d2afb5bd GC |
3603 | static int stmmac_set_features(struct net_device *netdev, |
3604 | netdev_features_t features) | |
3605 | { | |
3606 | struct stmmac_priv *priv = netdev_priv(netdev); | |
3607 | ||
3608 | /* Keep the COE Type in case checksumming is supported */ | |
3609 | if (features & NETIF_F_RXCSUM) | |
3610 | priv->hw->rx_csum = priv->plat->rx_coe; | |
3611 | else | |
3612 | priv->hw->rx_csum = 0; | |
3613 | /* No check needed because rx_coe has been set before and it will be | |
3614 | * fixed in case of issue. | |
3615 | */ | |
3616 | priv->hw->mac->rx_ipc(priv->hw); | |
3617 | ||
3618 | return 0; | |
3619 | } | |
3620 | ||
32ceabca GC |
3621 | /** |
3622 | * stmmac_interrupt - main ISR | |
3623 | * @irq: interrupt number. | |
3624 | * @dev_id: to pass the net device pointer. | |
3625 | * Description: this is the main driver interrupt service routine. | |
732fdf0e GC |
3626 | * It can call: |
3627 | * o DMA service routine (to manage incoming frame reception and transmission | |
3628 | * status) | |
3629 | * o Core interrupts to manage: remote wake-up, management counter, LPI | |
3630 | * interrupts. | |
32ceabca | 3631 | */ |
47dd7a54 GC |
3632 | static irqreturn_t stmmac_interrupt(int irq, void *dev_id) |
3633 | { | |
3634 | struct net_device *dev = (struct net_device *)dev_id; | |
3635 | struct stmmac_priv *priv = netdev_priv(dev); | |
7bac4e1e JP |
3636 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
3637 | u32 tx_cnt = priv->plat->tx_queues_to_use; | |
3638 | u32 queues_count; | |
3639 | u32 queue; | |
3640 | ||
3641 | queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; | |
47dd7a54 | 3642 | |
89f7f2cf SK |
3643 | if (priv->irq_wake) |
3644 | pm_wakeup_event(priv->device, 0); | |
3645 | ||
47dd7a54 | 3646 | if (unlikely(!dev)) { |
38ddc59d | 3647 | netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); |
47dd7a54 GC |
3648 | return IRQ_NONE; |
3649 | } | |
3650 | ||
d765955d | 3651 | /* To handle GMAC own interrupts */ |
f748be53 | 3652 | if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { |
7ed24bbe | 3653 | int status = priv->hw->mac->host_irq_status(priv->hw, |
0982a0f6 | 3654 | &priv->xstats); |
8f71a88d | 3655 | |
d765955d | 3656 | if (unlikely(status)) { |
d765955d | 3657 | /* For LPI we need to save the tx status */ |
0982a0f6 | 3658 | if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) |
d765955d | 3659 | priv->tx_path_in_lpi_mode = true; |
0982a0f6 | 3660 | if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) |
d765955d | 3661 | priv->tx_path_in_lpi_mode = false; |
7bac4e1e JP |
3662 | } |
3663 | ||
3664 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { | |
3665 | for (queue = 0; queue < queues_count; queue++) { | |
54139cf3 JP |
3666 | struct stmmac_rx_queue *rx_q = |
3667 | &priv->rx_queue[queue]; | |
3668 | ||
7bac4e1e JP |
3669 | status |= |
3670 | priv->hw->mac->host_mtl_irq_status(priv->hw, | |
3671 | queue); | |
3672 | ||
3673 | if (status & CORE_IRQ_MTL_RX_OVERFLOW && | |
3674 | priv->hw->dma->set_rx_tail_ptr) | |
3675 | priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, | |
54139cf3 | 3676 | rx_q->rx_tail_addr, |
7bac4e1e JP |
3677 | queue); |
3678 | } | |
d765955d | 3679 | } |
70523e63 GC |
3680 | |
3681 | /* PCS link status */ | |
3fe5cadb | 3682 | if (priv->hw->pcs) { |
70523e63 GC |
3683 | if (priv->xstats.pcs_link) |
3684 | netif_carrier_on(dev); | |
3685 | else | |
3686 | netif_carrier_off(dev); | |
3687 | } | |
d765955d | 3688 | } |
aec7ff27 | 3689 | |
d765955d | 3690 | /* To handle DMA interrupts */ |
aec7ff27 | 3691 | stmmac_dma_interrupt(priv); |
47dd7a54 GC |
3692 | |
3693 | return IRQ_HANDLED; | |
3694 | } | |
3695 | ||
3696 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
3697 | /* Polling receive - used by NETCONSOLE and other diagnostic tools | |
ceb69499 GC |
3698 | * to allow network I/O with interrupts disabled. |
3699 | */ | |
47dd7a54 GC |
3700 | static void stmmac_poll_controller(struct net_device *dev) |
3701 | { | |
3702 | disable_irq(dev->irq); | |
3703 | stmmac_interrupt(dev->irq, dev); | |
3704 | enable_irq(dev->irq); | |
3705 | } | |
3706 | #endif | |
3707 | ||
3708 | /** | |
3709 | * stmmac_ioctl - Entry point for the Ioctl | |
3710 | * @dev: Device pointer. | |
3711 | * @rq: An IOCTL-specific structure that can contain a pointer to | |
3712 | * a proprietary structure used to pass information to the driver. | |
3713 | * @cmd: IOCTL command | |
3714 | * Description: | |
32ceabca | 3715 | * Currently it supports the phy_mii_ioctl(...) and HW time stamping. |
47dd7a54 GC |
3716 | */ |
3717 | static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
3718 | { | |
891434b1 | 3719 | int ret = -EOPNOTSUPP; |
47dd7a54 GC |
3720 | |
3721 | if (!netif_running(dev)) | |
3722 | return -EINVAL; | |
3723 | ||
891434b1 RK |
3724 | switch (cmd) { |
3725 | case SIOCGMIIPHY: | |
3726 | case SIOCGMIIREG: | |
3727 | case SIOCSMIIREG: | |
d6d50c7e | 3728 | if (!dev->phydev) |
891434b1 | 3729 | return -EINVAL; |
d6d50c7e | 3730 | ret = phy_mii_ioctl(dev->phydev, rq, cmd); |
891434b1 RK |
3731 | break; |
3732 | case SIOCSHWTSTAMP: | |
3733 | ret = stmmac_hwtstamp_ioctl(dev, rq); | |
3734 | break; | |
3735 | default: | |
3736 | break; | |
3737 | } | |
28b04113 | 3738 | |
47dd7a54 GC |
3739 | return ret; |
3740 | } | |
3741 | ||
50fb4f74 | 3742 | #ifdef CONFIG_DEBUG_FS |
7ac29055 | 3743 | static struct dentry *stmmac_fs_dir; |
7ac29055 | 3744 | |
c24602ef | 3745 | static void sysfs_display_ring(void *head, int size, int extend_desc, |
ceb69499 | 3746 | struct seq_file *seq) |
7ac29055 | 3747 | { |
7ac29055 | 3748 | int i; |
ceb69499 GC |
3749 | struct dma_extended_desc *ep = (struct dma_extended_desc *)head; |
3750 | struct dma_desc *p = (struct dma_desc *)head; | |
7ac29055 | 3751 | |
c24602ef | 3752 | for (i = 0; i < size; i++) { |
c24602ef | 3753 | if (extend_desc) { |
c24602ef | 3754 | seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
ceb69499 | 3755 | i, (unsigned int)virt_to_phys(ep), |
f8be0d78 MW |
3756 | le32_to_cpu(ep->basic.des0), |
3757 | le32_to_cpu(ep->basic.des1), | |
3758 | le32_to_cpu(ep->basic.des2), | |
3759 | le32_to_cpu(ep->basic.des3)); | |
c24602ef GC |
3760 | ep++; |
3761 | } else { | |
c24602ef | 3762 | seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
66c25f6e | 3763 | i, (unsigned int)virt_to_phys(p), |
f8be0d78 MW |
3764 | le32_to_cpu(p->des0), le32_to_cpu(p->des1), |
3765 | le32_to_cpu(p->des2), le32_to_cpu(p->des3)); | |
c24602ef GC |
3766 | p++; |
3767 | } | |
7ac29055 GC |
3768 | seq_printf(seq, "\n"); |
3769 | } | |
c24602ef | 3770 | } |
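/* Sample of the format emitted above (addresses and descriptor fields
 * illustrative):
 *
 *	0 [0x2fa1a000]: 0x81000000 0x0 0x600 0x2e900010
 *	1 [0x2fa1a010]: 0x81000000 0x0 0x600 0x2e900810
 */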
7ac29055 | 3771 | |
c24602ef GC |
3772 | static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) |
3773 | { | |
3774 | struct net_device *dev = seq->private; | |
3775 | struct stmmac_priv *priv = netdev_priv(dev); | |
54139cf3 | 3776 | u32 rx_count = priv->plat->rx_queues_to_use; |
ce736788 | 3777 | u32 tx_count = priv->plat->tx_queues_to_use; |
54139cf3 JP |
3778 | u32 queue; |
3779 | ||
3780 | for (queue = 0; queue < rx_count; queue++) { | |
3781 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
3782 | ||
3783 | seq_printf(seq, "RX Queue %d:\n", queue); | |
3784 | ||
3785 | if (priv->extend_desc) { | |
3786 | seq_printf(seq, "Extended descriptor ring:\n"); | |
3787 | sysfs_display_ring((void *)rx_q->dma_erx, | |
3788 | DMA_RX_SIZE, 1, seq); | |
3789 | } else { | |
3790 | seq_printf(seq, "Descriptor ring:\n"); | |
3791 | sysfs_display_ring((void *)rx_q->dma_rx, | |
3792 | DMA_RX_SIZE, 0, seq); | |
3793 | } | |
3794 | } | |
aff3d9ef | 3795 | |
ce736788 JP |
3796 | for (queue = 0; queue < tx_count; queue++) { |
3797 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
3798 | ||
3799 | seq_printf(seq, "TX Queue %d:\n", queue); | |
3800 | ||
3801 | if (priv->extend_desc) { | |
3802 | seq_printf(seq, "Extended descriptor ring:\n"); | |
3803 | sysfs_display_ring((void *)tx_q->dma_etx, | |
3804 | DMA_TX_SIZE, 1, seq); | |
3805 | } else { | |
3806 | seq_printf(seq, "Descriptor ring:\n"); | |
3807 | sysfs_display_ring((void *)tx_q->dma_tx, | |
3808 | DMA_TX_SIZE, 0, seq); | |
3809 | } | |
7ac29055 GC |
3810 | } |
3811 | ||
3812 | return 0; | |
3813 | } | |
3814 | ||
3815 | static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file) | |
3816 | { | |
3817 | return single_open(file, stmmac_sysfs_ring_read, inode->i_private); | |
3818 | } | |
3819 | ||
22d3efe5 PM |
3820 | /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */ |
3821 | ||
7ac29055 GC |
3822 | static const struct file_operations stmmac_rings_status_fops = { |
3823 | .owner = THIS_MODULE, | |
3824 | .open = stmmac_sysfs_ring_open, | |
3825 | .read = seq_read, | |
3826 | .llseek = seq_lseek, | |
74863948 | 3827 | .release = single_release, |
7ac29055 GC |
3828 | }; |
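/* E.g., per the path noted above (interface name hypothetical):
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 */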
3829 | ||
e7434821 GC |
3830 | static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) |
3831 | { | |
3832 | struct net_device *dev = seq->private; | |
3833 | struct stmmac_priv *priv = netdev_priv(dev); | |
3834 | ||
19e30c14 | 3835 | if (!priv->hw_cap_support) { |
e7434821 GC |
3836 | seq_printf(seq, "DMA HW features not supported\n"); |
3837 | return 0; | |
3838 | } | |
3839 | ||
3840 | seq_printf(seq, "==============================\n"); | |
3841 | seq_printf(seq, "\tDMA HW features\n"); | |
3842 | seq_printf(seq, "==============================\n"); | |
3843 | ||
22d3efe5 | 3844 | seq_printf(seq, "\t10/100 Mbps: %s\n", |
e7434821 | 3845 | (priv->dma_cap.mbps_10_100) ? "Y" : "N"); |
22d3efe5 | 3846 | seq_printf(seq, "\t1000 Mbps: %s\n", |
e7434821 | 3847 | (priv->dma_cap.mbps_1000) ? "Y" : "N"); |
22d3efe5 | 3848 | seq_printf(seq, "\tHalf duplex: %s\n", |
e7434821 GC |
3849 | (priv->dma_cap.half_duplex) ? "Y" : "N"); |
3850 | seq_printf(seq, "\tHash Filter: %s\n", | |
3851 | (priv->dma_cap.hash_filter) ? "Y" : "N"); | |
3852 | seq_printf(seq, "\tMultiple MAC address registers: %s\n", | |
3853 | (priv->dma_cap.multi_addr) ? "Y" : "N"); | |
8d45e42b | 3854 | seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", |
e7434821 GC |
3855 | (priv->dma_cap.pcs) ? "Y" : "N"); |
3856 | seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", | |
3857 | (priv->dma_cap.sma_mdio) ? "Y" : "N"); | |
3858 | seq_printf(seq, "\tPMT Remote wake up: %s\n", | |
3859 | (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); | |
3860 | seq_printf(seq, "\tPMT Magic Frame: %s\n", | |
3861 | (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); | |
3862 | seq_printf(seq, "\tRMON module: %s\n", | |
3863 | (priv->dma_cap.rmon) ? "Y" : "N"); | |
3864 | seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", | |
3865 | (priv->dma_cap.time_stamp) ? "Y" : "N"); | |
22d3efe5 | 3866 | seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", |
e7434821 | 3867 | (priv->dma_cap.atime_stamp) ? "Y" : "N"); |
22d3efe5 | 3868 | seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", |
e7434821 GC |
3869 | (priv->dma_cap.eee) ? "Y" : "N"); |
3870 | seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); | |
3871 | seq_printf(seq, "\tChecksum Offload in TX: %s\n", | |
3872 | (priv->dma_cap.tx_coe) ? "Y" : "N"); | |
f748be53 AT |
3873 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
3874 | seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", | |
3875 | (priv->dma_cap.rx_coe) ? "Y" : "N"); | |
3876 | } else { | |
3877 | seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", | |
3878 | (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); | |
3879 | seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", | |
3880 | (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); | |
3881 | } | |
e7434821 GC |
3882 | seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", |
3883 | (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); | |
3884 | seq_printf(seq, "\tNumber of Additional RX channel: %d\n", | |
3885 | priv->dma_cap.number_rx_channel); | |
3886 | seq_printf(seq, "\tNumber of Additional TX channel: %d\n", | |
3887 | priv->dma_cap.number_tx_channel); | |
3888 | seq_printf(seq, "\tEnhanced descriptors: %s\n", | |
3889 | (priv->dma_cap.enh_desc) ? "Y" : "N"); | |
3890 | ||
3891 | return 0; | |
3892 | } | |
3893 | ||
3894 | static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) | |
3895 | { | |
3896 | return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); | |
3897 | } | |
3898 | ||
3899 | static const struct file_operations stmmac_dma_cap_fops = { | |
3900 | .owner = THIS_MODULE, | |
3901 | .open = stmmac_sysfs_dma_cap_open, | |
3902 | .read = seq_read, | |
3903 | .llseek = seq_lseek, | |
74863948 | 3904 | .release = single_release, |
e7434821 GC |
3905 | }; |
3906 | ||
7ac29055 GC |
3907 | static int stmmac_init_fs(struct net_device *dev) |
3908 | { | |
466c5ac8 MO |
3909 | struct stmmac_priv *priv = netdev_priv(dev); |
3910 | ||
3911 | /* Create per netdev entries */ | |
3912 | priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); | |
7ac29055 | 3913 | |
466c5ac8 | 3914 | if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { |
38ddc59d | 3915 | netdev_err(priv->dev, "ERROR failed to create debugfs directory\n"); |
7ac29055 GC |
3916 | |
3917 | return -ENOMEM; | |
3918 | } | |
3919 | ||
3920 | /* Entry to report DMA RX/TX rings */ | |
466c5ac8 MO |
3921 | priv->dbgfs_rings_status = |
3922 | debugfs_create_file("descriptors_status", S_IRUGO, | |
3923 | priv->dbgfs_dir, dev, | |
3924 | &stmmac_rings_status_fops); | |
7ac29055 | 3925 | |
466c5ac8 | 3926 | if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { |
38ddc59d | 3927 | netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n"); |
466c5ac8 | 3928 | debugfs_remove_recursive(priv->dbgfs_dir); |
7ac29055 GC |
3929 | |
3930 | return -ENOMEM; | |
3931 | } | |
3932 | ||
e7434821 | 3933 | /* Entry to report the DMA HW features */ |
466c5ac8 MO |
3934 | priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, |
3935 | priv->dbgfs_dir, | |
3936 | dev, &stmmac_dma_cap_fops); | |
e7434821 | 3937 | |
466c5ac8 | 3938 | if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { |
38ddc59d | 3939 | netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); |
466c5ac8 | 3940 | debugfs_remove_recursive(priv->dbgfs_dir); |
e7434821 GC |
3941 | |
3942 | return -ENOMEM; | |
3943 | } | |
3944 | ||
7ac29055 GC |
3945 | return 0; |
3946 | } | |
3947 | ||
466c5ac8 | 3948 | static void stmmac_exit_fs(struct net_device *dev) |
7ac29055 | 3949 | { |
466c5ac8 MO |
3950 | struct stmmac_priv *priv = netdev_priv(dev); |
3951 | ||
3952 | debugfs_remove_recursive(priv->dbgfs_dir); | |
7ac29055 | 3953 | } |
50fb4f74 | 3954 | #endif /* CONFIG_DEBUG_FS */ |
7ac29055 | 3955 | |
47dd7a54 GC |
3956 | static const struct net_device_ops stmmac_netdev_ops = { |
3957 | .ndo_open = stmmac_open, | |
3958 | .ndo_start_xmit = stmmac_xmit, | |
3959 | .ndo_stop = stmmac_release, | |
3960 | .ndo_change_mtu = stmmac_change_mtu, | |
5e982f3b | 3961 | .ndo_fix_features = stmmac_fix_features, |
d2afb5bd | 3962 | .ndo_set_features = stmmac_set_features, |
01789349 | 3963 | .ndo_set_rx_mode = stmmac_set_rx_mode, |
47dd7a54 GC |
3964 | .ndo_tx_timeout = stmmac_tx_timeout, |
3965 | .ndo_do_ioctl = stmmac_ioctl, | |
47dd7a54 GC |
3966 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3967 | .ndo_poll_controller = stmmac_poll_controller, | |
3968 | #endif | |
3969 | .ndo_set_mac_address = eth_mac_addr, | |
3970 | }; | |
3971 | ||
cf3f047b GC |
3972 | /** |
3973 | * stmmac_hw_init - Init the MAC device | |
32ceabca | 3974 | * @priv: driver private structure |
732fdf0e GC |
3975 | * Description: this function is to configure the MAC device according to |
3976 | * some platform parameters or the HW capability register. It prepares the | |
3977 | * driver to use either ring or chain modes and to setup either enhanced or | |
3978 | * normal descriptors. | |
cf3f047b GC |
3979 | */ |
3980 | static int stmmac_hw_init(struct stmmac_priv *priv) | |
3981 | { | |
cf3f047b GC |
3982 | struct mac_device_info *mac; |
3983 | ||
3984 | /* Identify the MAC HW device */ | |
ec33d71d LC |
3985 | if (priv->plat->setup) { |
3986 | mac = priv->plat->setup(priv); | |
3987 | } else if (priv->plat->has_gmac) { | |
03f2eecd | 3988 | priv->dev->priv_flags |= IFF_UNICAST_FLT; |
3b57de95 VB |
3989 | mac = dwmac1000_setup(priv->ioaddr, |
3990 | priv->plat->multicast_filter_bins, | |
c623d149 AT |
3991 | priv->plat->unicast_filter_entries, |
3992 | &priv->synopsys_id); | |
f748be53 AT |
3993 | } else if (priv->plat->has_gmac4) { |
3994 | priv->dev->priv_flags |= IFF_UNICAST_FLT; | |
3995 | mac = dwmac4_setup(priv->ioaddr, | |
3996 | priv->plat->multicast_filter_bins, | |
3997 | priv->plat->unicast_filter_entries, | |
3998 | &priv->synopsys_id); | |
03f2eecd | 3999 | } else { |
c623d149 | 4000 | mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id); |
03f2eecd | 4001 | } |
cf3f047b GC |
4002 | if (!mac) |
4003 | return -ENOMEM; | |
4004 | ||
4005 | priv->hw = mac; | |
4006 | ||
9f93ac8d LC |
4007 | /* dwmac-sun8i only work in chain mode */ |
4008 | if (priv->plat->has_sun8i) | |
4009 | chain_mode = 1; | |
4010 | ||
4a7d666a | 4011 | /* To use the chained or ring mode */ |
f748be53 AT |
4012 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
4013 | priv->hw->mode = &dwmac4_ring_mode_ops; | |
4a7d666a | 4014 | } else { |
f748be53 AT |
4015 | if (chain_mode) { |
4016 | priv->hw->mode = &chain_mode_ops; | |
38ddc59d | 4017 | dev_info(priv->device, "Chain mode enabled\n"); |
f748be53 AT |
4018 | priv->mode = STMMAC_CHAIN_MODE; |
4019 | } else { | |
4020 | priv->hw->mode = &ring_mode_ops; | |
38ddc59d | 4021 | dev_info(priv->device, "Ring mode enabled\n"); |
f748be53 AT |
4022 | priv->mode = STMMAC_RING_MODE; |
4023 | } | |
4a7d666a GC |
4024 | } |
4025 | ||
cf3f047b GC |
4026 | /* Get the HW capability (new GMAC newer than 3.50a) */ |
4027 | priv->hw_cap_support = stmmac_get_hw_features(priv); | |
4028 | if (priv->hw_cap_support) { | |
38ddc59d | 4029 | dev_info(priv->device, "DMA HW capability register supported\n"); |
cf3f047b GC |
4030 | |
4031 | /* We can override some gmac/dma configuration fields: e.g. | |
4032 | * enh_desc, tx_coe (e.g. that are passed through the | |
4033 | * platform) with the values from the HW capability | |
4034 | * register (if supported). | |
4035 | */ | |
4036 | priv->plat->enh_desc = priv->dma_cap.enh_desc; | |
cf3f047b | 4037 | priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; |
3fe5cadb | 4038 | priv->hw->pmt = priv->plat->pmt; |
38912bdb | 4039 | |
a8df35d4 EG |
4040 | /* TXCOE doesn't work in thresh DMA mode */ |
4041 | if (priv->plat->force_thresh_dma_mode) | |
4042 | priv->plat->tx_coe = 0; | |
4043 | else | |
4044 | priv->plat->tx_coe = priv->dma_cap.tx_coe; | |
4045 | ||
f748be53 AT |
4046 | /* In case of GMAC4 rx_coe is from HW cap register. */ |
4047 | priv->plat->rx_coe = priv->dma_cap.rx_coe; | |
38912bdb DS |
4048 | |
4049 | if (priv->dma_cap.rx_coe_type2) | |
4050 | priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; | |
4051 | else if (priv->dma_cap.rx_coe_type1) | |
4052 | priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; | |
4053 | ||
38ddc59d LC |
4054 | } else { |
4055 | dev_info(priv->device, "No HW DMA feature register supported\n"); | |
4056 | } | |
cf3f047b | 4057 | |
f748be53 AT |
4058 | /* To use alternate (extended), normal or GMAC4 descriptor structures */ |
4059 | if (priv->synopsys_id >= DWMAC_CORE_4_00) | |
4060 | priv->hw->desc = &dwmac4_desc_ops; | |
4061 | else | |
4062 | stmmac_selec_desc_mode(priv); | |
61369d02 | 4063 | |
d2afb5bd GC |
4064 | if (priv->plat->rx_coe) { |
4065 | priv->hw->rx_csum = priv->plat->rx_coe; | |
38ddc59d | 4066 | dev_info(priv->device, "RX Checksum Offload Engine supported\n"); |
f748be53 | 4067 | if (priv->synopsys_id < DWMAC_CORE_4_00) |
38ddc59d | 4068 | dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); |
d2afb5bd | 4069 | } |
cf3f047b | 4070 | if (priv->plat->tx_coe) |
38ddc59d | 4071 | dev_info(priv->device, "TX Checksum insertion supported\n"); |
cf3f047b GC |
4072 | |
4073 | if (priv->plat->pmt) { | |
38ddc59d | 4074 | dev_info(priv->device, "Wake-Up On Lan supported\n"); |
cf3f047b GC |
4075 | device_set_wakeup_capable(priv->device, 1); |
4076 | } | |
4077 | ||
f748be53 | 4078 | if (priv->dma_cap.tsoen) |
38ddc59d | 4079 | dev_info(priv->device, "TSO supported\n"); |
f748be53 | 4080 | |
c24602ef | 4081 | return 0; |
cf3f047b GC |
4082 | } |
4083 | ||
47dd7a54 | 4084 | /** |
bfab27a1 GC |
4085 | * stmmac_dvr_probe |
4086 | * @device: device pointer | |
ff3dd78c | 4087 | * @plat_dat: platform data pointer |
e56788cf | 4088 | * @res: stmmac resource pointer |
bfab27a1 GC |
4089 | * Description: this is the main probe function used to |
4090 | * call the alloc_etherdev, allocate the priv structure. | |
9afec6ef | 4091 | * Return: |
15ffac73 | 4092 | * returns 0 on success, otherwise errno. |
47dd7a54 | 4093 | */ |
15ffac73 JE |
4094 | int stmmac_dvr_probe(struct device *device, |
4095 | struct plat_stmmacenet_data *plat_dat, | |
4096 | struct stmmac_resources *res) | |
47dd7a54 | 4097 | { |
bfab27a1 GC |
4098 | struct net_device *ndev = NULL; |
4099 | struct stmmac_priv *priv; | |
c22a3f48 JP |
4100 | int ret = 0; |
4101 | u32 queue; | |
47dd7a54 | 4102 | |
c22a3f48 JP |
4103 | ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), |
4104 | MTL_MAX_TX_QUEUES, | |
4105 | MTL_MAX_RX_QUEUES); | |
41de8d4c | 4106 | if (!ndev) |
15ffac73 | 4107 | return -ENOMEM; |
bfab27a1 GC |
4108 | |
4109 | SET_NETDEV_DEV(ndev, device); | |
4110 | ||
4111 | priv = netdev_priv(ndev); | |
4112 | priv->device = device; | |
4113 | priv->dev = ndev; | |
47dd7a54 | 4114 | |
bfab27a1 | 4115 | stmmac_set_ethtool_ops(ndev); |
cf3f047b GC |
4116 | priv->pause = pause; |
4117 | priv->plat = plat_dat; | |
e56788cf JE |
4118 | priv->ioaddr = res->addr; |
4119 | priv->dev->base_addr = (unsigned long)res->addr; | |
4120 | ||
4121 | priv->dev->irq = res->irq; | |
4122 | priv->wol_irq = res->wol_irq; | |
4123 | priv->lpi_irq = res->lpi_irq; | |
4124 | ||
4125 | if (res->mac) | |
4126 | memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); | |
cf3f047b | 4127 | |
a7a62685 | 4128 | dev_set_drvdata(device, priv->dev); |
803f8fc4 | 4129 | |
cf3f047b GC |
4130 | /* Verify driver arguments */ |
4131 | stmmac_verify_args(); | |
bfab27a1 | 4132 | |
cf3f047b | 4133 | /* Override with kernel parameters if supplied XXX CRS XXX |
ceb69499 GC |
4134 | * this needs to have multiple instances |
4135 | */ | |
cf3f047b GC |
4136 | if ((phyaddr >= 0) && (phyaddr <= 31)) |
4137 | priv->plat->phy_addr = phyaddr; | |
4138 | ||
90f522a2 EP |
4139 | if (priv->plat->stmmac_rst) { |
4140 | ret = reset_control_assert(priv->plat->stmmac_rst); | |
f573c0b9 | 4141 | reset_control_deassert(priv->plat->stmmac_rst); |
90f522a2 EP |
4142 | /* Some reset controllers have only reset callback instead of |
4143 | * assert + deassert callbacks pair. | |
4144 | */ | |
4145 | if (ret == -ENOTSUPP) | |
4146 | reset_control_reset(priv->plat->stmmac_rst); | |
4147 | } | |
c5e4ddbd | 4148 | |
cf3f047b | 4149 | /* Init MAC and get the capabilities */ |
c24602ef GC |
4150 | ret = stmmac_hw_init(priv); |
4151 | if (ret) | |
62866e98 | 4152 | goto error_hw_init; |
cf3f047b | 4153 | |
c22a3f48 | 4154 | /* Configure real RX and TX queues */ |
c02b7a91 JP |
4155 | netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); |
4156 | netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); | |
c22a3f48 | 4157 | |
cf3f047b | 4158 | ndev->netdev_ops = &stmmac_netdev_ops; |
bfab27a1 | 4159 | |
cf3f047b GC |
4160 | ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
4161 | NETIF_F_RXCSUM; | |
f748be53 AT |
4162 | |
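	/* TSO is enabled only when both the platform opts in (tso_en)
	 * and the HW capability register reports support (dma_cap.tsoen).
	 */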
4163 | if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { | |
9edfa7da | 4164 | ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; |
f748be53 | 4165 | priv->tso = true; |
38ddc59d | 4166 | dev_info(priv->device, "TSO feature enabled\n"); |
f748be53 | 4167 | } |
bfab27a1 GC |
4168 | ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; |
4169 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); | |
47dd7a54 GC |
4170 | #ifdef STMMAC_VLAN_TAG_USED |
4171 | /* Both mac100 and gmac support receive VLAN tag detection */ | |
f646968f | 4172 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; |
47dd7a54 GC |
4173 | #endif |
4174 | priv->msg_enable = netif_msg_init(debug, default_msg_level); | |
4175 | ||
44770e11 JW |
4176 | /* MTU range: 46 - hw-specific max */ |
4177 | ndev->min_mtu = ETH_ZLEN - ETH_HLEN; | |
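	/* Enhanced descriptors (or a 4.00+ core) can carry jumbo frames;
	 * otherwise cap the MTU at what fits in a linear skb head,
	 * presumably the limit of a normal descriptor buffer.
	 */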
4178 | if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) | |
4179 | ndev->max_mtu = JUMBO_LEN; | |
4180 | else | |
4181 | ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); | |
a2cd64f3 KHL |
4182 |	/* Do not overwrite ndev->max_mtu when plat->maxmtu > ndev->max_mtu,
4183 |	 * nor when plat->maxmtu < ndev->min_mtu, which is an invalid range.
4184 |	 */ | |
4185 | if ((priv->plat->maxmtu < ndev->max_mtu) && | |
4186 | (priv->plat->maxmtu >= ndev->min_mtu)) | |
44770e11 | 4187 | ndev->max_mtu = priv->plat->maxmtu; |
a2cd64f3 | 4188 | else if (priv->plat->maxmtu < ndev->min_mtu) |
b618ab45 HK |
4189 | dev_warn(priv->device, |
4190 | "%s: warning: maxmtu having invalid value (%d)\n", | |
4191 | __func__, priv->plat->maxmtu); | |
44770e11 | 4192 | |
47dd7a54 GC |
4193 | if (flow_ctrl) |
4194 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ | |
4195 | ||
62a2ab93 GC |
4196 |	/* The Rx Watchdog is available in cores newer than 3.40.
4197 |	 * In some cases, for example on buggy hardware, this feature
4198 |	 * has to be disabled; this can be done by setting the
4199 |	 * riwt_off field in the platform data.
4200 |	 */ | |
4201 | if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { | |
4202 | priv->use_riwt = 1; | |
b618ab45 HK |
4203 | dev_info(priv->device, |
4204 | "Enable RX Mitigation via HW Watchdog Timer\n"); | |
62a2ab93 GC |
4205 | } |
4206 | ||
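	/* One NAPI context is registered per RX queue in use; the poll
	 * weight scales with the queue count (8 * rx_queues_to_use).
	 */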
c22a3f48 JP |
4207 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { |
4208 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
4209 | ||
4210 | netif_napi_add(ndev, &rx_q->napi, stmmac_poll, | |
4211 | (8 * priv->plat->rx_queues_to_use)); | |
4212 | } | |
47dd7a54 | 4213 | |
f8e96161 VL |
4214 | spin_lock_init(&priv->lock); |
4215 | ||
cd7201f4 GC |
4216 |	/* If a specific clk_csr value is passed from the platform,
4217 |	 * the CSR Clock Range selection cannot be changed
4218 |	 * at run-time and is fixed. Otherwise the driver tries to
4219 |	 * set the MDC clock dynamically according to the actual
4220 |	 * CSR clock input.
4221 |	 */ | |
4222 | if (!priv->plat->clk_csr) | |
4223 | stmmac_clk_csr_set(priv); | |
4224 | else | |
4225 | priv->clk_csr = priv->plat->clk_csr; | |
4226 | ||
e58bb43f GC |
4227 | stmmac_check_pcs_mode(priv); |
4228 | ||
3fe5cadb GC |
4229 | if (priv->hw->pcs != STMMAC_PCS_RGMII && |
4230 | priv->hw->pcs != STMMAC_PCS_TBI && | |
4231 | priv->hw->pcs != STMMAC_PCS_RTBI) { | |
e58bb43f GC |
4232 | /* MDIO bus Registration */ |
4233 | ret = stmmac_mdio_register(ndev); | |
4234 | if (ret < 0) { | |
b618ab45 HK |
4235 | dev_err(priv->device, |
4236 |				"%s: MDIO bus (id: %d) registration failed\n",
4237 | __func__, priv->plat->bus_id); | |
e58bb43f GC |
4238 | goto error_mdio_register; |
4239 | } | |
4bfcbd7a FV |
4240 | } |
4241 | ||
57016590 | 4242 | ret = register_netdev(ndev); |
b2eb09af | 4243 | if (ret) { |
b618ab45 HK |
4244 | dev_err(priv->device, "%s: ERROR %i registering the device\n", |
4245 | __func__, ret); | |
b2eb09af FF |
4246 | goto error_netdev_register; |
4247 | } | |
57016590 FF |
4248 | |
4249 | return ret; | |
47dd7a54 | 4250 | |
6a81c26f | 4251 | error_netdev_register: |
b2eb09af FF |
4252 | if (priv->hw->pcs != STMMAC_PCS_RGMII && |
4253 | priv->hw->pcs != STMMAC_PCS_TBI && | |
4254 | priv->hw->pcs != STMMAC_PCS_RTBI) | |
4255 | stmmac_mdio_unregister(ndev); | |
6a81c26f | 4256 | error_mdio_register: |
c22a3f48 JP |
4257 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { |
4258 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
4259 | ||
4260 | netif_napi_del(&rx_q->napi); | |
4261 | } | |
62866e98 | 4262 | error_hw_init: |
34a52f36 | 4263 | free_netdev(ndev); |
47dd7a54 | 4264 | |
15ffac73 | 4265 | return ret; |
47dd7a54 | 4266 | } |
b2e2f0c7 | 4267 | EXPORT_SYMBOL_GPL(stmmac_dvr_probe); |
47dd7a54 GC |
4268 | |
4269 | /** | |
4270 | * stmmac_dvr_remove | |
f4e7bd81 | 4271 | * @dev: device pointer |
47dd7a54 |	4272 | Description: this function resets the TX/RX processes, disables the MAC RX/TX,
bfab27a1 |	4273 | changes the link status and releases the DMA descriptor rings. | |
47dd7a54 | 4274 | */ |
f4e7bd81 | 4275 | int stmmac_dvr_remove(struct device *dev) |
47dd7a54 | 4276 | { |
f4e7bd81 | 4277 | struct net_device *ndev = dev_get_drvdata(dev); |
aec7ff27 | 4278 | struct stmmac_priv *priv = netdev_priv(ndev); |
47dd7a54 | 4279 | |
38ddc59d |	4280 |	netdev_info(priv->dev, "%s: removing driver\n", __func__); | |
47dd7a54 | 4281 | |
ae4f0d46 | 4282 | stmmac_stop_all_dma(priv); |
47dd7a54 | 4283 | |
270c7759 | 4284 | priv->hw->mac->set_mac(priv->ioaddr, false); |
47dd7a54 | 4285 | netif_carrier_off(ndev); |
47dd7a54 | 4286 | unregister_netdev(ndev); |
f573c0b9 | 4287 | if (priv->plat->stmmac_rst) |
4288 | reset_control_assert(priv->plat->stmmac_rst); | |
4289 | clk_disable_unprepare(priv->plat->pclk); | |
4290 | clk_disable_unprepare(priv->plat->stmmac_clk); | |
3fe5cadb GC |
4291 | if (priv->hw->pcs != STMMAC_PCS_RGMII && |
4292 | priv->hw->pcs != STMMAC_PCS_TBI && | |
4293 | priv->hw->pcs != STMMAC_PCS_RTBI) | |
e743471f | 4294 | stmmac_mdio_unregister(ndev); |
47dd7a54 GC |
4295 | free_netdev(ndev); |
4296 | ||
4297 | return 0; | |
4298 | } | |
b2e2f0c7 | 4299 | EXPORT_SYMBOL_GPL(stmmac_dvr_remove); |
47dd7a54 | 4300 | |
732fdf0e GC |
4301 | /** |
4302 | * stmmac_suspend - suspend callback | |
f4e7bd81 | 4303 | * @dev: device pointer |
732fdf0e GC |
4304 | Description: suspends the device; it is called by the platform
4305 | driver to stop the network queues, program the PMT register
4306 | (for WoL) and release the driver resources.
4307 | */ | |
f4e7bd81 | 4308 | int stmmac_suspend(struct device *dev) |
47dd7a54 | 4309 | { |
f4e7bd81 | 4310 | struct net_device *ndev = dev_get_drvdata(dev); |
874bd42d | 4311 | struct stmmac_priv *priv = netdev_priv(ndev); |
f8c5a875 | 4312 | unsigned long flags; |
47dd7a54 | 4313 | |
874bd42d | 4314 | if (!ndev || !netif_running(ndev)) |
47dd7a54 GC |
4315 | return 0; |
4316 | ||
d6d50c7e PR |
4317 | if (ndev->phydev) |
4318 | phy_stop(ndev->phydev); | |
102463b1 | 4319 | |
f8c5a875 | 4320 | spin_lock_irqsave(&priv->lock, flags); |
47dd7a54 | 4321 | |
874bd42d | 4322 | netif_device_detach(ndev); |
c22a3f48 | 4323 | stmmac_stop_all_queues(priv); |
47dd7a54 | 4324 | |
c22a3f48 | 4325 | stmmac_disable_all_queues(priv); |
874bd42d GC |
4326 | |
4327 | /* Stop TX/RX DMA */ | |
ae4f0d46 | 4328 | stmmac_stop_all_dma(priv); |
c24602ef | 4329 | |
874bd42d | 4330 | /* Enable Power down mode by programming the PMT regs */ |
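	/* Note that the clocks are deliberately left running in the
	 * wakeup case below: the PMT logic presumably needs them to
	 * detect the magic packet / Wake-up frame.
	 */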
89f7f2cf | 4331 | if (device_may_wakeup(priv->device)) { |
7ed24bbe | 4332 | priv->hw->mac->pmt(priv->hw, priv->wolopts); |
89f7f2cf SK |
4333 | priv->irq_wake = 1; |
4334 | } else { | |
270c7759 | 4335 | priv->hw->mac->set_mac(priv->ioaddr, false); |
db88f10a | 4336 | pinctrl_pm_select_sleep_state(priv->device); |
ba1377ff |	4337 |		/* Disable the clocks, since Wake-on-LAN is not in use here */
f573c0b9 | 4338 | clk_disable(priv->plat->pclk); |
4339 | clk_disable(priv->plat->stmmac_clk); | |
ba1377ff | 4340 | } |
f8c5a875 | 4341 | spin_unlock_irqrestore(&priv->lock, flags); |
2d871aa0 | 4342 | |
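	/* Forget the cached link state; the first link update after
	 * resume will then re-program the MAC with fresh settings.
	 */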
4d869b03 | 4343 | priv->oldlink = false; |
bd00632c LC |
4344 | priv->speed = SPEED_UNKNOWN; |
4345 | priv->oldduplex = DUPLEX_UNKNOWN; | |
47dd7a54 GC |
4346 | return 0; |
4347 | } | |
b2e2f0c7 | 4348 | EXPORT_SYMBOL_GPL(stmmac_suspend); |
47dd7a54 | 4349 | |
54139cf3 JP |
4350 | /** |
4351 | * stmmac_reset_queues_param - reset queue parameters | |
4352 |  * @priv: driver private structure | |
4353 | */ | |
4354 | static void stmmac_reset_queues_param(struct stmmac_priv *priv) | |
4355 | { | |
4356 | u32 rx_cnt = priv->plat->rx_queues_to_use; | |
ce736788 | 4357 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
54139cf3 JP |
4358 | u32 queue; |
4359 | ||
4360 | for (queue = 0; queue < rx_cnt; queue++) { | |
4361 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
4362 | ||
4363 | rx_q->cur_rx = 0; | |
4364 | rx_q->dirty_rx = 0; | |
4365 | } | |
4366 | ||
ce736788 JP |
4367 | for (queue = 0; queue < tx_cnt; queue++) { |
4368 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
4369 | ||
4370 | tx_q->cur_tx = 0; | |
4371 | tx_q->dirty_tx = 0; | |
4372 | } | |
54139cf3 JP |
4373 | } |
4374 | ||
732fdf0e GC |
4375 | /** |
4376 | * stmmac_resume - resume callback | |
f4e7bd81 | 4377 | * @dev: device pointer |
732fdf0e GC |
4378 | * Description: when resume this function is invoked to setup the DMA and CORE |
4379 | * in a usable state. | |
4380 | */ | |
f4e7bd81 | 4381 | int stmmac_resume(struct device *dev) |
47dd7a54 | 4382 | { |
f4e7bd81 | 4383 | struct net_device *ndev = dev_get_drvdata(dev); |
874bd42d | 4384 | struct stmmac_priv *priv = netdev_priv(ndev); |
f8c5a875 | 4385 | unsigned long flags; |
47dd7a54 | 4386 | |
874bd42d | 4387 | if (!netif_running(ndev)) |
47dd7a54 GC |
4388 | return 0; |
4389 | ||
47dd7a54 GC |
4390 |	/* The Power Down bit in the PMT register is cleared
4391 |	 * automatically as soon as a magic packet or a Wake-up frame
4392 |	 * is received. Still, it's better to clear this bit
4393 |	 * manually because it can cause problems when resuming
ceb69499 GC |
4394 |	 * from other devices (e.g. a serial console).
4395 |	 */ | |
623997fb | 4396 | if (device_may_wakeup(priv->device)) { |
f55d84b0 | 4397 | spin_lock_irqsave(&priv->lock, flags); |
7ed24bbe | 4398 | priv->hw->mac->pmt(priv->hw, 0); |
f55d84b0 | 4399 | spin_unlock_irqrestore(&priv->lock, flags); |
89f7f2cf | 4400 | priv->irq_wake = 0; |
623997fb | 4401 | } else { |
db88f10a | 4402 | pinctrl_pm_select_default_state(priv->device); |
8d45e42b |	4403 |		/* Enable the clocks previously disabled */
f573c0b9 | 4404 | clk_enable(priv->plat->stmmac_clk); |
4405 | clk_enable(priv->plat->pclk); | |
623997fb SK |
4406 | /* reset the phy so that it's ready */ |
4407 | if (priv->mii) | |
4408 | stmmac_mdio_reset(priv->mii); | |
4409 | } | |
47dd7a54 | 4410 | |
874bd42d | 4411 | netif_device_attach(ndev); |
47dd7a54 | 4412 | |
f55d84b0 VP |
4413 | spin_lock_irqsave(&priv->lock, flags); |
4414 | ||
54139cf3 JP |
4415 | stmmac_reset_queues_param(priv); |
4416 | ||
f748be53 AT |
4417 | /* reset private mss value to force mss context settings at |
4418 | * next tso xmit (only used for gmac4). | |
4419 | */ | |
4420 | priv->mss = 0; | |
4421 | ||
ae79a639 GC |
4422 | stmmac_clear_descriptors(priv); |
4423 | ||
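	/* Re-program the MAC and DMA from scratch; the second argument
	 * (false) appears to skip the PTP init done by the open-time setup.
	 */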
fe131929 | 4424 | stmmac_hw_setup(ndev, false); |
777da230 | 4425 | stmmac_init_tx_coalesce(priv); |
ac316c78 | 4426 | stmmac_set_rx_mode(ndev); |
47dd7a54 | 4427 | |
c22a3f48 | 4428 | stmmac_enable_all_queues(priv); |
47dd7a54 | 4429 | |
c22a3f48 | 4430 | stmmac_start_all_queues(priv); |
47dd7a54 | 4431 | |
f8c5a875 | 4432 | spin_unlock_irqrestore(&priv->lock, flags); |
102463b1 | 4433 | |
d6d50c7e PR |
4434 | if (ndev->phydev) |
4435 | phy_start(ndev->phydev); | |
102463b1 | 4436 | |
47dd7a54 GC |
4437 | return 0; |
4438 | } | |
b2e2f0c7 | 4439 | EXPORT_SYMBOL_GPL(stmmac_resume); |
ba27ec66 | 4440 | |
47dd7a54 GC |
4441 | #ifndef MODULE |
4442 | static int __init stmmac_cmdline_opt(char *str) | |
4443 | { | |
4444 | char *opt; | |
4445 | ||
4446 | if (!str || !*str) | |
4447 | return -EINVAL; | |
4448 | while ((opt = strsep(&str, ",")) != NULL) { | |
f3240e28 | 4449 | if (!strncmp(opt, "debug:", 6)) { |
ea2ab871 | 4450 | if (kstrtoint(opt + 6, 0, &debug)) |
f3240e28 GC |
4451 | goto err; |
4452 | } else if (!strncmp(opt, "phyaddr:", 8)) { | |
ea2ab871 | 4453 | if (kstrtoint(opt + 8, 0, &phyaddr)) |
f3240e28 | 4454 | goto err; |
f3240e28 | 4455 | } else if (!strncmp(opt, "buf_sz:", 7)) { |
ea2ab871 | 4456 | if (kstrtoint(opt + 7, 0, &buf_sz)) |
f3240e28 GC |
4457 | goto err; |
4458 | } else if (!strncmp(opt, "tc:", 3)) { | |
ea2ab871 | 4459 | if (kstrtoint(opt + 3, 0, &tc)) |
f3240e28 GC |
4460 | goto err; |
4461 | } else if (!strncmp(opt, "watchdog:", 9)) { | |
ea2ab871 | 4462 | if (kstrtoint(opt + 9, 0, &watchdog)) |
f3240e28 GC |
4463 | goto err; |
4464 | } else if (!strncmp(opt, "flow_ctrl:", 10)) { | |
ea2ab871 | 4465 | if (kstrtoint(opt + 10, 0, &flow_ctrl)) |
f3240e28 GC |
4466 | goto err; |
4467 | } else if (!strncmp(opt, "pause:", 6)) { | |
ea2ab871 | 4468 | if (kstrtoint(opt + 6, 0, &pause)) |
f3240e28 | 4469 | goto err; |
506f669c | 4470 | } else if (!strncmp(opt, "eee_timer:", 10)) { |
d765955d GC |
4471 | if (kstrtoint(opt + 10, 0, &eee_timer)) |
4472 | goto err; | |
4a7d666a GC |
4473 | } else if (!strncmp(opt, "chain_mode:", 11)) { |
4474 | if (kstrtoint(opt + 11, 0, &chain_mode)) | |
4475 | goto err; | |
f3240e28 | 4476 | } |
47dd7a54 GC |
4477 | } |
4478 | return 0; | |
f3240e28 GC |
4479 | |
4480 | err: | |
4481 |	pr_err("%s: ERROR broken module parameter conversion\n", __func__); | |
4482 | return -EINVAL; | |
47dd7a54 GC |
4483 | } |
4484 | ||
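/* Illustrative boot-time usage of the option string parsed above,
 * given as comma-separated "name:value" pairs, e.g.:
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000
 */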
4485 | __setup("stmmaceth=", stmmac_cmdline_opt); | |
ceb69499 | 4486 | #endif /* MODULE */ |
6fc0d0f2 | 4487 | |
466c5ac8 MO |
4488 | static int __init stmmac_init(void) |
4489 | { | |
4490 | #ifdef CONFIG_DEBUG_FS | |
4491 | /* Create debugfs main directory if it doesn't exist yet */ | |
4492 | if (!stmmac_fs_dir) { | |
4493 | stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); | |
4494 | ||
4495 | if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { | |
4496 | pr_err("ERROR %s, debugfs create directory failed\n", | |
4497 | STMMAC_RESOURCE_NAME); | |
4498 | ||
4499 | return -ENOMEM; | |
4500 | } | |
4501 | } | |
4502 | #endif | |
4503 | ||
4504 | return 0; | |
4505 | } | |
4506 | ||
4507 | static void __exit stmmac_exit(void) | |
4508 | { | |
4509 | #ifdef CONFIG_DEBUG_FS | |
4510 | debugfs_remove_recursive(stmmac_fs_dir); | |
4511 | #endif | |
4512 | } | |
4513 | ||
4514 | module_init(stmmac_init) | |
4515 | module_exit(stmmac_exit) | |
4516 | ||
6fc0d0f2 GC |
4517 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); |
4518 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | |
4519 | MODULE_LICENSE("GPL"); |