drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52
53 /* As long as the interface is active, we keep the timestamping counter enabled
54  * with fine resolution and binary rollover. This avoids non-monotonic behavior
55  * (clock jumps) when changing timestamping settings at runtime.
56  */
57 #define STMMAC_HWTS_ACTIVE      (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58                                  PTP_TCR_TSCTRLSSR)
59
60 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
62
63 /* Module parameters */
64 #define TX_TIMEO        5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76
77 #define STMMAC_TX_THRESH(x)     ((x)->dma_conf.dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x)     ((x)->dma_conf.dma_rx_size / 4)
79
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX        256
82 #define STMMAC_TX_XSK_AVAIL             16
83 #define STMMAC_RX_FILL_BATCH            16
84
85 #define STMMAC_XDP_PASS         0
86 #define STMMAC_XDP_CONSUMED     BIT(0)
87 #define STMMAC_XDP_TX           BIT(1)
88 #define STMMAC_XDP_REDIRECT     BIT(2)
89
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102
103 #define DEFAULT_BUFSIZE 1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107
108 #define STMMAC_RX_COPYBREAK     256
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
112                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER        1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but allows the user to force the use of chain mode instead of ring mode.
122  */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139                                           u32 rxmode, u32 chan);
140
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151         int ret = 0;
152
153         if (enabled) {
154                 ret = clk_prepare_enable(priv->plat->stmmac_clk);
155                 if (ret)
156                         return ret;
157                 ret = clk_prepare_enable(priv->plat->pclk);
158                 if (ret) {
159                         clk_disable_unprepare(priv->plat->stmmac_clk);
160                         return ret;
161                 }
162                 if (priv->plat->clks_config) {
163                         ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164                         if (ret) {
165                                 clk_disable_unprepare(priv->plat->stmmac_clk);
166                                 clk_disable_unprepare(priv->plat->pclk);
167                                 return ret;
168                         }
169                 }
170         } else {
171                 clk_disable_unprepare(priv->plat->stmmac_clk);
172                 clk_disable_unprepare(priv->plat->pclk);
173                 if (priv->plat->clks_config)
174                         priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175         }
176
177         return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
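
/* Hedged usage sketch (not from this file): how platform glue code could use
 * the exported stmmac_bus_clks_config() helper around a low-power transition.
 * The demo_* function names and the calling context are assumptions made for
 * illustration only; on the enable path the helper itself rolls back any
 * partially enabled clocks on error.
 */
static int demo_glue_suspend_clks(struct stmmac_priv *priv)
{
	/* Gate the CSR/APB bus clocks while the interface is quiescent */
	return stmmac_bus_clks_config(priv, false);
}

static int demo_glue_resume_clks(struct stmmac_priv *priv)
{
	/* Re-enable the bus clocks before touching MAC registers again */
	return stmmac_bus_clks_config(priv, true);
}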
180
181 /**
182  * stmmac_verify_args - verify the driver parameters.
183  * Description: it checks the driver parameters and sets a default in case of
184  * errors.
185  */
186 static void stmmac_verify_args(void)
187 {
188         if (unlikely(watchdog < 0))
189                 watchdog = TX_TIMEO;
190         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
191                 buf_sz = DEFAULT_BUFSIZE;
192         if (unlikely(flow_ctrl > 1))
193                 flow_ctrl = FLOW_AUTO;
194         else if (likely(flow_ctrl < 0))
195                 flow_ctrl = FLOW_OFF;
196         if (unlikely((pause < 0) || (pause > 0xffff)))
197                 pause = PAUSE_TIME;
198         if (eee_timer < 0)
199                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
200 }
201
202 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
203 {
204         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
205         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
207         u32 queue;
208
209         for (queue = 0; queue < maxq; queue++) {
210                 struct stmmac_channel *ch = &priv->channel[queue];
211
212                 if (stmmac_xdp_is_enabled(priv) &&
213                     test_bit(queue, priv->af_xdp_zc_qps)) {
214                         napi_disable(&ch->rxtx_napi);
215                         continue;
216                 }
217
218                 if (queue < rx_queues_cnt)
219                         napi_disable(&ch->rx_napi);
220                 if (queue < tx_queues_cnt)
221                         napi_disable(&ch->tx_napi);
222         }
223 }
224
225 /**
226  * stmmac_disable_all_queues - Disable all queues
227  * @priv: driver private structure
228  */
229 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232         struct stmmac_rx_queue *rx_q;
233         u32 queue;
234
235         /* synchronize_rcu() needed for pending XDP buffers to drain */
236         for (queue = 0; queue < rx_queues_cnt; queue++) {
237                 rx_q = &priv->dma_conf.rx_queue[queue];
238                 if (rx_q->xsk_pool) {
239                         synchronize_rcu();
240                         break;
241                 }
242         }
243
244         __stmmac_disable_all_queues(priv);
245 }
246
247 /**
248  * stmmac_enable_all_queues - Enable all queues
249  * @priv: driver private structure
250  */
251 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
252 {
253         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
254         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
255         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
256         u32 queue;
257
258         for (queue = 0; queue < maxq; queue++) {
259                 struct stmmac_channel *ch = &priv->channel[queue];
260
261                 if (stmmac_xdp_is_enabled(priv) &&
262                     test_bit(queue, priv->af_xdp_zc_qps)) {
263                         napi_enable(&ch->rxtx_napi);
264                         continue;
265                 }
266
267                 if (queue < rx_queues_cnt)
268                         napi_enable(&ch->rx_napi);
269                 if (queue < tx_queues_cnt)
270                         napi_enable(&ch->tx_napi);
271         }
272 }
273
274 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
275 {
276         if (!test_bit(STMMAC_DOWN, &priv->state) &&
277             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
278                 queue_work(priv->wq, &priv->service_task);
279 }
280
281 static void stmmac_global_err(struct stmmac_priv *priv)
282 {
283         netif_carrier_off(priv->dev);
284         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
285         stmmac_service_event_schedule(priv);
286 }
287
288 /**
289  * stmmac_clk_csr_set - dynamically set the MDC clock
290  * @priv: driver private structure
291  * Description: this is to dynamically set the MDC clock according to the csr
292  * clock input.
293  * Note:
294  *      If a specific clk_csr value is passed from the platform
295  *      this means that the CSR Clock Range selection cannot be
296  *      changed at run-time and it is fixed (as reported in the driver
297  * documentation). Otherwise the driver will try to set the MDC
298  *      clock dynamically according to the actual clock input.
299  */
300 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
301 {
302         u32 clk_rate;
303
304         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
305
306         /* The platform-provided default clk_csr is assumed valid for
307          * all cases except the ones handled below.
308          * For values higher than the IEEE 802.3 specified frequency
309          * range we cannot estimate the proper divider, as the frequency
310          * of clk_csr_i is not known, so we do not change the default
311          * divider.
312          */
313         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
314                 if (clk_rate < CSR_F_35M)
315                         priv->clk_csr = STMMAC_CSR_20_35M;
316                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
317                         priv->clk_csr = STMMAC_CSR_35_60M;
318                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
319                         priv->clk_csr = STMMAC_CSR_60_100M;
320                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
321                         priv->clk_csr = STMMAC_CSR_100_150M;
322                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
323                         priv->clk_csr = STMMAC_CSR_150_250M;
324                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
325                         priv->clk_csr = STMMAC_CSR_250_300M;
326         }
327
328         if (priv->plat->has_sun8i) {
329                 if (clk_rate > 160000000)
330                         priv->clk_csr = 0x03;
331                 else if (clk_rate > 80000000)
332                         priv->clk_csr = 0x02;
333                 else if (clk_rate > 40000000)
334                         priv->clk_csr = 0x01;
335                 else
336                         priv->clk_csr = 0;
337         }
338
339         if (priv->plat->has_xgmac) {
340                 if (clk_rate > 400000000)
341                         priv->clk_csr = 0x5;
342                 else if (clk_rate > 350000000)
343                         priv->clk_csr = 0x4;
344                 else if (clk_rate > 300000000)
345                         priv->clk_csr = 0x3;
346                 else if (clk_rate > 250000000)
347                         priv->clk_csr = 0x2;
348                 else if (clk_rate > 150000000)
349                         priv->clk_csr = 0x1;
350                 else
351                         priv->clk_csr = 0x0;
352         }
353 }
354
355 static void print_pkt(unsigned char *buf, int len)
356 {
357         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
358         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
359 }
360
361 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
362 {
363         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
364         u32 avail;
365
366         if (tx_q->dirty_tx > tx_q->cur_tx)
367                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
368         else
369                 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
370
371         return avail;
372 }
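
/* Standalone illustration (user-space C, not driver code) of the free-slot
 * arithmetic in stmmac_tx_avail(): one descriptor is always left unused so
 * that cur == dirty unambiguously means "ring empty". The ring size and
 * index values below are made-up examples.
 */
#include <stdio.h>

static unsigned int demo_ring_avail(unsigned int size, unsigned int cur,
				    unsigned int dirty)
{
	if (dirty > cur)
		return dirty - cur - 1;
	return size - cur + dirty - 1;
}

int main(void)
{
	/* 512-entry ring, producer (cur) at 500, consumer (dirty) at 10 */
	printf("%u\n", demo_ring_avail(512, 500, 10));	/* prints 21 */
	/* Empty ring: cur == dirty leaves size - 1 usable slots */
	printf("%u\n", demo_ring_avail(512, 0, 0));	/* prints 511 */
	return 0;
}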
373
374 /**
375  * stmmac_rx_dirty - Get RX queue dirty
376  * @priv: driver private structure
377  * @queue: RX queue index
378  */
379 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
380 {
381         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
382         u32 dirty;
383
384         if (rx_q->dirty_rx <= rx_q->cur_rx)
385                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
386         else
387                 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
388
389         return dirty;
390 }
391
392 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
393 {
394         int tx_lpi_timer;
395
396         /* Clear/set the SW EEE timer flag based on LPI ET enablement */
397         priv->eee_sw_timer_en = en ? 0 : 1;
398         tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
399         stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
400 }
401
402 /**
403  * stmmac_enable_eee_mode - check and enter in LPI mode
404  * @priv: driver private structure
405  * Description: this function checks that all TX queues are idle and, if
406  * EEE is enabled, enters LPI mode.
407  */
408 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
409 {
410         u32 tx_cnt = priv->plat->tx_queues_to_use;
411         u32 queue;
412
413         /* check if all TX queues have the work finished */
414         for (queue = 0; queue < tx_cnt; queue++) {
415                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
416
417                 if (tx_q->dirty_tx != tx_q->cur_tx)
418                         return -EBUSY; /* still unfinished work */
419         }
420
421         /* Check and enter in LPI mode */
422         if (!priv->tx_path_in_lpi_mode)
423                 stmmac_set_eee_mode(priv, priv->hw,
424                                 priv->plat->en_tx_lpi_clockgating);
425         return 0;
426 }
427
428 /**
429  * stmmac_disable_eee_mode - disable and exit from LPI mode
430  * @priv: driver private structure
431  * Description: this function exits and disables EEE when the LPI state
432  * is active. It is called from the xmit path.
433  */
434 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
435 {
436         if (!priv->eee_sw_timer_en) {
437                 stmmac_lpi_entry_timer_config(priv, 0);
438                 return;
439         }
440
441         stmmac_reset_eee_mode(priv, priv->hw);
442         del_timer_sync(&priv->eee_ctrl_timer);
443         priv->tx_path_in_lpi_mode = false;
444 }
445
446 /**
447  * stmmac_eee_ctrl_timer - EEE TX SW timer.
448  * @t:  timer_list struct containing private info
449  * Description:
450  *  if there is no data transfer and we are not already in the LPI state,
451  *  then the MAC transmitter can be moved to the LPI state.
452  */
453 static void stmmac_eee_ctrl_timer(struct timer_list *t)
454 {
455         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
456
457         if (stmmac_enable_eee_mode(priv))
458                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
459 }
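
/* Minimal sketch (assumed, not driver code) of the timer pattern used by the
 * SW EEE machinery above: timer_setup() binds the callback, from_timer()
 * recovers the enclosing structure inside the callback, and mod_timer()
 * re-arms it. The demo_ctx structure and its period field are illustrative.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_ctx {
	struct timer_list timer;
	unsigned int period_us;
};

static void demo_timer_fn(struct timer_list *t)
{
	/* Recover the container of the timer_list member */
	struct demo_ctx *ctx = from_timer(ctx, t, timer);

	/* Re-arm, mirroring how stmmac_eee_ctrl_timer() defers LPI entry */
	mod_timer(&ctx->timer, jiffies + usecs_to_jiffies(ctx->period_us));
}

static void demo_timer_start(struct demo_ctx *ctx)
{
	timer_setup(&ctx->timer, demo_timer_fn, 0);
	mod_timer(&ctx->timer, jiffies + usecs_to_jiffies(ctx->period_us));
}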
460
461 /**
462  * stmmac_eee_init - init EEE
463  * @priv: driver private structure
464  * Description:
465  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
466  *  can also manage EEE, this function enables the LPI state and starts
467  *  the related timer.
468  */
469 bool stmmac_eee_init(struct stmmac_priv *priv)
470 {
471         int eee_tw_timer = priv->eee_tw_timer;
472
473         /* When using the PCS we cannot access the phy registers at this
474          * stage, so we do not support extra features like EEE.
475          */
476         if (priv->hw->pcs == STMMAC_PCS_TBI ||
477             priv->hw->pcs == STMMAC_PCS_RTBI)
478                 return false;
479
480         /* Check if MAC core supports the EEE feature. */
481         if (!priv->dma_cap.eee)
482                 return false;
483
484         mutex_lock(&priv->lock);
485
486         /* Check if it needs to be deactivated */
487         if (!priv->eee_active) {
488                 if (priv->eee_enabled) {
489                         netdev_dbg(priv->dev, "disable EEE\n");
490                         stmmac_lpi_entry_timer_config(priv, 0);
491                         del_timer_sync(&priv->eee_ctrl_timer);
492                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
493                         if (priv->hw->xpcs)
494                                 xpcs_config_eee(priv->hw->xpcs,
495                                                 priv->plat->mult_fact_100ns,
496                                                 false);
497                 }
498                 mutex_unlock(&priv->lock);
499                 return false;
500         }
501
502         if (priv->eee_active && !priv->eee_enabled) {
503                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
504                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
505                                      eee_tw_timer);
506                 if (priv->hw->xpcs)
507                         xpcs_config_eee(priv->hw->xpcs,
508                                         priv->plat->mult_fact_100ns,
509                                         true);
510         }
511
512         if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
513                 del_timer_sync(&priv->eee_ctrl_timer);
514                 priv->tx_path_in_lpi_mode = false;
515                 stmmac_lpi_entry_timer_config(priv, 1);
516         } else {
517                 stmmac_lpi_entry_timer_config(priv, 0);
518                 mod_timer(&priv->eee_ctrl_timer,
519                           STMMAC_LPI_T(priv->tx_lpi_timer));
520         }
521
522         mutex_unlock(&priv->lock);
523         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
524         return true;
525 }
526
527 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
528  * @priv: driver private structure
529  * @p : descriptor pointer
530  * @skb : the socket buffer
531  * Description :
532  * This function reads the timestamp from the descriptor, performs some
533  * sanity checks and passes it to the stack.
534  */
535 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
536                                    struct dma_desc *p, struct sk_buff *skb)
537 {
538         struct skb_shared_hwtstamps shhwtstamp;
539         bool found = false;
540         u64 ns = 0;
541
542         if (!priv->hwts_tx_en)
543                 return;
544
545         /* exit if skb doesn't support hw tstamp */
546         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
547                 return;
548
549         /* check tx tstamp status */
550         if (stmmac_get_tx_timestamp_status(priv, p)) {
551                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
552                 found = true;
553         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
554                 found = true;
555         }
556
557         if (found) {
558                 ns -= priv->plat->cdc_error_adj;
559
560                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
561                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
562
563                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
564                 /* pass tstamp to stack */
565                 skb_tstamp_tx(skb, &shhwtstamp);
566         }
567 }
568
569 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
570  * @priv: driver private structure
571  * @p : descriptor pointer
572  * @np : next descriptor pointer
573  * @skb : the socket buffer
574  * Description :
575  * This function reads the received packet's timestamp from the descriptor,
576  * passes it to the stack and also performs some sanity checks.
577  */
578 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
579                                    struct dma_desc *np, struct sk_buff *skb)
580 {
581         struct skb_shared_hwtstamps *shhwtstamp = NULL;
582         struct dma_desc *desc = p;
583         u64 ns = 0;
584
585         if (!priv->hwts_rx_en)
586                 return;
587         /* For GMAC4, the valid timestamp is from CTX next desc. */
588         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
589                 desc = np;
590
591         /* Check if timestamp is available */
592         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
593                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
594
595                 ns -= priv->plat->cdc_error_adj;
596
597                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
598                 shhwtstamp = skb_hwtstamps(skb);
599                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
600                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
601         } else  {
602                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
603         }
604 }
605
606 /**
607  *  stmmac_hwtstamp_set - control hardware timestamping.
608  *  @dev: device pointer.
609  *  @ifr: An IOCTL-specific structure that can contain a pointer to
610  *  a proprietary structure used to pass information to the driver.
611  *  Description:
612  *  This function configures the MAC to enable/disable both outgoing (TX)
613  *  and incoming (RX) packet timestamping based on user input.
614  *  Return Value:
615  *  0 on success and an appropriate negative error code on failure.
616  */
617 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
618 {
619         struct stmmac_priv *priv = netdev_priv(dev);
620         struct hwtstamp_config config;
621         u32 ptp_v2 = 0;
622         u32 tstamp_all = 0;
623         u32 ptp_over_ipv4_udp = 0;
624         u32 ptp_over_ipv6_udp = 0;
625         u32 ptp_over_ethernet = 0;
626         u32 snap_type_sel = 0;
627         u32 ts_master_en = 0;
628         u32 ts_event_en = 0;
629
630         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
631                 netdev_alert(priv->dev, "No support for HW time stamping\n");
632                 priv->hwts_tx_en = 0;
633                 priv->hwts_rx_en = 0;
634
635                 return -EOPNOTSUPP;
636         }
637
638         if (copy_from_user(&config, ifr->ifr_data,
639                            sizeof(config)))
640                 return -EFAULT;
641
642         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
643                    __func__, config.flags, config.tx_type, config.rx_filter);
644
645         if (config.tx_type != HWTSTAMP_TX_OFF &&
646             config.tx_type != HWTSTAMP_TX_ON)
647                 return -ERANGE;
648
649         if (priv->adv_ts) {
650                 switch (config.rx_filter) {
651                 case HWTSTAMP_FILTER_NONE:
652                         /* time stamp no incoming packet at all */
653                         config.rx_filter = HWTSTAMP_FILTER_NONE;
654                         break;
655
656                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
657                         /* PTP v1, UDP, any kind of event packet */
658                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
659                         /* 'xmac' hardware can support Sync, Pdelay_Req and
660                          * Pdelay_resp by setting bit14 and bits17/16 to 01.
661                          * This leaves Delay_Req timestamps out.
662                          * Enable all events *and* general purpose message
663                          * timestamping
664                          */
665                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
666                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
667                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
668                         break;
669
670                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
671                         /* PTP v1, UDP, Sync packet */
672                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
673                         /* take time stamp for SYNC messages only */
674                         ts_event_en = PTP_TCR_TSEVNTENA;
675
676                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678                         break;
679
680                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
681                         /* PTP v1, UDP, Delay_req packet */
682                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
683                         /* take time stamp for Delay_Req messages only */
684                         ts_master_en = PTP_TCR_TSMSTRENA;
685                         ts_event_en = PTP_TCR_TSEVNTENA;
686
687                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
688                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
689                         break;
690
691                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
692                         /* PTP v2, UDP, any kind of event packet */
693                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
694                         ptp_v2 = PTP_TCR_TSVER2ENA;
695                         /* take time stamp for all event messages */
696                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
697
698                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
699                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
700                         break;
701
702                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
703                         /* PTP v2, UDP, Sync packet */
704                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
705                         ptp_v2 = PTP_TCR_TSVER2ENA;
706                         /* take time stamp for SYNC messages only */
707                         ts_event_en = PTP_TCR_TSEVNTENA;
708
709                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
710                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
711                         break;
712
713                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
714                         /* PTP v2, UDP, Delay_req packet */
715                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
716                         ptp_v2 = PTP_TCR_TSVER2ENA;
717                         /* take time stamp for Delay_Req messages only */
718                         ts_master_en = PTP_TCR_TSMSTRENA;
719                         ts_event_en = PTP_TCR_TSEVNTENA;
720
721                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
722                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
723                         break;
724
725                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
726                         /* PTP v2/802.AS1 any layer, any kind of event packet */
727                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
728                         ptp_v2 = PTP_TCR_TSVER2ENA;
729                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
730                         if (priv->synopsys_id < DWMAC_CORE_4_10)
731                                 ts_event_en = PTP_TCR_TSEVNTENA;
732                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
733                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
734                         ptp_over_ethernet = PTP_TCR_TSIPENA;
735                         break;
736
737                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
738                         /* PTP v2/802.AS1, any layer, Sync packet */
739                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
740                         ptp_v2 = PTP_TCR_TSVER2ENA;
741                         /* take time stamp for SYNC messages only */
742                         ts_event_en = PTP_TCR_TSEVNTENA;
743
744                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
745                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
746                         ptp_over_ethernet = PTP_TCR_TSIPENA;
747                         break;
748
749                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
750                         /* PTP v2/802.AS1, any layer, Delay_req packet */
751                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
752                         ptp_v2 = PTP_TCR_TSVER2ENA;
753                         /* take time stamp for Delay_Req messages only */
754                         ts_master_en = PTP_TCR_TSMSTRENA;
755                         ts_event_en = PTP_TCR_TSEVNTENA;
756
757                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
758                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
759                         ptp_over_ethernet = PTP_TCR_TSIPENA;
760                         break;
761
762                 case HWTSTAMP_FILTER_NTP_ALL:
763                 case HWTSTAMP_FILTER_ALL:
764                         /* time stamp any incoming packet */
765                         config.rx_filter = HWTSTAMP_FILTER_ALL;
766                         tstamp_all = PTP_TCR_TSENALL;
767                         break;
768
769                 default:
770                         return -ERANGE;
771                 }
772         } else {
773                 switch (config.rx_filter) {
774                 case HWTSTAMP_FILTER_NONE:
775                         config.rx_filter = HWTSTAMP_FILTER_NONE;
776                         break;
777                 default:
778                         /* PTP v1, UDP, any kind of event packet */
779                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
780                         break;
781                 }
782         }
783         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
784         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
785
786         priv->systime_flags = STMMAC_HWTS_ACTIVE;
787
788         if (priv->hwts_tx_en || priv->hwts_rx_en) {
789                 priv->systime_flags |= tstamp_all | ptp_v2 |
790                                        ptp_over_ethernet | ptp_over_ipv6_udp |
791                                        ptp_over_ipv4_udp | ts_event_en |
792                                        ts_master_en | snap_type_sel;
793         }
794
795         stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
796
797         memcpy(&priv->tstamp_config, &config, sizeof(config));
798
799         return copy_to_user(ifr->ifr_data, &config,
800                             sizeof(config)) ? -EFAULT : 0;
801 }
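
/* Hedged user-space sketch (not part of the driver) showing how an
 * application requests the timestamping that stmmac_hwtstamp_set() applies:
 * a SIOCSHWTSTAMP ioctl carrying a struct hwtstamp_config reaches the driver
 * through its ndo ioctl handler. The interface name and error handling are
 * assumptions for illustration.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int demo_enable_hw_tstamp(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,	  /* timestamp outgoing packets */
		.rx_filter = HWTSTAMP_FILTER_ALL, /* timestamp all incoming packets */
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return ret;
}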
802
803 /**
804  *  stmmac_hwtstamp_get - read hardware timestamping.
805  *  @dev: device pointer.
806  *  @ifr: An IOCTL-specific structure that can contain a pointer to
807  *  a proprietary structure used to pass information to the driver.
808  *  Description:
809  *  This function obtains the current hardware timestamping settings
810  *  as requested.
811  */
812 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
813 {
814         struct stmmac_priv *priv = netdev_priv(dev);
815         struct hwtstamp_config *config = &priv->tstamp_config;
816
817         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
818                 return -EOPNOTSUPP;
819
820         return copy_to_user(ifr->ifr_data, config,
821                             sizeof(*config)) ? -EFAULT : 0;
822 }
823
824 /**
825  * stmmac_init_tstamp_counter - init hardware timestamping counter
826  * @priv: driver private structure
827  * @systime_flags: timestamping flags
828  * Description:
829  * Initialize hardware counter for packet timestamping.
830  * This is valid as long as the interface is open and not suspended.
831  * It will be rerun after resuming from suspend, in which case the
832  * timestamping flags updated by stmmac_hwtstamp_set() also need to be restored.
833  */
834 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
835 {
836         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
837         struct timespec64 now;
838         u32 sec_inc = 0;
839         u64 temp = 0;
840
841         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
842                 return -EOPNOTSUPP;
843
844         stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
845         priv->systime_flags = systime_flags;
846
847         /* program Sub Second Increment reg */
848         stmmac_config_sub_second_increment(priv, priv->ptpaddr,
849                                            priv->plat->clk_ptp_rate,
850                                            xmac, &sec_inc);
851         temp = div_u64(1000000000ULL, sec_inc);
852
853         /* Store sub second increment for later use */
854         priv->sub_second_inc = sec_inc;
855
856         /* Calculate the default addend value:
857          * addend = 2^32 * freq_div_ratio / clk_ptp_rate,
858          * where freq_div_ratio = 10^9 ns / sec_inc is the number of
859          * sub-second increments needed to advance the clock by one second.
860          */
861         temp = (u64)(temp << 32);
862         priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
863         stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
864
865         /* initialize system time */
866         ktime_get_real_ts64(&now);
867
868         /* lower 32 bits of tv_sec are safe until y2106 */
869         stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
870
871         return 0;
872 }
873 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
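
/* Worked example (user-space C) of the addend arithmetic above. The PTP
 * clock rate and sub-second increment below are assumptions chosen for a
 * clean result; the real sec_inc comes from
 * stmmac_config_sub_second_increment() and is hardware dependent.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_ptp_rate = 62500000ULL;	/* 62.5 MHz, assumed */
	uint64_t sec_inc = 20ULL;		/* 20 ns per accumulator overflow, assumed */

	/* freq_div_ratio = 10^9 / sec_inc, as in stmmac_init_tstamp_counter() */
	uint64_t freq_div_ratio = 1000000000ULL / sec_inc;	/* 50,000,000 */

	/* addend = 2^32 * freq_div_ratio / clk_ptp_rate = 0xCCCCCCCC here:
	 * the 32-bit accumulator overflows on ~80% of PTP clock cycles, and
	 * 62.5 MHz * 0.8 * 20 ns advances the clock by 10^9 ns every second.
	 */
	uint64_t addend = (freq_div_ratio << 32) / clk_ptp_rate;

	printf("addend = 0x%llx\n", (unsigned long long)addend);
	return 0;
}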
874
875 /**
876  * stmmac_init_ptp - init PTP
877  * @priv: driver private structure
878  * Description: this is to verify if the HW supports PTPv1 or PTPv2.
879  * This is done by looking at the HW cap. register.
880  * This function also registers the ptp driver.
881  */
882 static int stmmac_init_ptp(struct stmmac_priv *priv)
883 {
884         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
885         int ret;
886
887         if (priv->plat->ptp_clk_freq_config)
888                 priv->plat->ptp_clk_freq_config(priv);
889
890         ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
891         if (ret)
892                 return ret;
893
894         priv->adv_ts = 0;
895         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
896         if (xmac && priv->dma_cap.atime_stamp)
897                 priv->adv_ts = 1;
898         /* Dwmac 3.x core with extend_desc can support adv_ts */
899         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
900                 priv->adv_ts = 1;
901
902         if (priv->dma_cap.time_stamp)
903                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
904
905         if (priv->adv_ts)
906                 netdev_info(priv->dev,
907                             "IEEE 1588-2008 Advanced Timestamp supported\n");
908
909         priv->hwts_tx_en = 0;
910         priv->hwts_rx_en = 0;
911
912         return 0;
913 }
914
915 static void stmmac_release_ptp(struct stmmac_priv *priv)
916 {
917         clk_disable_unprepare(priv->plat->clk_ptp_ref);
918         stmmac_ptp_unregister(priv);
919 }
920
921 /**
922  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
923  *  @priv: driver private structure
924  *  @duplex: duplex passed to the next function
925  *  Description: It is used for configuring the flow control in all queues
926  */
927 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
928 {
929         u32 tx_cnt = priv->plat->tx_queues_to_use;
930
931         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
932                         priv->pause, tx_cnt);
933 }
934
935 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
936                                                  phy_interface_t interface)
937 {
938         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
939
940         if (!priv->hw->xpcs)
941                 return NULL;
942
943         return &priv->hw->xpcs->pcs;
944 }
945
946 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
947                               const struct phylink_link_state *state)
948 {
949         /* Nothing to do, xpcs_config() handles everything */
950 }
951
952 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
953 {
954         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
955         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
956         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
957         bool *hs_enable = &fpe_cfg->hs_enable;
958
959         if (is_up && *hs_enable) {
960                 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
961         } else {
962                 *lo_state = FPE_STATE_OFF;
963                 *lp_state = FPE_STATE_OFF;
964         }
965 }
966
967 static void stmmac_mac_link_down(struct phylink_config *config,
968                                  unsigned int mode, phy_interface_t interface)
969 {
970         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
971
972         stmmac_mac_set(priv, priv->ioaddr, false);
973         priv->eee_active = false;
974         priv->tx_lpi_enabled = false;
975         priv->eee_enabled = stmmac_eee_init(priv);
976         stmmac_set_eee_pls(priv, priv->hw, false);
977
978         if (priv->dma_cap.fpesel)
979                 stmmac_fpe_link_state_handle(priv, false);
980 }
981
982 static void stmmac_mac_link_up(struct phylink_config *config,
983                                struct phy_device *phy,
984                                unsigned int mode, phy_interface_t interface,
985                                int speed, int duplex,
986                                bool tx_pause, bool rx_pause)
987 {
988         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989         u32 old_ctrl, ctrl;
990
991         if (priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup)
992                 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
993
994         old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
995         ctrl = old_ctrl & ~priv->hw->link.speed_mask;
996
997         if (interface == PHY_INTERFACE_MODE_USXGMII) {
998                 switch (speed) {
999                 case SPEED_10000:
1000                         ctrl |= priv->hw->link.xgmii.speed10000;
1001                         break;
1002                 case SPEED_5000:
1003                         ctrl |= priv->hw->link.xgmii.speed5000;
1004                         break;
1005                 case SPEED_2500:
1006                         ctrl |= priv->hw->link.xgmii.speed2500;
1007                         break;
1008                 default:
1009                         return;
1010                 }
1011         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1012                 switch (speed) {
1013                 case SPEED_100000:
1014                         ctrl |= priv->hw->link.xlgmii.speed100000;
1015                         break;
1016                 case SPEED_50000:
1017                         ctrl |= priv->hw->link.xlgmii.speed50000;
1018                         break;
1019                 case SPEED_40000:
1020                         ctrl |= priv->hw->link.xlgmii.speed40000;
1021                         break;
1022                 case SPEED_25000:
1023                         ctrl |= priv->hw->link.xlgmii.speed25000;
1024                         break;
1025                 case SPEED_10000:
1026                         ctrl |= priv->hw->link.xgmii.speed10000;
1027                         break;
1028                 case SPEED_2500:
1029                         ctrl |= priv->hw->link.speed2500;
1030                         break;
1031                 case SPEED_1000:
1032                         ctrl |= priv->hw->link.speed1000;
1033                         break;
1034                 default:
1035                         return;
1036                 }
1037         } else {
1038                 switch (speed) {
1039                 case SPEED_2500:
1040                         ctrl |= priv->hw->link.speed2500;
1041                         break;
1042                 case SPEED_1000:
1043                         ctrl |= priv->hw->link.speed1000;
1044                         break;
1045                 case SPEED_100:
1046                         ctrl |= priv->hw->link.speed100;
1047                         break;
1048                 case SPEED_10:
1049                         ctrl |= priv->hw->link.speed10;
1050                         break;
1051                 default:
1052                         return;
1053                 }
1054         }
1055
1056         priv->speed = speed;
1057
1058         if (priv->plat->fix_mac_speed)
1059                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1060
1061         if (!duplex)
1062                 ctrl &= ~priv->hw->link.duplex;
1063         else
1064                 ctrl |= priv->hw->link.duplex;
1065
1066         /* Flow Control operation */
1067         if (rx_pause && tx_pause)
1068                 priv->flow_ctrl = FLOW_AUTO;
1069         else if (rx_pause && !tx_pause)
1070                 priv->flow_ctrl = FLOW_RX;
1071         else if (!rx_pause && tx_pause)
1072                 priv->flow_ctrl = FLOW_TX;
1073         else
1074                 priv->flow_ctrl = FLOW_OFF;
1075
1076         stmmac_mac_flow_ctrl(priv, duplex);
1077
1078         if (ctrl != old_ctrl)
1079                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1080
1081         stmmac_mac_set(priv, priv->ioaddr, true);
1082         if (phy && priv->dma_cap.eee) {
1083                 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1084                 priv->eee_enabled = stmmac_eee_init(priv);
1085                 priv->tx_lpi_enabled = priv->eee_enabled;
1086                 stmmac_set_eee_pls(priv, priv->hw, true);
1087         }
1088
1089         if (priv->dma_cap.fpesel)
1090                 stmmac_fpe_link_state_handle(priv, true);
1091 }
1092
1093 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1094         .mac_select_pcs = stmmac_mac_select_pcs,
1095         .mac_config = stmmac_mac_config,
1096         .mac_link_down = stmmac_mac_link_down,
1097         .mac_link_up = stmmac_mac_link_up,
1098 };
1099
1100 /**
1101  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1102  * @priv: driver private structure
1103  * Description: this is to verify if the HW supports the Physical Coding
1104  * Sublayer (PCS) interface, which can be used when the MAC is configured
1105  * for the TBI, RTBI, or SGMII PHY interface.
1106  */
1107 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1108 {
1109         int interface = priv->plat->interface;
1110
1111         if (priv->dma_cap.pcs) {
1112                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1113                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1114                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1115                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1116                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1117                         priv->hw->pcs = STMMAC_PCS_RGMII;
1118                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1119                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1120                         priv->hw->pcs = STMMAC_PCS_SGMII;
1121                 }
1122         }
1123 }
1124
1125 /**
1126  * stmmac_init_phy - PHY initialization
1127  * @dev: net device structure
1128  * Description: it initializes the driver's PHY state, and attaches the PHY
1129  * to the mac driver.
1130  *  Return value:
1131  *  0 on success
1132  */
1133 static int stmmac_init_phy(struct net_device *dev)
1134 {
1135         struct stmmac_priv *priv = netdev_priv(dev);
1136         struct fwnode_handle *fwnode;
1137         int ret;
1138
1139         fwnode = of_fwnode_handle(priv->plat->phylink_node);
1140         if (!fwnode)
1141                 fwnode = dev_fwnode(priv->device);
1142
1143         if (fwnode)
1144                 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1145
1146         /* Some DT bindings do not set up the PHY handle. Let's try to
1147          * parse it manually.
1148          */
1149         if (!fwnode || ret) {
1150                 int addr = priv->plat->phy_addr;
1151                 struct phy_device *phydev;
1152
1153                 phydev = mdiobus_get_phy(priv->mii, addr);
1154                 if (!phydev) {
1155                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1156                         return -ENODEV;
1157                 }
1158
1159                 ret = phylink_connect_phy(priv->phylink, phydev);
1160         }
1161
1162         if (!priv->plat->pmt) {
1163                 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1164
1165                 phylink_ethtool_get_wol(priv->phylink, &wol);
1166                 device_set_wakeup_capable(priv->device, !!wol.supported);
1167         }
1168
1169         return ret;
1170 }
1171
1172 static int stmmac_phy_setup(struct stmmac_priv *priv)
1173 {
1174         struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1175         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1176         int max_speed = priv->plat->max_speed;
1177         int mode = priv->plat->phy_interface;
1178         struct phylink *phylink;
1179
1180         priv->phylink_config.dev = &priv->dev->dev;
1181         priv->phylink_config.type = PHYLINK_NETDEV;
1182         if (priv->plat->mdio_bus_data)
1183                 priv->phylink_config.ovr_an_inband =
1184                         mdio_bus_data->xpcs_an_inband;
1185
1186         if (!fwnode)
1187                 fwnode = dev_fwnode(priv->device);
1188
1189         /* Set the platform/firmware specified interface mode */
1190         __set_bit(mode, priv->phylink_config.supported_interfaces);
1191
1192         /* If we have an xpcs, it defines which PHY interfaces are supported. */
1193         if (priv->hw->xpcs)
1194                 xpcs_get_interfaces(priv->hw->xpcs,
1195                                     priv->phylink_config.supported_interfaces);
1196
1197         priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1198                 MAC_10 | MAC_100;
1199
1200         if (!max_speed || max_speed >= 1000)
1201                 priv->phylink_config.mac_capabilities |= MAC_1000;
1202
1203         if (priv->plat->has_gmac4) {
1204                 if (!max_speed || max_speed >= 2500)
1205                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1206         } else if (priv->plat->has_xgmac) {
1207                 if (!max_speed || max_speed >= 2500)
1208                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1209                 if (!max_speed || max_speed >= 5000)
1210                         priv->phylink_config.mac_capabilities |= MAC_5000FD;
1211                 if (!max_speed || max_speed >= 10000)
1212                         priv->phylink_config.mac_capabilities |= MAC_10000FD;
1213                 if (!max_speed || max_speed >= 25000)
1214                         priv->phylink_config.mac_capabilities |= MAC_25000FD;
1215                 if (!max_speed || max_speed >= 40000)
1216                         priv->phylink_config.mac_capabilities |= MAC_40000FD;
1217                 if (!max_speed || max_speed >= 50000)
1218                         priv->phylink_config.mac_capabilities |= MAC_50000FD;
1219                 if (!max_speed || max_speed >= 100000)
1220                         priv->phylink_config.mac_capabilities |= MAC_100000FD;
1221         }
1222
1223         /* Half-Duplex can only work with single queue */
1224         if (priv->plat->tx_queues_to_use > 1)
1225                 priv->phylink_config.mac_capabilities &=
1226                         ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1227         priv->phylink_config.mac_managed_pm = true;
1228
1229         phylink = phylink_create(&priv->phylink_config, fwnode,
1230                                  mode, &stmmac_phylink_mac_ops);
1231         if (IS_ERR(phylink))
1232                 return PTR_ERR(phylink);
1233
1234         priv->phylink = phylink;
1235         return 0;
1236 }
1237
1238 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1239                                     struct stmmac_dma_conf *dma_conf)
1240 {
1241         u32 rx_cnt = priv->plat->rx_queues_to_use;
1242         unsigned int desc_size;
1243         void *head_rx;
1244         u32 queue;
1245
1246         /* Display RX rings */
1247         for (queue = 0; queue < rx_cnt; queue++) {
1248                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1249
1250                 pr_info("\tRX Queue %u rings\n", queue);
1251
1252                 if (priv->extend_desc) {
1253                         head_rx = (void *)rx_q->dma_erx;
1254                         desc_size = sizeof(struct dma_extended_desc);
1255                 } else {
1256                         head_rx = (void *)rx_q->dma_rx;
1257                         desc_size = sizeof(struct dma_desc);
1258                 }
1259
1260                 /* Display RX ring */
1261                 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1262                                     rx_q->dma_rx_phy, desc_size);
1263         }
1264 }
1265
1266 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1267                                     struct stmmac_dma_conf *dma_conf)
1268 {
1269         u32 tx_cnt = priv->plat->tx_queues_to_use;
1270         unsigned int desc_size;
1271         void *head_tx;
1272         u32 queue;
1273
1274         /* Display TX rings */
1275         for (queue = 0; queue < tx_cnt; queue++) {
1276                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1277
1278                 pr_info("\tTX Queue %d rings\n", queue);
1279
1280                 if (priv->extend_desc) {
1281                         head_tx = (void *)tx_q->dma_etx;
1282                         desc_size = sizeof(struct dma_extended_desc);
1283                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1284                         head_tx = (void *)tx_q->dma_entx;
1285                         desc_size = sizeof(struct dma_edesc);
1286                 } else {
1287                         head_tx = (void *)tx_q->dma_tx;
1288                         desc_size = sizeof(struct dma_desc);
1289                 }
1290
1291                 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1292                                     tx_q->dma_tx_phy, desc_size);
1293         }
1294 }
1295
1296 static void stmmac_display_rings(struct stmmac_priv *priv,
1297                                  struct stmmac_dma_conf *dma_conf)
1298 {
1299         /* Display RX ring */
1300         stmmac_display_rx_rings(priv, dma_conf);
1301
1302         /* Display TX ring */
1303         stmmac_display_tx_rings(priv, dma_conf);
1304 }
1305
1306 static int stmmac_set_bfsize(int mtu, int bufsize)
1307 {
1308         int ret = bufsize;
1309
1310         if (mtu >= BUF_SIZE_8KiB)
1311                 ret = BUF_SIZE_16KiB;
1312         else if (mtu >= BUF_SIZE_4KiB)
1313                 ret = BUF_SIZE_8KiB;
1314         else if (mtu >= BUF_SIZE_2KiB)
1315                 ret = BUF_SIZE_4KiB;
1316         else if (mtu > DEFAULT_BUFSIZE)
1317                 ret = BUF_SIZE_2KiB;
1318         else
1319                 ret = DEFAULT_BUFSIZE;
1320
1321         return ret;
1322 }
1323
1324 /**
1325  * stmmac_clear_rx_descriptors - clear RX descriptors
1326  * @priv: driver private structure
1327  * @dma_conf: structure to take the dma data
1328  * @queue: RX queue index
1329  * Description: this function is called to clear the RX descriptors;
1330  * it handles both basic and extended descriptors.
1331  */
1332 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1333                                         struct stmmac_dma_conf *dma_conf,
1334                                         u32 queue)
1335 {
1336         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1337         int i;
1338
1339         /* Clear the RX descriptors */
1340         for (i = 0; i < dma_conf->dma_rx_size; i++)
1341                 if (priv->extend_desc)
1342                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1343                                         priv->use_riwt, priv->mode,
1344                                         (i == dma_conf->dma_rx_size - 1),
1345                                         dma_conf->dma_buf_sz);
1346                 else
1347                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1348                                         priv->use_riwt, priv->mode,
1349                                         (i == dma_conf->dma_rx_size - 1),
1350                                         dma_conf->dma_buf_sz);
1351 }
1352
1353 /**
1354  * stmmac_clear_tx_descriptors - clear tx descriptors
1355  * @priv: driver private structure
1356  * @dma_conf: structure to take the dma data
1357  * @queue: TX queue index.
1358  * Description: this function is called to clear the TX descriptors;
1359  * it handles both basic and extended descriptors.
1360  */
1361 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1362                                         struct stmmac_dma_conf *dma_conf,
1363                                         u32 queue)
1364 {
1365         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1366         int i;
1367
1368         /* Clear the TX descriptors */
1369         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1370                 int last = (i == (dma_conf->dma_tx_size - 1));
1371                 struct dma_desc *p;
1372
1373                 if (priv->extend_desc)
1374                         p = &tx_q->dma_etx[i].basic;
1375                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1376                         p = &tx_q->dma_entx[i].basic;
1377                 else
1378                         p = &tx_q->dma_tx[i];
1379
1380                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1381         }
1382 }
1383
1384 /**
1385  * stmmac_clear_descriptors - clear descriptors
1386  * @priv: driver private structure
1387  * @dma_conf: structure to take the dma data
1388  * Description: this function is called to clear the TX and RX descriptors
1389  * whether basic or extended descriptors are in use.
1390  */
1391 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1392                                      struct stmmac_dma_conf *dma_conf)
1393 {
1394         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1395         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1396         u32 queue;
1397
1398         /* Clear the RX descriptors */
1399         for (queue = 0; queue < rx_queue_cnt; queue++)
1400                 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1401
1402         /* Clear the TX descriptors */
1403         for (queue = 0; queue < tx_queue_cnt; queue++)
1404                 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1405 }
1406
1407 /**
1408  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1409  * @priv: driver private structure
1410  * @dma_conf: structure to take the dma data
1411  * @p: descriptor pointer
1412  * @i: descriptor index
1413  * @flags: gfp flag
1414  * @queue: RX queue index
1415  * Description: this function is called to allocate a receive buffer, perform
1416  * the DMA mapping and init the descriptor.
1417  */
1418 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1419                                   struct stmmac_dma_conf *dma_conf,
1420                                   struct dma_desc *p,
1421                                   int i, gfp_t flags, u32 queue)
1422 {
1423         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1424         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1425         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1426
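	/* When the DMA engine cannot address more than 32 bits, force the
	 * page allocation into 32-bit addressable memory.
	 */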
1427         if (priv->dma_cap.addr64 <= 32)
1428                 gfp |= GFP_DMA32;
1429
1430         if (!buf->page) {
1431                 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1432                 if (!buf->page)
1433                         return -ENOMEM;
1434                 buf->page_offset = stmmac_rx_offset(priv);
1435         }
1436
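	/* With Split Header (SPH) enabled, a second buffer is provided so the
	 * hardware can place the packet header and the payload in separate
	 * buffers.
	 */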
1437         if (priv->sph && !buf->sec_page) {
1438                 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1439                 if (!buf->sec_page)
1440                         return -ENOMEM;
1441
1442                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1443                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1444         } else {
1445                 buf->sec_page = NULL;
1446                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1447         }
1448
1449         buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1450
1451         stmmac_set_desc_addr(priv, p, buf->addr);
1452         if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1453                 stmmac_init_desc3(priv, p);
1454
1455         return 0;
1456 }
1457
1458 /**
1459  * stmmac_free_rx_buffer - free RX dma buffers
1460  * @priv: private structure
1461  * @rx_q: RX queue
1462  * @i: buffer index.
1463  */
1464 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1465                                   struct stmmac_rx_queue *rx_q,
1466                                   int i)
1467 {
1468         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1469
1470         if (buf->page)
1471                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1472         buf->page = NULL;
1473
1474         if (buf->sec_page)
1475                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1476         buf->sec_page = NULL;
1477 }
1478
1479 /**
1480  * stmmac_free_tx_buffer - free TX dma buffers
1481  * @priv: private structure
1482  * @dma_conf: structure to take the dma data
1483  * @queue: TX queue index
1484  * @i: buffer index.
1485  */
1486 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1487                                   struct stmmac_dma_conf *dma_conf,
1488                                   u32 queue, int i)
1489 {
1490         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1491
1492         if (tx_q->tx_skbuff_dma[i].buf &&
1493             tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1494                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1495                         dma_unmap_page(priv->device,
1496                                        tx_q->tx_skbuff_dma[i].buf,
1497                                        tx_q->tx_skbuff_dma[i].len,
1498                                        DMA_TO_DEVICE);
1499                 else
1500                         dma_unmap_single(priv->device,
1501                                          tx_q->tx_skbuff_dma[i].buf,
1502                                          tx_q->tx_skbuff_dma[i].len,
1503                                          DMA_TO_DEVICE);
1504         }
1505
1506         if (tx_q->xdpf[i] &&
1507             (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1508              tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1509                 xdp_return_frame(tx_q->xdpf[i]);
1510                 tx_q->xdpf[i] = NULL;
1511         }
1512
1513         if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1514                 tx_q->xsk_frames_done++;
1515
1516         if (tx_q->tx_skbuff[i] &&
1517             tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1518                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1519                 tx_q->tx_skbuff[i] = NULL;
1520         }
1521
1522         tx_q->tx_skbuff_dma[i].buf = 0;
1523         tx_q->tx_skbuff_dma[i].map_as_page = false;
1524 }
1525
1526 /**
1527  * dma_free_rx_skbufs - free RX dma buffers
1528  * @priv: private structure
1529  * @dma_conf: structure to take the dma data
1530  * @queue: RX queue index
1531  */
1532 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1533                                struct stmmac_dma_conf *dma_conf,
1534                                u32 queue)
1535 {
1536         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1537         int i;
1538
1539         for (i = 0; i < dma_conf->dma_rx_size; i++)
1540                 stmmac_free_rx_buffer(priv, rx_q, i);
1541 }
1542
1543 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1544                                    struct stmmac_dma_conf *dma_conf,
1545                                    u32 queue, gfp_t flags)
1546 {
1547         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1548         int i;
1549
1550         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1551                 struct dma_desc *p;
1552                 int ret;
1553
1554                 if (priv->extend_desc)
1555                         p = &((rx_q->dma_erx + i)->basic);
1556                 else
1557                         p = rx_q->dma_rx + i;
1558
1559                 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1560                                              queue);
1561                 if (ret)
1562                         return ret;
1563
1564                 rx_q->buf_alloc_num++;
1565         }
1566
1567         return 0;
1568 }
1569
1570 /**
1571  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1572  * @priv: private structure
1573  * @dma_conf: structure to take the dma data
1574  * @queue: RX queue index
1575  */
1576 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1577                                 struct stmmac_dma_conf *dma_conf,
1578                                 u32 queue)
1579 {
1580         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1581         int i;
1582
1583         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1584                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1585
1586                 if (!buf->xdp)
1587                         continue;
1588
1589                 xsk_buff_free(buf->xdp);
1590                 buf->xdp = NULL;
1591         }
1592 }
1593
1594 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1595                                       struct stmmac_dma_conf *dma_conf,
1596                                       u32 queue)
1597 {
1598         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1599         int i;
1600
1601         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1602                 struct stmmac_rx_buffer *buf;
1603                 dma_addr_t dma_addr;
1604                 struct dma_desc *p;
1605
1606                 if (priv->extend_desc)
1607                         p = (struct dma_desc *)(rx_q->dma_erx + i);
1608                 else
1609                         p = rx_q->dma_rx + i;
1610
1611                 buf = &rx_q->buf_pool[i];
1612
1613                 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1614                 if (!buf->xdp)
1615                         return -ENOMEM;
1616
1617                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1618                 stmmac_set_desc_addr(priv, p, dma_addr);
1619                 rx_q->buf_alloc_num++;
1620         }
1621
1622         return 0;
1623 }
1624
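/* Return the XSK buffer pool bound to @queue, or NULL when XDP is disabled
 * or the queue is not operating in zero-copy mode.
 */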
1625 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1626 {
1627         if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1628                 return NULL;
1629
1630         return xsk_get_pool_from_qid(priv->dev, queue);
1631 }
1632
1633 /**
1634  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1635  * @priv: driver private structure
1636  * @dma_conf: structure to take the dma data
1637  * @queue: RX queue index
1638  * @flags: gfp flag.
1639  * Description: this function initializes the DMA RX descriptors
1640  * and allocates the socket buffers. It supports the chained and ring
1641  * modes.
1642  */
1643 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1644                                     struct stmmac_dma_conf *dma_conf,
1645                                     u32 queue, gfp_t flags)
1646 {
1647         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1648         int ret;
1649
1650         netif_dbg(priv, probe, priv->dev,
1651                   "(%s) dma_rx_phy=0x%08x\n", __func__,
1652                   (u32)rx_q->dma_rx_phy);
1653
1654         stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1655
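	/* Drop any previously registered memory model before choosing between
	 * the XSK buffer pool and the page pool below.
	 */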
1656         xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1657
1658         rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1659
1660         if (rx_q->xsk_pool) {
1661                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1662                                                    MEM_TYPE_XSK_BUFF_POOL,
1663                                                    NULL));
1664                 netdev_info(priv->dev,
1665                             "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1666                             rx_q->queue_index);
1667                 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1668         } else {
1669                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1670                                                    MEM_TYPE_PAGE_POOL,
1671                                                    rx_q->page_pool));
1672                 netdev_info(priv->dev,
1673                             "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1674                             rx_q->queue_index);
1675         }
1676
1677         if (rx_q->xsk_pool) {
1678                 /* RX XDP ZC buffer pool may not be populated, e.g.
1679                  * xdpsock TX-only.
1680                  */
1681                 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1682         } else {
1683                 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1684                 if (ret < 0)
1685                         return -ENOMEM;
1686         }
1687
1688         /* Setup the chained descriptor addresses */
1689         if (priv->mode == STMMAC_CHAIN_MODE) {
1690                 if (priv->extend_desc)
1691                         stmmac_mode_init(priv, rx_q->dma_erx,
1692                                          rx_q->dma_rx_phy,
1693                                          dma_conf->dma_rx_size, 1);
1694                 else
1695                         stmmac_mode_init(priv, rx_q->dma_rx,
1696                                          rx_q->dma_rx_phy,
1697                                          dma_conf->dma_rx_size, 0);
1698         }
1699
1700         return 0;
1701 }
1702
1703 static int init_dma_rx_desc_rings(struct net_device *dev,
1704                                   struct stmmac_dma_conf *dma_conf,
1705                                   gfp_t flags)
1706 {
1707         struct stmmac_priv *priv = netdev_priv(dev);
1708         u32 rx_count = priv->plat->rx_queues_to_use;
1709         int queue;
1710         int ret;
1711
1712         /* RX INITIALIZATION */
1713         netif_dbg(priv, probe, priv->dev,
1714                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1715
1716         for (queue = 0; queue < rx_count; queue++) {
1717                 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1718                 if (ret)
1719                         goto err_init_rx_buffers;
1720         }
1721
1722         return 0;
1723
1724 err_init_rx_buffers:
1725         while (queue >= 0) {
1726                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1727
1728                 if (rx_q->xsk_pool)
1729                         dma_free_rx_xskbufs(priv, dma_conf, queue);
1730                 else
1731                         dma_free_rx_skbufs(priv, dma_conf, queue);
1732
1733                 rx_q->buf_alloc_num = 0;
1734                 rx_q->xsk_pool = NULL;
1735
1736                 queue--;
1737         }
1738
1739         return ret;
1740 }
1741
1742 /**
1743  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1744  * @priv: driver private structure
1745  * @dma_conf: structure to take the dma data
1746  * @queue: TX queue index
1747  * Description: this function initializes the DMA TX descriptors
1748  * and the per-descriptor TX bookkeeping. It supports the chained and ring
1749  * modes.
1750  */
1751 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1752                                     struct stmmac_dma_conf *dma_conf,
1753                                     u32 queue)
1754 {
1755         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1756         int i;
1757
1758         netif_dbg(priv, probe, priv->dev,
1759                   "(%s) dma_tx_phy=0x%08x\n", __func__,
1760                   (u32)tx_q->dma_tx_phy);
1761
1762         /* Setup the chained descriptor addresses */
1763         if (priv->mode == STMMAC_CHAIN_MODE) {
1764                 if (priv->extend_desc)
1765                         stmmac_mode_init(priv, tx_q->dma_etx,
1766                                          tx_q->dma_tx_phy,
1767                                          dma_conf->dma_tx_size, 1);
1768                 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1769                         stmmac_mode_init(priv, tx_q->dma_tx,
1770                                          tx_q->dma_tx_phy,
1771                                          dma_conf->dma_tx_size, 0);
1772         }
1773
1774         tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1775
1776         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1777                 struct dma_desc *p;
1778
1779                 if (priv->extend_desc)
1780                         p = &((tx_q->dma_etx + i)->basic);
1781                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1782                         p = &((tx_q->dma_entx + i)->basic);
1783                 else
1784                         p = tx_q->dma_tx + i;
1785
1786                 stmmac_clear_desc(priv, p);
1787
1788                 tx_q->tx_skbuff_dma[i].buf = 0;
1789                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1790                 tx_q->tx_skbuff_dma[i].len = 0;
1791                 tx_q->tx_skbuff_dma[i].last_segment = false;
1792                 tx_q->tx_skbuff[i] = NULL;
1793         }
1794
1795         return 0;
1796 }
1797
1798 static int init_dma_tx_desc_rings(struct net_device *dev,
1799                                   struct stmmac_dma_conf *dma_conf)
1800 {
1801         struct stmmac_priv *priv = netdev_priv(dev);
1802         u32 tx_queue_cnt;
1803         u32 queue;
1804
1805         tx_queue_cnt = priv->plat->tx_queues_to_use;
1806
1807         for (queue = 0; queue < tx_queue_cnt; queue++)
1808                 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1809
1810         return 0;
1811 }
1812
1813 /**
1814  * init_dma_desc_rings - init the RX/TX descriptor rings
1815  * @dev: net device structure
1816  * @dma_conf: structure to take the dma data
1817  * @flags: gfp flag.
1818  * Description: this function initializes the DMA RX/TX descriptors
1819  * and allocates the socket buffers. It supports the chained and ring
1820  * modes.
1821  */
1822 static int init_dma_desc_rings(struct net_device *dev,
1823                                struct stmmac_dma_conf *dma_conf,
1824                                gfp_t flags)
1825 {
1826         struct stmmac_priv *priv = netdev_priv(dev);
1827         int ret;
1828
1829         ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1830         if (ret)
1831                 return ret;
1832
1833         ret = init_dma_tx_desc_rings(dev, dma_conf);
1834
1835         stmmac_clear_descriptors(priv, dma_conf);
1836
1837         if (netif_msg_hw(priv))
1838                 stmmac_display_rings(priv, dma_conf);
1839
1840         return ret;
1841 }
1842
1843 /**
1844  * dma_free_tx_skbufs - free TX dma buffers
1845  * @priv: private structure
1846  * @dma_conf: structure to take the dma data
1847  * @queue: TX queue index
1848  */
1849 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1850                                struct stmmac_dma_conf *dma_conf,
1851                                u32 queue)
1852 {
1853         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1854         int i;
1855
1856         tx_q->xsk_frames_done = 0;
1857
1858         for (i = 0; i < dma_conf->dma_tx_size; i++)
1859                 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1860
1861         if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1862                 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1863                 tx_q->xsk_frames_done = 0;
1864                 tx_q->xsk_pool = NULL;
1865         }
1866 }
1867
1868 /**
1869  * stmmac_free_tx_skbufs - free TX skb buffers
1870  * @priv: private structure
1871  */
1872 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1873 {
1874         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1875         u32 queue;
1876
1877         for (queue = 0; queue < tx_queue_cnt; queue++)
1878                 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1879 }
1880
1881 /**
1882  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1883  * @priv: private structure
1884  * @dma_conf: structure to take the dma data
1885  * @queue: RX queue index
1886  */
1887 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1888                                          struct stmmac_dma_conf *dma_conf,
1889                                          u32 queue)
1890 {
1891         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1892
1893         /* Release the DMA RX socket buffers */
1894         if (rx_q->xsk_pool)
1895                 dma_free_rx_xskbufs(priv, dma_conf, queue);
1896         else
1897                 dma_free_rx_skbufs(priv, dma_conf, queue);
1898
1899         rx_q->buf_alloc_num = 0;
1900         rx_q->xsk_pool = NULL;
1901
1902         /* Free DMA regions of consistent memory previously allocated */
1903         if (!priv->extend_desc)
1904                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1905                                   sizeof(struct dma_desc),
1906                                   rx_q->dma_rx, rx_q->dma_rx_phy);
1907         else
1908                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1909                                   sizeof(struct dma_extended_desc),
1910                                   rx_q->dma_erx, rx_q->dma_rx_phy);
1911
1912         if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1913                 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1914
1915         kfree(rx_q->buf_pool);
1916         if (rx_q->page_pool)
1917                 page_pool_destroy(rx_q->page_pool);
1918 }
1919
1920 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921                                        struct stmmac_dma_conf *dma_conf)
1922 {
1923         u32 rx_count = priv->plat->rx_queues_to_use;
1924         u32 queue;
1925
1926         /* Free RX queue resources */
1927         for (queue = 0; queue < rx_count; queue++)
1928                 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1929 }
1930
1931 /**
1932  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1933  * @priv: private structure
1934  * @dma_conf: structure to take the dma data
1935  * @queue: TX queue index
1936  */
1937 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1938                                          struct stmmac_dma_conf *dma_conf,
1939                                          u32 queue)
1940 {
1941         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1942         size_t size;
1943         void *addr;
1944
1945         /* Release the DMA TX socket buffers */
1946         dma_free_tx_skbufs(priv, dma_conf, queue);
1947
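	/* Pick the descriptor layout actually used by this queue so the
	 * coherent region is freed with the same size it was allocated with.
	 */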
1948         if (priv->extend_desc) {
1949                 size = sizeof(struct dma_extended_desc);
1950                 addr = tx_q->dma_etx;
1951         } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1952                 size = sizeof(struct dma_edesc);
1953                 addr = tx_q->dma_entx;
1954         } else {
1955                 size = sizeof(struct dma_desc);
1956                 addr = tx_q->dma_tx;
1957         }
1958
1959         size *= dma_conf->dma_tx_size;
1960
1961         dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1962
1963         kfree(tx_q->tx_skbuff_dma);
1964         kfree(tx_q->tx_skbuff);
1965 }
1966
1967 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1968                                        struct stmmac_dma_conf *dma_conf)
1969 {
1970         u32 tx_count = priv->plat->tx_queues_to_use;
1971         u32 queue;
1972
1973         /* Free TX queue resources */
1974         for (queue = 0; queue < tx_count; queue++)
1975                 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1976 }
1977
1978 /**
1979  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1980  * @priv: private structure
1981  * @dma_conf: structure to take the dma data
1982  * @queue: RX queue index
1983  * Description: according to which descriptor can be used (extend or basic)
1984  * this function allocates the resources for TX and RX paths. In case of
1985  * reception, for example, it pre-allocated the RX socket buffer in order to
1986  * allow zero-copy mechanism.
1987  */
1988 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1989                                          struct stmmac_dma_conf *dma_conf,
1990                                          u32 queue)
1991 {
1992         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1993         struct stmmac_channel *ch = &priv->channel[queue];
1994         bool xdp_prog = stmmac_xdp_is_enabled(priv);
1995         struct page_pool_params pp_params = { 0 };
1996         unsigned int num_pages;
1997         unsigned int napi_id;
1998         int ret;
1999
2000         rx_q->queue_index = queue;
2001         rx_q->priv_data = priv;
2002
2003         pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2004         pp_params.pool_size = dma_conf->dma_rx_size;
2005         num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2006         pp_params.order = ilog2(num_pages);
2007         pp_params.nid = dev_to_node(priv->device);
2008         pp_params.dev = priv->device;
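	/* XDP_TX transmits directly out of the RX pages, so map them
	 * bidirectionally when an XDP program is attached.
	 */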
2009         pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2010         pp_params.offset = stmmac_rx_offset(priv);
2011         pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2012
2013         rx_q->page_pool = page_pool_create(&pp_params);
2014         if (IS_ERR(rx_q->page_pool)) {
2015                 ret = PTR_ERR(rx_q->page_pool);
2016                 rx_q->page_pool = NULL;
2017                 return ret;
2018         }
2019
2020         rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2021                                  sizeof(*rx_q->buf_pool),
2022                                  GFP_KERNEL);
2023         if (!rx_q->buf_pool)
2024                 return -ENOMEM;
2025
2026         if (priv->extend_desc) {
2027                 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2028                                                    dma_conf->dma_rx_size *
2029                                                    sizeof(struct dma_extended_desc),
2030                                                    &rx_q->dma_rx_phy,
2031                                                    GFP_KERNEL);
2032                 if (!rx_q->dma_erx)
2033                         return -ENOMEM;
2034
2035         } else {
2036                 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2037                                                   dma_conf->dma_rx_size *
2038                                                   sizeof(struct dma_desc),
2039                                                   &rx_q->dma_rx_phy,
2040                                                   GFP_KERNEL);
2041                 if (!rx_q->dma_rx)
2042                         return -ENOMEM;
2043         }
2044
2045         if (stmmac_xdp_is_enabled(priv) &&
2046             test_bit(queue, priv->af_xdp_zc_qps))
2047                 napi_id = ch->rxtx_napi.napi_id;
2048         else
2049                 napi_id = ch->rx_napi.napi_id;
2050
2051         ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2052                                rx_q->queue_index,
2053                                napi_id);
2054         if (ret) {
2055                 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2056                 return -EINVAL;
2057         }
2058
2059         return 0;
2060 }
2061
2062 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2063                                        struct stmmac_dma_conf *dma_conf)
2064 {
2065         u32 rx_count = priv->plat->rx_queues_to_use;
2066         u32 queue;
2067         int ret;
2068
2069         /* RX queues buffers and DMA */
2070         for (queue = 0; queue < rx_count; queue++) {
2071                 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2072                 if (ret)
2073                         goto err_dma;
2074         }
2075
2076         return 0;
2077
2078 err_dma:
2079         free_dma_rx_desc_resources(priv, dma_conf);
2080
2081         return ret;
2082 }
2083
2084 /**
2085  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2086  * @priv: private structure
2087  * @dma_conf: structure to take the dma data
2088  * @queue: TX queue index
2089  * Description: according to which descriptor can be used (extended or basic)
2090  * this function allocates the resources for the TX path: the
2091  * per-descriptor bookkeeping arrays and the TX descriptor ring in
2092  * coherent DMA memory.
2093  */
2094 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2095                                          struct stmmac_dma_conf *dma_conf,
2096                                          u32 queue)
2097 {
2098         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2099         size_t size;
2100         void *addr;
2101
2102         tx_q->queue_index = queue;
2103         tx_q->priv_data = priv;
2104
2105         tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2106                                       sizeof(*tx_q->tx_skbuff_dma),
2107                                       GFP_KERNEL);
2108         if (!tx_q->tx_skbuff_dma)
2109                 return -ENOMEM;
2110
2111         tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2112                                   sizeof(struct sk_buff *),
2113                                   GFP_KERNEL);
2114         if (!tx_q->tx_skbuff)
2115                 return -ENOMEM;
2116
2117         if (priv->extend_desc)
2118                 size = sizeof(struct dma_extended_desc);
2119         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2120                 size = sizeof(struct dma_edesc);
2121         else
2122                 size = sizeof(struct dma_desc);
2123
2124         size *= dma_conf->dma_tx_size;
2125
2126         addr = dma_alloc_coherent(priv->device, size,
2127                                   &tx_q->dma_tx_phy, GFP_KERNEL);
2128         if (!addr)
2129                 return -ENOMEM;
2130
2131         if (priv->extend_desc)
2132                 tx_q->dma_etx = addr;
2133         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2134                 tx_q->dma_entx = addr;
2135         else
2136                 tx_q->dma_tx = addr;
2137
2138         return 0;
2139 }
2140
2141 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2142                                        struct stmmac_dma_conf *dma_conf)
2143 {
2144         u32 tx_count = priv->plat->tx_queues_to_use;
2145         u32 queue;
2146         int ret;
2147
2148         /* TX queues buffers and DMA */
2149         for (queue = 0; queue < tx_count; queue++) {
2150                 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2151                 if (ret)
2152                         goto err_dma;
2153         }
2154
2155         return 0;
2156
2157 err_dma:
2158         free_dma_tx_desc_resources(priv, dma_conf);
2159         return ret;
2160 }
2161
2162 /**
2163  * alloc_dma_desc_resources - alloc TX/RX resources.
2164  * @priv: private structure
2165  * @dma_conf: structure to take the dma data
2166  * Description: according to which descriptor can be used (extended or basic)
2167  * this function allocates the resources for the TX and RX paths. In case of
2168  * reception, for example, it pre-allocates the RX socket buffers in order
2169  * to allow a zero-copy mechanism.
2170  */
2171 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2172                                     struct stmmac_dma_conf *dma_conf)
2173 {
2174         /* RX Allocation */
2175         int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2176
2177         if (ret)
2178                 return ret;
2179
2180         ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2181
2182         return ret;
2183 }
2184
2185 /**
2186  * free_dma_desc_resources - free dma desc resources
2187  * @priv: private structure
2188  * @dma_conf: structure to take the dma data
2189  */
2190 static void free_dma_desc_resources(struct stmmac_priv *priv,
2191                                     struct stmmac_dma_conf *dma_conf)
2192 {
2193         /* Release the DMA TX socket buffers */
2194         free_dma_tx_desc_resources(priv, dma_conf);
2195
2196         /* Release the DMA RX socket buffers later
2197          * to ensure all pending XDP_TX buffers are returned.
2198          */
2199         free_dma_rx_desc_resources(priv, dma_conf);
2200 }
2201
2202 /**
2203  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2204  *  @priv: driver private structure
2205  *  Description: It is used for enabling the rx queues in the MAC
2206  */
2207 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2208 {
2209         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2210         int queue;
2211         u8 mode;
2212
2213         for (queue = 0; queue < rx_queues_count; queue++) {
2214                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2215                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2216         }
2217 }
2218
2219 /**
2220  * stmmac_start_rx_dma - start RX DMA channel
2221  * @priv: driver private structure
2222  * @chan: RX channel index
2223  * Description:
2224  * This starts an RX DMA channel
2225  */
2226 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2227 {
2228         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2229         stmmac_start_rx(priv, priv->ioaddr, chan);
2230 }
2231
2232 /**
2233  * stmmac_start_tx_dma - start TX DMA channel
2234  * @priv: driver private structure
2235  * @chan: TX channel index
2236  * Description:
2237  * This starts a TX DMA channel
2238  */
2239 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2240 {
2241         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2242         stmmac_start_tx(priv, priv->ioaddr, chan);
2243 }
2244
2245 /**
2246  * stmmac_stop_rx_dma - stop RX DMA channel
2247  * @priv: driver private structure
2248  * @chan: RX channel index
2249  * Description:
2250  * This stops an RX DMA channel
2251  */
2252 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2253 {
2254         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2255         stmmac_stop_rx(priv, priv->ioaddr, chan);
2256 }
2257
2258 /**
2259  * stmmac_stop_tx_dma - stop TX DMA channel
2260  * @priv: driver private structure
2261  * @chan: TX channel index
2262  * Description:
2263  * This stops a TX DMA channel
2264  */
2265 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2266 {
2267         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2268         stmmac_stop_tx(priv, priv->ioaddr, chan);
2269 }
2270
2271 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2272 {
2273         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2274         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2275         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2276         u32 chan;
2277
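	/* DMA interrupts are handled per channel; walk up to the larger of the
	 * RX and TX channel counts so every channel in use is covered.
	 */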
2278         for (chan = 0; chan < dma_csr_ch; chan++) {
2279                 struct stmmac_channel *ch = &priv->channel[chan];
2280                 unsigned long flags;
2281
2282                 spin_lock_irqsave(&ch->lock, flags);
2283                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2284                 spin_unlock_irqrestore(&ch->lock, flags);
2285         }
2286 }
2287
2288 /**
2289  * stmmac_start_all_dma - start all RX and TX DMA channels
2290  * @priv: driver private structure
2291  * Description:
2292  * This starts all the RX and TX DMA channels
2293  */
2294 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2295 {
2296         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2297         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2298         u32 chan = 0;
2299
2300         for (chan = 0; chan < rx_channels_count; chan++)
2301                 stmmac_start_rx_dma(priv, chan);
2302
2303         for (chan = 0; chan < tx_channels_count; chan++)
2304                 stmmac_start_tx_dma(priv, chan);
2305 }
2306
2307 /**
2308  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2309  * @priv: driver private structure
2310  * Description:
2311  * This stops the RX and TX DMA channels
2312  */
2313 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2314 {
2315         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2316         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2317         u32 chan = 0;
2318
2319         for (chan = 0; chan < rx_channels_count; chan++)
2320                 stmmac_stop_rx_dma(priv, chan);
2321
2322         for (chan = 0; chan < tx_channels_count; chan++)
2323                 stmmac_stop_tx_dma(priv, chan);
2324 }
2325
2326 /**
2327  *  stmmac_dma_operation_mode - HW DMA operation mode
2328  *  @priv: driver private structure
2329  *  Description: it is used for configuring the DMA operation mode register in
2330  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2331  */
2332 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2333 {
2334         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2335         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2336         int rxfifosz = priv->plat->rx_fifo_size;
2337         int txfifosz = priv->plat->tx_fifo_size;
2338         u32 txmode = 0;
2339         u32 rxmode = 0;
2340         u32 chan = 0;
2341         u8 qmode = 0;
2342
2343         if (rxfifosz == 0)
2344                 rxfifosz = priv->dma_cap.rx_fifo_size;
2345         if (txfifosz == 0)
2346                 txfifosz = priv->dma_cap.tx_fifo_size;
2347
2348         /* Adjust for real per queue fifo size */
2349         rxfifosz /= rx_channels_count;
2350         txfifosz /= tx_channels_count;
2351
2352         if (priv->plat->force_thresh_dma_mode) {
2353                 txmode = tc;
2354                 rxmode = tc;
2355         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2356                 /*
2357                  * In case of GMAC, SF mode can be enabled
2358                  * to perform the TX COE in HW. This depends on:
2359                  * 1) TX COE is actually supported
2360                  * 2) there is no buggy Jumbo frame support
2361                  *    that requires the checksum not to be inserted in the TDES.
2362                  */
2363                 txmode = SF_DMA_MODE;
2364                 rxmode = SF_DMA_MODE;
2365                 priv->xstats.threshold = SF_DMA_MODE;
2366         } else {
2367                 txmode = tc;
2368                 rxmode = SF_DMA_MODE;
2369         }
2370
2371         /* configure all channels */
2372         for (chan = 0; chan < rx_channels_count; chan++) {
2373                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2374                 u32 buf_size;
2375
2376                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2377
2378                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2379                                 rxfifosz, qmode);
2380
2381                 if (rx_q->xsk_pool) {
2382                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2383                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2384                                               buf_size,
2385                                               chan);
2386                 } else {
2387                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2388                                               priv->dma_conf.dma_buf_sz,
2389                                               chan);
2390                 }
2391         }
2392
2393         for (chan = 0; chan < tx_channels_count; chan++) {
2394                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2395
2396                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2397                                 txfifosz, qmode);
2398         }
2399 }
2400
2401 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2402 {
2403         struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2404         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2405         struct xsk_buff_pool *pool = tx_q->xsk_pool;
2406         unsigned int entry = tx_q->cur_tx;
2407         struct dma_desc *tx_desc = NULL;
2408         struct xdp_desc xdp_desc;
2409         bool work_done = true;
2410
2411         /* Avoids TX time-out as we are sharing with slow path */
2412         txq_trans_cond_update(nq);
2413
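	/* Never submit more frames than there are free TX descriptors. */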
2414         budget = min(budget, stmmac_tx_avail(priv, queue));
2415
2416         while (budget-- > 0) {
2417                 dma_addr_t dma_addr;
2418                 bool set_ic;
2419
2420                 /* We are sharing with the slow path and stop XSK TX desc submission when
2421                  * the available TX ring space falls below the threshold.
2422                  */
2423                 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2424                     !netif_carrier_ok(priv->dev)) {
2425                         work_done = false;
2426                         break;
2427                 }
2428
2429                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2430                         break;
2431
2432                 if (likely(priv->extend_desc))
2433                         tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2434                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2435                         tx_desc = &tx_q->dma_entx[entry].basic;
2436                 else
2437                         tx_desc = tx_q->dma_tx + entry;
2438
2439                 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2440                 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2441
2442                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2443
2444                 /* To return the XDP buffer to the XSK pool, we simply call
2445                  * xsk_tx_completed(), so we don't need to fill up
2446                  * 'buf' and 'xdpf'.
2447                  */
2448                 tx_q->tx_skbuff_dma[entry].buf = 0;
2449                 tx_q->xdpf[entry] = NULL;
2450
2451                 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2452                 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2453                 tx_q->tx_skbuff_dma[entry].last_segment = true;
2454                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2455
2456                 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2457
2458                 tx_q->tx_count_frames++;
2459
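		/* Interrupt coalescing: request a TX completion interrupt only
		 * once every tx_coal_frames descriptors.
		 */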
2460                 if (!priv->tx_coal_frames[queue])
2461                         set_ic = false;
2462                 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2463                         set_ic = true;
2464                 else
2465                         set_ic = false;
2466
2467                 if (set_ic) {
2468                         tx_q->tx_count_frames = 0;
2469                         stmmac_set_tx_ic(priv, tx_desc);
2470                         priv->xstats.tx_set_ic_bit++;
2471                 }
2472
2473                 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2474                                        true, priv->mode, true, true,
2475                                        xdp_desc.len);
2476
2477                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2478
2479                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2480                 entry = tx_q->cur_tx;
2481         }
2482
2483         if (tx_desc) {
2484                 stmmac_flush_tx_descriptors(priv, queue);
2485                 xsk_tx_release(pool);
2486         }
2487
2488         /* Return true if both of the following conditions are met:
2489          *  a) TX budget is still available
2490          *  b) work_done is true, i.e. the XSK TX desc peek found no more
2491          *     pending XSK TX frames to transmit
2492          */
2493         return !!budget && work_done;
2494 }
2495
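/* Bump the TX DMA threshold in steps of 64 after a threshold-related
 * transmit error, as long as Store-And-Forward mode is not in use and the
 * threshold has not yet exceeded 256.
 */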
2496 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2497 {
2498         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2499                 tc += 64;
2500
2501                 if (priv->plat->force_thresh_dma_mode)
2502                         stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2503                 else
2504                         stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2505                                                       chan);
2506
2507                 priv->xstats.threshold = tc;
2508         }
2509 }
2510
2511 /**
2512  * stmmac_tx_clean - to manage the transmission completion
2513  * @priv: driver private structure
2514  * @budget: napi budget limiting this function's packet handling
2515  * @queue: TX queue index
2516  * Description: it reclaims the transmit resources after transmission completes.
2517  */
2518 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2519 {
2520         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2521         unsigned int bytes_compl = 0, pkts_compl = 0;
2522         unsigned int entry, xmits = 0, count = 0;
2523
2524         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2525
2526         priv->xstats.tx_clean++;
2527
2528         tx_q->xsk_frames_done = 0;
2529
2530         entry = tx_q->dirty_tx;
2531
2532         /* Try to clean all completed TX frames in one shot */
2533         while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2534                 struct xdp_frame *xdpf;
2535                 struct sk_buff *skb;
2536                 struct dma_desc *p;
2537                 int status;
2538
2539                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2540                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2541                         xdpf = tx_q->xdpf[entry];
2542                         skb = NULL;
2543                 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2544                         xdpf = NULL;
2545                         skb = tx_q->tx_skbuff[entry];
2546                 } else {
2547                         xdpf = NULL;
2548                         skb = NULL;
2549                 }
2550
2551                 if (priv->extend_desc)
2552                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2553                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2554                         p = &tx_q->dma_entx[entry].basic;
2555                 else
2556                         p = tx_q->dma_tx + entry;
2557
2558                 status = stmmac_tx_status(priv, &priv->dev->stats,
2559                                 &priv->xstats, p, priv->ioaddr);
2560                 /* Check if the descriptor is owned by the DMA */
2561                 if (unlikely(status & tx_dma_own))
2562                         break;
2563
2564                 count++;
2565
2566                 /* Make sure descriptor fields are read after reading
2567                  * the own bit.
2568                  */
2569                 dma_rmb();
2570
2571                 /* Just consider the last segment and ...*/
2572                 if (likely(!(status & tx_not_ls))) {
2573                         /* ... verify the status error condition */
2574                         if (unlikely(status & tx_err)) {
2575                                 priv->dev->stats.tx_errors++;
2576                                 if (unlikely(status & tx_err_bump_tc))
2577                                         stmmac_bump_dma_threshold(priv, queue);
2578                         } else {
2579                                 priv->dev->stats.tx_packets++;
2580                                 priv->xstats.tx_pkt_n++;
2581                                 priv->xstats.txq_stats[queue].tx_pkt_n++;
2582                         }
2583                         if (skb)
2584                                 stmmac_get_tx_hwtstamp(priv, p, skb);
2585                 }
2586
2587                 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2588                            tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2589                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2590                                 dma_unmap_page(priv->device,
2591                                                tx_q->tx_skbuff_dma[entry].buf,
2592                                                tx_q->tx_skbuff_dma[entry].len,
2593                                                DMA_TO_DEVICE);
2594                         else
2595                                 dma_unmap_single(priv->device,
2596                                                  tx_q->tx_skbuff_dma[entry].buf,
2597                                                  tx_q->tx_skbuff_dma[entry].len,
2598                                                  DMA_TO_DEVICE);
2599                         tx_q->tx_skbuff_dma[entry].buf = 0;
2600                         tx_q->tx_skbuff_dma[entry].len = 0;
2601                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2602                 }
2603
2604                 stmmac_clean_desc3(priv, tx_q, p);
2605
2606                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2607                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2608
2609                 if (xdpf &&
2610                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2611                         xdp_return_frame_rx_napi(xdpf);
2612                         tx_q->xdpf[entry] = NULL;
2613                 }
2614
2615                 if (xdpf &&
2616                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2617                         xdp_return_frame(xdpf);
2618                         tx_q->xdpf[entry] = NULL;
2619                 }
2620
2621                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2622                         tx_q->xsk_frames_done++;
2623
2624                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2625                         if (likely(skb)) {
2626                                 pkts_compl++;
2627                                 bytes_compl += skb->len;
2628                                 dev_consume_skb_any(skb);
2629                                 tx_q->tx_skbuff[entry] = NULL;
2630                         }
2631                 }
2632
2633                 stmmac_release_tx_desc(priv, p, priv->mode);
2634
2635                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2636         }
2637         tx_q->dirty_tx = entry;
2638
2639         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2640                                   pkts_compl, bytes_compl);
2641
2642         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2643                                                                 queue))) &&
2644             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2645
2646                 netif_dbg(priv, tx_done, priv->dev,
2647                           "%s: restart transmit\n", __func__);
2648                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2649         }
2650
2651         if (tx_q->xsk_pool) {
2652                 bool work_done;
2653
2654                 if (tx_q->xsk_frames_done)
2655                         xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2656
2657                 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2658                         xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2659
2660                 /* For XSK TX, we try to send as many as possible.
2661                  * If XSK work done (XSK TX desc empty and budget still
2662                  * available), return "budget - 1" to reenable TX IRQ.
2663                  * Else, return "budget" to make NAPI continue polling.
2664                  */
2665                 work_done = stmmac_xdp_xmit_zc(priv, queue,
2666                                                STMMAC_XSK_TX_BUDGET_MAX);
2667                 if (work_done)
2668                         xmits = budget - 1;
2669                 else
2670                         xmits = budget;
2671         }
2672
2673         if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2674             priv->eee_sw_timer_en) {
2675                 if (stmmac_enable_eee_mode(priv))
2676                         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2677         }
2678
2679         /* We still have pending packets, let's call for a new scheduling */
2680         if (tx_q->dirty_tx != tx_q->cur_tx)
2681                 hrtimer_start(&tx_q->txtimer,
2682                               STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2683                               HRTIMER_MODE_REL);
2684
2685         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2686
2687         /* Combine decisions from TX clean and XSK TX */
2688         return max(count, xmits);
2689 }
2690
2691 /**
2692  * stmmac_tx_err - to manage the tx error
2693  * @priv: driver private structure
2694  * @chan: channel index
2695  * Description: it cleans the descriptors and restarts the transmission
2696  * in case of transmission errors.
2697  */
2698 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2699 {
2700         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2701
2702         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2703
2704         stmmac_stop_tx_dma(priv, chan);
2705         dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2706         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2707         stmmac_reset_tx_queue(priv, chan);
2708         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2709                             tx_q->dma_tx_phy, chan);
2710         stmmac_start_tx_dma(priv, chan);
2711
2712         priv->dev->stats.tx_errors++;
2713         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2714 }
2715
2716 /**
2717  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2718  *  @priv: driver private structure
2719  *  @txmode: TX operating mode
2720  *  @rxmode: RX operating mode
2721  *  @chan: channel index
2722  *  Description: it is used for configuring the DMA operation mode at
2723  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2724  *  mode.
2725  */
2726 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2727                                           u32 rxmode, u32 chan)
2728 {
2729         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2730         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2731         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2732         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2733         int rxfifosz = priv->plat->rx_fifo_size;
2734         int txfifosz = priv->plat->tx_fifo_size;
2735
2736         if (rxfifosz == 0)
2737                 rxfifosz = priv->dma_cap.rx_fifo_size;
2738         if (txfifosz == 0)
2739                 txfifosz = priv->dma_cap.tx_fifo_size;
2740
2741         /* Adjust for real per queue fifo size */
2742         rxfifosz /= rx_channels_count;
2743         txfifosz /= tx_channels_count;
2744
2745         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2746         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2747 }
2748
2749 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2750 {
2751         int ret;
2752
2753         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2754                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2755         if (ret && (ret != -EINVAL)) {
2756                 stmmac_global_err(priv);
2757                 return true;
2758         }
2759
2760         return false;
2761 }
2762
2763 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2764 {
2765         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2766                                                  &priv->xstats, chan, dir);
2767         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2768         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2769         struct stmmac_channel *ch = &priv->channel[chan];
2770         struct napi_struct *rx_napi;
2771         struct napi_struct *tx_napi;
2772         unsigned long flags;
2773
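	/* Queues bound to an XSK pool are serviced by the combined rx/tx NAPI
	 * instance instead of the per-direction ones.
	 */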
2774         rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2775         tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2776
2777         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2778                 if (napi_schedule_prep(rx_napi)) {
2779                         spin_lock_irqsave(&ch->lock, flags);
2780                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2781                         spin_unlock_irqrestore(&ch->lock, flags);
2782                         __napi_schedule(rx_napi);
2783                 }
2784         }
2785
2786         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2787                 if (napi_schedule_prep(tx_napi)) {
2788                         spin_lock_irqsave(&ch->lock, flags);
2789                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2790                         spin_unlock_irqrestore(&ch->lock, flags);
2791                         __napi_schedule(tx_napi);
2792                 }
2793         }
2794
2795         return status;
2796 }
2797
2798 /**
2799  * stmmac_dma_interrupt - DMA ISR
2800  * @priv: driver private structure
2801  * Description: this is the DMA ISR. It is called by the main ISR.
2802  * It calls the dwmac dma routine and schedules the poll method in case
2803  * some work can be done.
2804  */
2805 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2806 {
2807         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2808         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2809         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2810                                 tx_channel_count : rx_channel_count;
2811         u32 chan;
2812         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2813
2814         /* Make sure we never check beyond our status buffer. */
2815         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2816                 channels_to_check = ARRAY_SIZE(status);
2817
2818         for (chan = 0; chan < channels_to_check; chan++)
2819                 status[chan] = stmmac_napi_check(priv, chan,
2820                                                  DMA_DIR_RXTX);
2821
2822         for (chan = 0; chan < tx_channel_count; chan++) {
2823                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2824                         /* Try to bump up the dma threshold on this failure */
2825                         stmmac_bump_dma_threshold(priv, chan);
2826                 } else if (unlikely(status[chan] == tx_hard_error)) {
2827                         stmmac_tx_err(priv, chan);
2828                 }
2829         }
2830 }
2831
2832 /**
2833  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2834  * @priv: driver private structure
2835  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2836  */
2837 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2838 {
2839         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2840                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2841
2842         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2843
2844         if (priv->dma_cap.rmon) {
2845                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2846                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2847         } else
2848                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2849 }
2850
2851 /**
2852  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2853  * @priv: driver private structure
2854  * Description:
2855  *  newer GMAC chip generations have a dedicated register to indicate the
2856  *  presence of optional features/functions.
2857  *  This can also be used to override the values passed through the
2858  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2859  */
2860 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2861 {
2862         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2863 }
2864
2865 /**
2866  * stmmac_check_ether_addr - check if the MAC addr is valid
2867  * @priv: driver private structure
2868  * Description:
2869  * it verifies that the MAC address is valid; if it is not, it reads the
2870  * address from the HW and falls back to a random MAC address
2871  */
2872 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2873 {
2874         u8 addr[ETH_ALEN];
2875
2876         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2877                 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2878                 if (is_valid_ether_addr(addr))
2879                         eth_hw_addr_set(priv->dev, addr);
2880                 else
2881                         eth_hw_addr_random(priv->dev);
2882                 dev_info(priv->device, "device MAC address %pM\n",
2883                          priv->dev->dev_addr);
2884         }
2885 }
2886
2887 /**
2888  * stmmac_init_dma_engine - DMA init.
2889  * @priv: driver private structure
2890  * Description:
2891  * It inits the DMA by invoking the specific MAC/GMAC callback.
2892  * Some DMA parameters can be passed from the platform;
2893  * if they are not passed, a default is kept for the MAC or GMAC.
2894  */
2895 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2896 {
2897         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2898         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2899         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2900         struct stmmac_rx_queue *rx_q;
2901         struct stmmac_tx_queue *tx_q;
2902         u32 chan = 0;
2903         int atds = 0;
2904         int ret = 0;
2905
2906         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2907                 dev_err(priv->device, "Invalid DMA configuration\n");
2908                 return -EINVAL;
2909         }
2910
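        /* ATDS (alternate descriptor size) is only needed when extended
         * descriptors are used together with ring mode.
         */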
2911         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2912                 atds = 1;
2913
2914         ret = stmmac_reset(priv, priv->ioaddr);
2915         if (ret) {
2916                 dev_err(priv->device, "Failed to reset the dma\n");
2917                 return ret;
2918         }
2919
2920         /* DMA Configuration */
2921         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2922
2923         if (priv->plat->axi)
2924                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2925
2926         /* DMA CSR Channel configuration */
2927         for (chan = 0; chan < dma_csr_ch; chan++) {
2928                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2929                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2930         }
2931
2932         /* DMA RX Channel Configuration */
2933         for (chan = 0; chan < rx_channels_count; chan++) {
2934                 rx_q = &priv->dma_conf.rx_queue[chan];
2935
2936                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2937                                     rx_q->dma_rx_phy, chan);
2938
2939                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2940                                      (rx_q->buf_alloc_num *
2941                                       sizeof(struct dma_desc));
2942                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2943                                        rx_q->rx_tail_addr, chan);
2944         }
2945
2946         /* DMA TX Channel Configuration */
2947         for (chan = 0; chan < tx_channels_count; chan++) {
2948                 tx_q = &priv->dma_conf.tx_queue[chan];
2949
2950                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2951                                     tx_q->dma_tx_phy, chan);
2952
2953                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2954                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2955                                        tx_q->tx_tail_addr, chan);
2956         }
2957
2958         return ret;
2959 }
2960
2961 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2962 {
2963         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2964
2965         hrtimer_start(&tx_q->txtimer,
2966                       STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2967                       HRTIMER_MODE_REL);
2968 }
2969
2970 /**
2971  * stmmac_tx_timer - mitigation sw timer for tx.
2972  * @t: data pointer
2973  * Description:
2974  * This is the timer handler to directly invoke the stmmac_tx_clean.
2975  */
2976 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2977 {
2978         struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2979         struct stmmac_priv *priv = tx_q->priv_data;
2980         struct stmmac_channel *ch;
2981         struct napi_struct *napi;
2982
2983         ch = &priv->channel[tx_q->queue_index];
2984         napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2985
2986         if (likely(napi_schedule_prep(napi))) {
2987                 unsigned long flags;
2988
2989                 spin_lock_irqsave(&ch->lock, flags);
2990                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2991                 spin_unlock_irqrestore(&ch->lock, flags);
2992                 __napi_schedule(napi);
2993         }
2994
2995         return HRTIMER_NORESTART;
2996 }
2997
2998 /**
2999  * stmmac_init_coalesce - init mitigation options.
3000  * @priv: driver private structure
3001  * Description:
3002  * This inits the coalesce parameters: i.e. timer rate,
3003  * timer handler and default threshold used for enabling the
3004  * interrupt on completion bit.
3005  */
3006 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3007 {
3008         u32 tx_channel_count = priv->plat->tx_queues_to_use;
3009         u32 rx_channel_count = priv->plat->rx_queues_to_use;
3010         u32 chan;
3011
3012         for (chan = 0; chan < tx_channel_count; chan++) {
3013                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3014
3015                 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3016                 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3017
3018                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3019                 tx_q->txtimer.function = stmmac_tx_timer;
3020         }
3021
3022         for (chan = 0; chan < rx_channel_count; chan++)
3023                 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3024 }
3025
3026 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3027 {
3028         u32 rx_channels_count = priv->plat->rx_queues_to_use;
3029         u32 tx_channels_count = priv->plat->tx_queues_to_use;
3030         u32 chan;
3031
3032         /* set TX ring length */
3033         for (chan = 0; chan < tx_channels_count; chan++)
3034                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3035                                        (priv->dma_conf.dma_tx_size - 1), chan);
3036
3037         /* set RX ring length */
3038         for (chan = 0; chan < rx_channels_count; chan++)
3039                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3040                                        (priv->dma_conf.dma_rx_size - 1), chan);
3041 }
3042
3043 /**
3044  *  stmmac_set_tx_queue_weight - Set TX queue weight
3045  *  @priv: driver private structure
3046  *  Description: It is used for setting TX queues weight
3047  */
3048 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3049 {
3050         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3051         u32 weight;
3052         u32 queue;
3053
3054         for (queue = 0; queue < tx_queues_count; queue++) {
3055                 weight = priv->plat->tx_queues_cfg[queue].weight;
3056                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3057         }
3058 }
3059
3060 /**
3061  *  stmmac_configure_cbs - Configure CBS in TX queue
3062  *  @priv: driver private structure
3063  *  Description: It is used for configuring CBS in AVB TX queues
3064  */
3065 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3066 {
3067         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3068         u32 mode_to_use;
3069         u32 queue;
3070
3071         /* queue 0 is reserved for legacy traffic */
3072         for (queue = 1; queue < tx_queues_count; queue++) {
3073                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
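                /* CBS only applies to AVB queues; skip queues left in DCB mode */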
3074                 if (mode_to_use == MTL_QUEUE_DCB)
3075                         continue;
3076
3077                 stmmac_config_cbs(priv, priv->hw,
3078                                 priv->plat->tx_queues_cfg[queue].send_slope,
3079                                 priv->plat->tx_queues_cfg[queue].idle_slope,
3080                                 priv->plat->tx_queues_cfg[queue].high_credit,
3081                                 priv->plat->tx_queues_cfg[queue].low_credit,
3082                                 queue);
3083         }
3084 }
3085
3086 /**
3087  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3088  *  @priv: driver private structure
3089  *  Description: It is used for mapping RX queues to RX dma channels
3090  */
3091 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3092 {
3093         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3094         u32 queue;
3095         u32 chan;
3096
3097         for (queue = 0; queue < rx_queues_count; queue++) {
3098                 chan = priv->plat->rx_queues_cfg[queue].chan;
3099                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3100         }
3101 }
3102
3103 /**
3104  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3105  *  @priv: driver private structure
3106  *  Description: It is used for configuring the RX Queue Priority
3107  */
3108 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3109 {
3110         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3111         u32 queue;
3112         u32 prio;
3113
3114         for (queue = 0; queue < rx_queues_count; queue++) {
3115                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3116                         continue;
3117
3118                 prio = priv->plat->rx_queues_cfg[queue].prio;
3119                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3120         }
3121 }
3122
3123 /**
3124  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3125  *  @priv: driver private structure
3126  *  Description: It is used for configuring the TX Queue Priority
3127  */
3128 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3129 {
3130         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3131         u32 queue;
3132         u32 prio;
3133
3134         for (queue = 0; queue < tx_queues_count; queue++) {
3135                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3136                         continue;
3137
3138                 prio = priv->plat->tx_queues_cfg[queue].prio;
3139                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3140         }
3141 }
3142
3143 /**
3144  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3145  *  @priv: driver private structure
3146  *  Description: It is used for configuring the RX queue routing
3147  */
3148 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3149 {
3150         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3151         u32 queue;
3152         u8 packet;
3153
3154         for (queue = 0; queue < rx_queues_count; queue++) {
3155                 /* no specific packet type routing specified for the queue */
3156                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3157                         continue;
3158
3159                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3160                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3161         }
3162 }
3163
3164 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3165 {
3166         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3167                 priv->rss.enable = false;
3168                 return;
3169         }
3170
3171         if (priv->dev->features & NETIF_F_RXHASH)
3172                 priv->rss.enable = true;
3173         else
3174                 priv->rss.enable = false;
3175
3176         stmmac_rss_configure(priv, priv->hw, &priv->rss,
3177                              priv->plat->rx_queues_to_use);
3178 }
3179
3180 /**
3181  *  stmmac_mtl_configuration - Configure MTL
3182  *  @priv: driver private structure
3183  *  Description: It is used for configuring MTL
3184  */
3185 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3186 {
3187         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3188         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3189
3190         if (tx_queues_count > 1)
3191                 stmmac_set_tx_queue_weight(priv);
3192
3193         /* Configure MTL RX algorithms */
3194         if (rx_queues_count > 1)
3195                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3196                                 priv->plat->rx_sched_algorithm);
3197
3198         /* Configure MTL TX algorithms */
3199         if (tx_queues_count > 1)
3200                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3201                                 priv->plat->tx_sched_algorithm);
3202
3203         /* Configure CBS in AVB TX queues */
3204         if (tx_queues_count > 1)
3205                 stmmac_configure_cbs(priv);
3206
3207         /* Map RX MTL to DMA channels */
3208         stmmac_rx_queue_dma_chan_map(priv);
3209
3210         /* Enable MAC RX Queues */
3211         stmmac_mac_enable_rx_queues(priv);
3212
3213         /* Set RX priorities */
3214         if (rx_queues_count > 1)
3215                 stmmac_mac_config_rx_queues_prio(priv);
3216
3217         /* Set TX priorities */
3218         if (tx_queues_count > 1)
3219                 stmmac_mac_config_tx_queues_prio(priv);
3220
3221         /* Set RX routing */
3222         if (rx_queues_count > 1)
3223                 stmmac_mac_config_rx_queues_routing(priv);
3224
3225         /* Receive Side Scaling */
3226         if (rx_queues_count > 1)
3227                 stmmac_mac_config_rss(priv);
3228 }
3229
3230 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3231 {
3232         if (priv->dma_cap.asp) {
3233                 netdev_info(priv->dev, "Enabling Safety Features\n");
3234                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3235                                           priv->plat->safety_feat_cfg);
3236         } else {
3237                 netdev_info(priv->dev, "No Safety Features support found\n");
3238         }
3239 }
3240
3241 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3242 {
3243         char *name;
3244
3245         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3246         clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3247
3248         name = priv->wq_name;
3249         sprintf(name, "%s-fpe", priv->dev->name);
3250
3251         priv->fpe_wq = create_singlethread_workqueue(name);
3252         if (!priv->fpe_wq) {
3253                 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3254
3255                 return -ENOMEM;
3256         }
3257         netdev_info(priv->dev, "FPE workqueue start");
3258
3259         return 0;
3260 }
3261
3262 /**
3263  * stmmac_hw_setup - setup mac in a usable state.
3264  *  @dev : pointer to the device structure.
3265  *  @ptp_register: register PTP if set
3266  *  Description:
3267  *  this is the main function to setup the HW in a usable state: the
3268  *  dma engine is reset, the core registers are configured (e.g. AXI,
3269  *  checksum features, timers), and the DMA is ready to start receiving
3270  *  and transmitting.
3271  *  Return value:
3272  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3273  *  file on failure.
3274  */
3275 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3276 {
3277         struct stmmac_priv *priv = netdev_priv(dev);
3278         u32 rx_cnt = priv->plat->rx_queues_to_use;
3279         u32 tx_cnt = priv->plat->tx_queues_to_use;
3280         bool sph_en;
3281         u32 chan;
3282         int ret;
3283
3284         /* DMA initialization and SW reset */
3285         ret = stmmac_init_dma_engine(priv);
3286         if (ret < 0) {
3287                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3288                            __func__);
3289                 return ret;
3290         }
3291
3292         /* Copy the MAC addr into the HW  */
3293         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3294
3295         /* PS and related bits will be programmed according to the speed */
3296         if (priv->hw->pcs) {
3297                 int speed = priv->plat->mac_port_sel_speed;
3298
3299                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3300                     (speed == SPEED_1000)) {
3301                         priv->hw->ps = speed;
3302                 } else {
3303                         dev_warn(priv->device, "invalid port speed\n");
3304                         priv->hw->ps = 0;
3305                 }
3306         }
3307
3308         /* Initialize the MAC Core */
3309         stmmac_core_init(priv, priv->hw, dev);
3310
3311         /* Initialize MTL */
3312         stmmac_mtl_configuration(priv);
3313
3314         /* Initialize Safety Features */
3315         stmmac_safety_feat_configuration(priv);
3316
3317         ret = stmmac_rx_ipc(priv, priv->hw);
3318         if (!ret) {
3319                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3320                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3321                 priv->hw->rx_csum = 0;
3322         }
3323
3324         /* Enable the MAC Rx/Tx */
3325         stmmac_mac_set(priv, priv->ioaddr, true);
3326
3327         /* Set the HW DMA mode and the COE */
3328         stmmac_dma_operation_mode(priv);
3329
3330         stmmac_mmc_setup(priv);
3331
3332         if (ptp_register) {
3333                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3334                 if (ret < 0)
3335                         netdev_warn(priv->dev,
3336                                     "failed to enable PTP reference clock: %pe\n",
3337                                     ERR_PTR(ret));
3338         }
3339
3340         ret = stmmac_init_ptp(priv);
3341         if (ret == -EOPNOTSUPP)
3342                 netdev_info(priv->dev, "PTP not supported by HW\n");
3343         else if (ret)
3344                 netdev_warn(priv->dev, "PTP init failed\n");
3345         else if (ptp_register)
3346                 stmmac_ptp_register(priv);
3347
3348         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3349
3350         /* Convert the timer from msec to usec */
3351         if (!priv->tx_lpi_timer)
3352                 priv->tx_lpi_timer = eee_timer * 1000;
3353
3354         if (priv->use_riwt) {
3355                 u32 queue;
3356
3357                 for (queue = 0; queue < rx_cnt; queue++) {
3358                         if (!priv->rx_riwt[queue])
3359                                 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3360
3361                         stmmac_rx_watchdog(priv, priv->ioaddr,
3362                                            priv->rx_riwt[queue], queue);
3363                 }
3364         }
3365
3366         if (priv->hw->pcs)
3367                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3368
3369         /* set TX and RX rings length */
3370         stmmac_set_rings_length(priv);
3371
3372         /* Enable TSO */
3373         if (priv->tso) {
3374                 for (chan = 0; chan < tx_cnt; chan++) {
3375                         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3376
3377                         /* TSO and TBS cannot co-exist */
3378                         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3379                                 continue;
3380
3381                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3382                 }
3383         }
3384
3385         /* Enable Split Header */
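        /* Split Header can only be used when RX checksum offload is active */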
3386         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3387         for (chan = 0; chan < rx_cnt; chan++)
3388                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3389
3390
3391         /* VLAN Tag Insertion */
3392         if (priv->dma_cap.vlins)
3393                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3394
3395         /* TBS */
3396         for (chan = 0; chan < tx_cnt; chan++) {
3397                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3398                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3399
3400                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3401         }
3402
3403         /* Configure real RX and TX queues */
3404         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3405         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3406
3407         /* Start the ball rolling... */
3408         stmmac_start_all_dma(priv);
3409
3410         if (priv->dma_cap.fpesel) {
3411                 stmmac_fpe_start_wq(priv);
3412
3413                 if (priv->plat->fpe_cfg->enable)
3414                         stmmac_fpe_handshake(priv, true);
3415         }
3416
3417         return 0;
3418 }
3419
3420 static void stmmac_hw_teardown(struct net_device *dev)
3421 {
3422         struct stmmac_priv *priv = netdev_priv(dev);
3423
3424         clk_disable_unprepare(priv->plat->clk_ptp_ref);
3425 }
3426
3427 static void stmmac_free_irq(struct net_device *dev,
3428                             enum request_irq_err irq_err, int irq_idx)
3429 {
3430         struct stmmac_priv *priv = netdev_priv(dev);
3431         int j;
3432
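        /* Tear down in the reverse order of stmmac_request_irq_multi_msi():
         * each case falls through so that everything requested before the
         * failing step (or everything, for REQ_IRQ_ERR_ALL) is freed.
         */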
3433         switch (irq_err) {
3434         case REQ_IRQ_ERR_ALL:
3435                 irq_idx = priv->plat->tx_queues_to_use;
3436                 fallthrough;
3437         case REQ_IRQ_ERR_TX:
3438                 for (j = irq_idx - 1; j >= 0; j--) {
3439                         if (priv->tx_irq[j] > 0) {
3440                                 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3441                                 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3442                         }
3443                 }
3444                 irq_idx = priv->plat->rx_queues_to_use;
3445                 fallthrough;
3446         case REQ_IRQ_ERR_RX:
3447                 for (j = irq_idx - 1; j >= 0; j--) {
3448                         if (priv->rx_irq[j] > 0) {
3449                                 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3450                                 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3451                         }
3452                 }
3453
3454                 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3455                         free_irq(priv->sfty_ue_irq, dev);
3456                 fallthrough;
3457         case REQ_IRQ_ERR_SFTY_UE:
3458                 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3459                         free_irq(priv->sfty_ce_irq, dev);
3460                 fallthrough;
3461         case REQ_IRQ_ERR_SFTY_CE:
3462                 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3463                         free_irq(priv->lpi_irq, dev);
3464                 fallthrough;
3465         case REQ_IRQ_ERR_LPI:
3466                 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3467                         free_irq(priv->wol_irq, dev);
3468                 fallthrough;
3469         case REQ_IRQ_ERR_WOL:
3470                 free_irq(dev->irq, dev);
3471                 fallthrough;
3472         case REQ_IRQ_ERR_MAC:
3473         case REQ_IRQ_ERR_NO:
3474                 /* If MAC IRQ request error, no more IRQ to free */
3475                 break;
3476         }
3477 }
3478
3479 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3480 {
3481         struct stmmac_priv *priv = netdev_priv(dev);
3482         enum request_irq_err irq_err;
3483         cpumask_t cpu_mask;
3484         int irq_idx = 0;
3485         char *int_name;
3486         int ret;
3487         int i;
3488
3489         /* For common interrupt */
3490         int_name = priv->int_name_mac;
3491         sprintf(int_name, "%s:%s", dev->name, "mac");
3492         ret = request_irq(dev->irq, stmmac_mac_interrupt,
3493                           0, int_name, dev);
3494         if (unlikely(ret < 0)) {
3495                 netdev_err(priv->dev,
3496                            "%s: alloc mac MSI %d (error: %d)\n",
3497                            __func__, dev->irq, ret);
3498                 irq_err = REQ_IRQ_ERR_MAC;
3499                 goto irq_error;
3500         }
3501
3502         /* Request the Wake IRQ in case another line
3503          * is used for WoL
3504          */
3505         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3506                 int_name = priv->int_name_wol;
3507                 sprintf(int_name, "%s:%s", dev->name, "wol");
3508                 ret = request_irq(priv->wol_irq,
3509                                   stmmac_mac_interrupt,
3510                                   0, int_name, dev);
3511                 if (unlikely(ret < 0)) {
3512                         netdev_err(priv->dev,
3513                                    "%s: alloc wol MSI %d (error: %d)\n",
3514                                    __func__, priv->wol_irq, ret);
3515                         irq_err = REQ_IRQ_ERR_WOL;
3516                         goto irq_error;
3517                 }
3518         }
3519
3520         /* Request the LPI IRQ in case another line
3521          * is used for LPI
3522          */
3523         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3524                 int_name = priv->int_name_lpi;
3525                 sprintf(int_name, "%s:%s", dev->name, "lpi");
3526                 ret = request_irq(priv->lpi_irq,
3527                                   stmmac_mac_interrupt,
3528                                   0, int_name, dev);
3529                 if (unlikely(ret < 0)) {
3530                         netdev_err(priv->dev,
3531                                    "%s: alloc lpi MSI %d (error: %d)\n",
3532                                    __func__, priv->lpi_irq, ret);
3533                         irq_err = REQ_IRQ_ERR_LPI;
3534                         goto irq_error;
3535                 }
3536         }
3537
3538         /* Request the Safety Feature Correctable Error line in
3539          * case another line is used
3540          */
3541         if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3542                 int_name = priv->int_name_sfty_ce;
3543                 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3544                 ret = request_irq(priv->sfty_ce_irq,
3545                                   stmmac_safety_interrupt,
3546                                   0, int_name, dev);
3547                 if (unlikely(ret < 0)) {
3548                         netdev_err(priv->dev,
3549                                    "%s: alloc sfty ce MSI %d (error: %d)\n",
3550                                    __func__, priv->sfty_ce_irq, ret);
3551                         irq_err = REQ_IRQ_ERR_SFTY_CE;
3552                         goto irq_error;
3553                 }
3554         }
3555
3556         /* Request the Safety Feature Uncorrectable Error line in
3557          * case another line is used
3558          */
3559         if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3560                 int_name = priv->int_name_sfty_ue;
3561                 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3562                 ret = request_irq(priv->sfty_ue_irq,
3563                                   stmmac_safety_interrupt,
3564                                   0, int_name, dev);
3565                 if (unlikely(ret < 0)) {
3566                         netdev_err(priv->dev,
3567                                    "%s: alloc sfty ue MSI %d (error: %d)\n",
3568                                    __func__, priv->sfty_ue_irq, ret);
3569                         irq_err = REQ_IRQ_ERR_SFTY_UE;
3570                         goto irq_error;
3571                 }
3572         }
3573
3574         /* Request Rx MSI irq */
3575         for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3576                 if (i >= MTL_MAX_RX_QUEUES)
3577                         break;
3578                 if (priv->rx_irq[i] == 0)
3579                         continue;
3580
3581                 int_name = priv->int_name_rx_irq[i];
3582                 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3583                 ret = request_irq(priv->rx_irq[i],
3584                                   stmmac_msi_intr_rx,
3585                                   0, int_name, &priv->dma_conf.rx_queue[i]);
3586                 if (unlikely(ret < 0)) {
3587                         netdev_err(priv->dev,
3588                                    "%s: alloc rx-%d  MSI %d (error: %d)\n",
3589                                    __func__, i, priv->rx_irq[i], ret);
3590                         irq_err = REQ_IRQ_ERR_RX;
3591                         irq_idx = i;
3592                         goto irq_error;
3593                 }
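                /* Spread the per-queue RX IRQs across the online CPUs */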
3594                 cpumask_clear(&cpu_mask);
3595                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3596                 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3597         }
3598
3599         /* Request Tx MSI irq */
3600         for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3601                 if (i >= MTL_MAX_TX_QUEUES)
3602                         break;
3603                 if (priv->tx_irq[i] == 0)
3604                         continue;
3605
3606                 int_name = priv->int_name_tx_irq[i];
3607                 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3608                 ret = request_irq(priv->tx_irq[i],
3609                                   stmmac_msi_intr_tx,
3610                                   0, int_name, &priv->dma_conf.tx_queue[i]);
3611                 if (unlikely(ret < 0)) {
3612                         netdev_err(priv->dev,
3613                                    "%s: alloc tx-%d  MSI %d (error: %d)\n",
3614                                    __func__, i, priv->tx_irq[i], ret);
3615                         irq_err = REQ_IRQ_ERR_TX;
3616                         irq_idx = i;
3617                         goto irq_error;
3618                 }
3619                 cpumask_clear(&cpu_mask);
3620                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3621                 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3622         }
3623
3624         return 0;
3625
3626 irq_error:
3627         stmmac_free_irq(dev, irq_err, irq_idx);
3628         return ret;
3629 }
3630
3631 static int stmmac_request_irq_single(struct net_device *dev)
3632 {
3633         struct stmmac_priv *priv = netdev_priv(dev);
3634         enum request_irq_err irq_err;
3635         int ret;
3636
3637         ret = request_irq(dev->irq, stmmac_interrupt,
3638                           IRQF_SHARED, dev->name, dev);
3639         if (unlikely(ret < 0)) {
3640                 netdev_err(priv->dev,
3641                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3642                            __func__, dev->irq, ret);
3643                 irq_err = REQ_IRQ_ERR_MAC;
3644                 goto irq_error;
3645         }
3646
3647         /* Request the Wake IRQ in case another line
3648          * is used for WoL
3649          */
3650         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3651                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3652                                   IRQF_SHARED, dev->name, dev);
3653                 if (unlikely(ret < 0)) {
3654                         netdev_err(priv->dev,
3655                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3656                                    __func__, priv->wol_irq, ret);
3657                         irq_err = REQ_IRQ_ERR_WOL;
3658                         goto irq_error;
3659                 }
3660         }
3661
3662         /* Request the IRQ lines */
3663         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3664                 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3665                                   IRQF_SHARED, dev->name, dev);
3666                 if (unlikely(ret < 0)) {
3667                         netdev_err(priv->dev,
3668                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3669                                    __func__, priv->lpi_irq, ret);
3670                         irq_err = REQ_IRQ_ERR_LPI;
3671                         goto irq_error;
3672                 }
3673         }
3674
3675         return 0;
3676
3677 irq_error:
3678         stmmac_free_irq(dev, irq_err, 0);
3679         return ret;
3680 }
3681
3682 static int stmmac_request_irq(struct net_device *dev)
3683 {
3684         struct stmmac_priv *priv = netdev_priv(dev);
3685         int ret;
3686
3687         /* Request the IRQ lines */
3688         if (priv->plat->multi_msi_en)
3689                 ret = stmmac_request_irq_multi_msi(dev);
3690         else
3691                 ret = stmmac_request_irq_single(dev);
3692
3693         return ret;
3694 }
3695
3696 /**
3697  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3698  *  @priv: driver private structure
3699  *  @mtu: MTU to setup the dma queue and buf with
3700  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3701  *  Allocate the Tx/Rx DMA queues and init them.
3702  *  Return value:
3703  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3704  */
3705 static struct stmmac_dma_conf *
3706 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3707 {
3708         struct stmmac_dma_conf *dma_conf;
3709         int chan, bfsize, ret;
3710
3711         dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3712         if (!dma_conf) {
3713                 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3714                            __func__);
3715                 return ERR_PTR(-ENOMEM);
3716         }
3717
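        /* Pick a DMA buffer size large enough for the MTU: use the 16KiB
         * bufsize when the MTU requires it and the descriptor layout supports
         * it, otherwise fall back to a standard size derived from the MTU.
         */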
3718         bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3719         if (bfsize < 0)
3720                 bfsize = 0;
3721
3722         if (bfsize < BUF_SIZE_16KiB)
3723                 bfsize = stmmac_set_bfsize(mtu, 0);
3724
3725         dma_conf->dma_buf_sz = bfsize;
3726         /* Choose the tx/rx size from the one already defined in the
3727          * priv struct (if defined)
3728          */
3729         dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3730         dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3731
3732         if (!dma_conf->dma_tx_size)
3733                 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3734         if (!dma_conf->dma_rx_size)
3735                 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3736
3737         /* Earlier check for TBS */
3738         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3739                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3740                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3741
3742                 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3743                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3744         }
3745
3746         ret = alloc_dma_desc_resources(priv, dma_conf);
3747         if (ret < 0) {
3748                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3749                            __func__);
3750                 goto alloc_error;
3751         }
3752
3753         ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3754         if (ret < 0) {
3755                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3756                            __func__);
3757                 goto init_error;
3758         }
3759
3760         return dma_conf;
3761
3762 init_error:
3763         free_dma_desc_resources(priv, dma_conf);
3764 alloc_error:
3765         kfree(dma_conf);
3766         return ERR_PTR(ret);
3767 }
3768
3769 /**
3770  *  __stmmac_open - open entry point of the driver
3771  *  @dev : pointer to the device structure.
3772  *  @dma_conf :  structure to take the dma data
3773  *  Description:
3774  *  This function is the open entry point of the driver.
3775  *  Return value:
3776  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3777  *  file on failure.
3778  */
3779 static int __stmmac_open(struct net_device *dev,
3780                          struct stmmac_dma_conf *dma_conf)
3781 {
3782         struct stmmac_priv *priv = netdev_priv(dev);
3783         int mode = priv->plat->phy_interface;
3784         u32 chan;
3785         int ret;
3786
3787         ret = pm_runtime_resume_and_get(priv->device);
3788         if (ret < 0)
3789                 return ret;
3790
3791         if (priv->hw->pcs != STMMAC_PCS_TBI &&
3792             priv->hw->pcs != STMMAC_PCS_RTBI &&
3793             (!priv->hw->xpcs ||
3794              xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3795                 ret = stmmac_init_phy(dev);
3796                 if (ret) {
3797                         netdev_err(priv->dev,
3798                                    "%s: Cannot attach to PHY (error: %d)\n",
3799                                    __func__, ret);
3800                         goto init_phy_error;
3801                 }
3802         }
3803
3804         /* Extra statistics */
3805         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3806         priv->xstats.threshold = tc;
3807
3808         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3809
3810         buf_sz = dma_conf->dma_buf_sz;
3811         memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3812
3813         stmmac_reset_queues_param(priv);
3814
3815         if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
3816                 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3817                 if (ret < 0) {
3818                         netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3819                                    __func__);
3820                         goto init_error;
3821                 }
3822         }
3823
3824         ret = stmmac_hw_setup(dev, true);
3825         if (ret < 0) {
3826                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3827                 goto init_error;
3828         }
3829
3830         stmmac_init_coalesce(priv);
3831
3832         phylink_start(priv->phylink);
3833         /* We may have called phylink_speed_down before */
3834         phylink_speed_up(priv->phylink);
3835
3836         ret = stmmac_request_irq(dev);
3837         if (ret)
3838                 goto irq_error;
3839
3840         stmmac_enable_all_queues(priv);
3841         netif_tx_start_all_queues(priv->dev);
3842         stmmac_enable_all_dma_irq(priv);
3843
3844         return 0;
3845
3846 irq_error:
3847         phylink_stop(priv->phylink);
3848
3849         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3850                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3851
3852         stmmac_hw_teardown(dev);
3853 init_error:
3854         free_dma_desc_resources(priv, &priv->dma_conf);
3855         phylink_disconnect_phy(priv->phylink);
3856 init_phy_error:
3857         pm_runtime_put(priv->device);
3858         return ret;
3859 }
3860
3861 static int stmmac_open(struct net_device *dev)
3862 {
3863         struct stmmac_priv *priv = netdev_priv(dev);
3864         struct stmmac_dma_conf *dma_conf;
3865         int ret;
3866
3867         dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3868         if (IS_ERR(dma_conf))
3869                 return PTR_ERR(dma_conf);
3870
3871         ret = __stmmac_open(dev, dma_conf);
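        /* __stmmac_open() copied the dma_conf contents into priv->dma_conf,
         * so the temporary structure can be freed here.
         */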
3872         kfree(dma_conf);
3873         return ret;
3874 }
3875
3876 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3877 {
3878         set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3879
3880         if (priv->fpe_wq)
3881                 destroy_workqueue(priv->fpe_wq);
3882
3883         netdev_info(priv->dev, "FPE workqueue stop");
3884 }
3885
3886 /**
3887  *  stmmac_release - close entry point of the driver
3888  *  @dev : device pointer.
3889  *  Description:
3890  *  This is the stop entry point of the driver.
3891  */
3892 static int stmmac_release(struct net_device *dev)
3893 {
3894         struct stmmac_priv *priv = netdev_priv(dev);
3895         u32 chan;
3896
3897         if (device_may_wakeup(priv->device))
3898                 phylink_speed_down(priv->phylink, false);
3899         /* Stop and disconnect the PHY */
3900         phylink_stop(priv->phylink);
3901         phylink_disconnect_phy(priv->phylink);
3902
3903         stmmac_disable_all_queues(priv);
3904
3905         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3906                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3907
3908         netif_tx_disable(dev);
3909
3910         /* Free the IRQ lines */
3911         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3912
3913         if (priv->eee_enabled) {
3914                 priv->tx_path_in_lpi_mode = false;
3915                 del_timer_sync(&priv->eee_ctrl_timer);
3916         }
3917
3918         /* Stop TX/RX DMA and clear the descriptors */
3919         stmmac_stop_all_dma(priv);
3920
3921         /* Release and free the Rx/Tx resources */
3922         free_dma_desc_resources(priv, &priv->dma_conf);
3923
3924         /* Disable the MAC Rx/Tx */
3925         stmmac_mac_set(priv, priv->ioaddr, false);
3926
3927         /* Power down the Serdes if there is one */
3928         if (priv->plat->serdes_powerdown)
3929                 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3930
3931         netif_carrier_off(dev);
3932
3933         stmmac_release_ptp(priv);
3934
3935         pm_runtime_put(priv->device);
3936
3937         if (priv->dma_cap.fpesel)
3938                 stmmac_fpe_stop_wq(priv);
3939
3940         return 0;
3941 }
3942
3943 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3944                                struct stmmac_tx_queue *tx_q)
3945 {
3946         u16 tag = 0x0, inner_tag = 0x0;
3947         u32 inner_type = 0x0;
3948         struct dma_desc *p;
3949
3950         if (!priv->dma_cap.vlins)
3951                 return false;
3952         if (!skb_vlan_tag_present(skb))
3953                 return false;
3954         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3955                 inner_tag = skb_vlan_tag_get(skb);
3956                 inner_type = STMMAC_VLAN_INSERT;
3957         }
3958
3959         tag = skb_vlan_tag_get(skb);
3960
3961         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3962                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3963         else
3964                 p = &tx_q->dma_tx[tx_q->cur_tx];
3965
3966         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3967                 return false;
3968
3969         stmmac_set_tx_owner(priv, p);
3970         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
3971         return true;
3972 }
3973
3974 /**
3975  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
3976  *  @priv: driver private structure
3977  *  @des: buffer start address
3978  *  @total_len: total length to fill in descriptors
3979  *  @last_segment: condition for the last descriptor
3980  *  @queue: TX queue index
3981  *  Description:
3982  *  This function fills descriptors and requests new descriptors according to
3983  *  the buffer length to fill
3984  */
3985 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3986                                  int total_len, bool last_segment, u32 queue)
3987 {
3988         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3989         struct dma_desc *desc;
3990         u32 buff_size;
3991         int tmp_len;
3992
3993         tmp_len = total_len;
3994
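        /* Split the payload into TSO_MAX_BUFF_SIZE chunks, one descriptor per
         * chunk; only the final chunk of the last segment sets the LS bit.
         */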
3995         while (tmp_len > 0) {
3996                 dma_addr_t curr_addr;
3997
3998                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3999                                                 priv->dma_conf.dma_tx_size);
4000                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4001
4002                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4003                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4004                 else
4005                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4006
4007                 curr_addr = des + (total_len - tmp_len);
4008                 if (priv->dma_cap.addr64 <= 32)
4009                         desc->des0 = cpu_to_le32(curr_addr);
4010                 else
4011                         stmmac_set_desc_addr(priv, desc, curr_addr);
4012
4013                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4014                             TSO_MAX_BUFF_SIZE : tmp_len;
4015
4016                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4017                                 0, 1,
4018                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4019                                 0, 0);
4020
4021                 tmp_len -= TSO_MAX_BUFF_SIZE;
4022         }
4023 }
4024
4025 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4026 {
4027         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4028         int desc_size;
4029
4030         if (likely(priv->extend_desc))
4031                 desc_size = sizeof(struct dma_extended_desc);
4032         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4033                 desc_size = sizeof(struct dma_edesc);
4034         else
4035                 desc_size = sizeof(struct dma_desc);
4036
4037         /* The own bit must be the latest setting done when preparing the
4038          * descriptor, and then a barrier is needed to make sure that
4039          * everything is coherent before granting ownership to the DMA engine.
4040          */
4041         wmb();
4042
4043         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4044         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4045 }
4046
4047 /**
4048  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4049  *  @skb : the socket buffer
4050  *  @dev : device pointer
4051  *  Description: this is the transmit function that is called on TSO frames
4052  *  (support available on GMAC4 and newer chips).
4053  *  The diagram below shows the ring programming in case of TSO frames:
4054  *
4055  *  First Descriptor
4056  *   --------
4057  *   | DES0 |---> buffer1 = L2/L3/L4 header
4058  *   | DES1 |---> TCP Payload (can continue on next descr...)
4059  *   | DES2 |---> buffer 1 and 2 len
4060  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4061  *   --------
4062  *      |
4063  *     ...
4064  *      |
4065  *   --------
4066  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4067  *   | DES1 | --|
4068  *   | DES2 | --> buffer 1 and 2 len
4069  *   | DES3 |
4070  *   --------
4071  *
4072  * mss is fixed when TSO is enabled, so the TDES3 ctx field does not need to be programmed.
4073  */
4074 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4075 {
4076         struct dma_desc *desc, *first, *mss_desc = NULL;
4077         struct stmmac_priv *priv = netdev_priv(dev);
4078         int nfrags = skb_shinfo(skb)->nr_frags;
4079         u32 queue = skb_get_queue_mapping(skb);
4080         unsigned int first_entry, tx_packets;
4081         int tmp_pay_len = 0, first_tx;
4082         struct stmmac_tx_queue *tx_q;
4083         bool has_vlan, set_ic;
4084         u8 proto_hdr_len, hdr;
4085         u32 pay_len, mss;
4086         dma_addr_t des;
4087         int i;
4088
4089         tx_q = &priv->dma_conf.tx_queue[queue];
4090         first_tx = tx_q->cur_tx;
4091
4092         /* Compute header lengths */
4093         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4094                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4095                 hdr = sizeof(struct udphdr);
4096         } else {
4097                 proto_hdr_len = skb_tcp_all_headers(skb);
4098                 hdr = tcp_hdrlen(skb);
4099         }
4100
4101         /* Desc availability based on the threshold should be safe enough */
4102         if (unlikely(stmmac_tx_avail(priv, queue) <
4103                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4104                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4105                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4106                                                                 queue));
4107                         /* This is a hard error, log it. */
4108                         netdev_err(priv->dev,
4109                                    "%s: Tx Ring full when queue awake\n",
4110                                    __func__);
4111                 }
4112                 return NETDEV_TX_BUSY;
4113         }
4114
4115         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4116
4117         mss = skb_shinfo(skb)->gso_size;
4118
4119         /* set new MSS value if needed */
4120         if (mss != tx_q->mss) {
4121                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4122                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4123                 else
4124                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4125
4126                 stmmac_set_mss(priv, mss_desc, mss);
4127                 tx_q->mss = mss;
4128                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4129                                                 priv->dma_conf.dma_tx_size);
4130                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4131         }
4132
4133         if (netif_msg_tx_queued(priv)) {
4134                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4135                         __func__, hdr, proto_hdr_len, pay_len, mss);
4136                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4137                         skb->data_len);
4138         }
4139
4140         /* Check if VLAN can be inserted by HW */
4141         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4142
4143         first_entry = tx_q->cur_tx;
4144         WARN_ON(tx_q->tx_skbuff[first_entry]);
4145
4146         if (tx_q->tbs & STMMAC_TBS_AVAIL)
4147                 desc = &tx_q->dma_entx[first_entry].basic;
4148         else
4149                 desc = &tx_q->dma_tx[first_entry];
4150         first = desc;
4151
4152         if (has_vlan)
4153                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4154
4155         /* first descriptor: fill Headers on Buf1 */
4156         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4157                              DMA_TO_DEVICE);
4158         if (dma_mapping_error(priv->device, des))
4159                 goto dma_map_err;
4160
4161         tx_q->tx_skbuff_dma[first_entry].buf = des;
4162         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4163         tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4164         tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4165
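        /* With 32-bit descriptor addressing, buf1 carries the headers and
         * buf2 the start of the payload; with wider addressing the first
         * descriptor only carries the headers and the whole payload is
         * mapped by stmmac_tso_allocator() below.
         */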
4166         if (priv->dma_cap.addr64 <= 32) {
4167                 first->des0 = cpu_to_le32(des);
4168
4169                 /* Fill start of payload in buff2 of first descriptor */
4170                 if (pay_len)
4171                         first->des1 = cpu_to_le32(des + proto_hdr_len);
4172
4173                 /* If needed take extra descriptors to fill the remaining payload */
4174                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4175         } else {
4176                 stmmac_set_desc_addr(priv, first, des);
4177                 tmp_pay_len = pay_len;
4178                 des += proto_hdr_len;
4179                 pay_len = 0;
4180         }
4181
4182         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4183
4184         /* Prepare fragments */
4185         for (i = 0; i < nfrags; i++) {
4186                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4187
4188                 des = skb_frag_dma_map(priv->device, frag, 0,
4189                                        skb_frag_size(frag),
4190                                        DMA_TO_DEVICE);
4191                 if (dma_mapping_error(priv->device, des))
4192                         goto dma_map_err;
4193
4194                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4195                                      (i == nfrags - 1), queue);
4196
4197                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4198                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4199                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4200                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4201         }
4202
4203         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4204
4205         /* Only the last descriptor gets to point to the skb. */
4206         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4207         tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4208
4209         /* Manage tx mitigation */
4210         tx_packets = (tx_q->cur_tx + 1) - first_tx;
4211         tx_q->tx_count_frames += tx_packets;
4212
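        /* Decide whether to request a TX completion interrupt (IC bit) for
         * this packet: always when a hardware TX timestamp was requested,
         * never when frame coalescing is disabled, and otherwise roughly
         * once every tx_coal_frames packets (the modulo test catches the
         * case where this packet made the frame counter cross a multiple
         * of the threshold).
         */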
4213         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4214                 set_ic = true;
4215         else if (!priv->tx_coal_frames[queue])
4216                 set_ic = false;
4217         else if (tx_packets > priv->tx_coal_frames[queue])
4218                 set_ic = true;
4219         else if ((tx_q->tx_count_frames %
4220                   priv->tx_coal_frames[queue]) < tx_packets)
4221                 set_ic = true;
4222         else
4223                 set_ic = false;
4224
4225         if (set_ic) {
4226                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4227                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4228                 else
4229                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4230
4231                 tx_q->tx_count_frames = 0;
4232                 stmmac_set_tx_ic(priv, desc);
4233                 priv->xstats.tx_set_ic_bit++;
4234         }
4235
4236         /* We've used all descriptors we need for this skb, however,
4237          * advance cur_tx so that it references a fresh descriptor.
4238          * ndo_start_xmit will fill this descriptor the next time it's
4239          * called and stmmac_tx_clean may clean up to this descriptor.
4240          */
4241         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4242
4243         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4244                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4245                           __func__);
4246                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4247         }
4248
4249         dev->stats.tx_bytes += skb->len;
4250         priv->xstats.tx_tso_frames++;
4251         priv->xstats.tx_tso_nfrags += nfrags;
4252
4253         if (priv->sarc_type)
4254                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4255
4256         skb_tx_timestamp(skb);
4257
4258         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4259                      priv->hwts_tx_en)) {
4260                 /* declare that device is doing timestamping */
4261                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4262                 stmmac_enable_tx_timestamp(priv, first);
4263         }
4264
4265         /* Complete the first descriptor before granting the DMA */
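        /* The first descriptor carries the header length (hdr is the L4
         * header length in bytes, programmed in 4-byte words, hence hdr / 4)
         * and the total payload length of the GSO super-frame.
         */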
4266         stmmac_prepare_tso_tx_desc(priv, first, 1,
4267                         proto_hdr_len,
4268                         pay_len,
4269                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4270                         hdr / 4, (skb->len - proto_hdr_len));
4271
4272         /* If context desc is used to change MSS */
4273         if (mss_desc) {
4274                 /* Make sure that the first descriptor has been completely
4275                  * written, including its OWN bit. The MSS context descriptor
4276                  * sits before the first descriptor in the ring, so its OWN
4277                  * bit must be the last thing written.
4278                  */
4279                 dma_wmb();
4280                 stmmac_set_tx_owner(priv, mss_desc);
4281         }
4282
4283         if (netif_msg_pktdata(priv)) {
4284                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4285                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4286                         tx_q->cur_tx, first, nfrags);
4287                 pr_info(">>> frame to be transmitted: ");
4288                 print_pkt(skb->data, skb_headlen(skb));
4289         }
4290
4291         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4292
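        /* Make the new descriptors visible to the DMA by updating the queue's
         * tail pointer, and arm the TX coalescing timer so completed
         * descriptors get cleaned even if no further interrupt was requested.
         */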
4293         stmmac_flush_tx_descriptors(priv, queue);
4294         stmmac_tx_timer_arm(priv, queue);
4295
4296         return NETDEV_TX_OK;
4297
4298 dma_map_err:
4299         dev_err(priv->device, "Tx dma map failed\n");
4300         dev_kfree_skb(skb);
4301         priv->dev->stats.tx_dropped++;
4302         return NETDEV_TX_OK;
4303 }
4304
4305 /**
4306  *  stmmac_xmit - Tx entry point of the driver
4307  *  @skb : the socket buffer
4308  *  @dev : device pointer
4309  *  Description : this is the tx entry point of the driver.
4310  *  It programs the descriptor chain or ring and supports oversized
4311  *  frames and the SG feature.
4312  */
4313 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4314 {
4315         unsigned int first_entry, tx_packets, enh_desc;
4316         struct stmmac_priv *priv = netdev_priv(dev);
4317         unsigned int nopaged_len = skb_headlen(skb);
4318         int i, csum_insertion = 0, is_jumbo = 0;
4319         u32 queue = skb_get_queue_mapping(skb);
4320         int nfrags = skb_shinfo(skb)->nr_frags;
4321         int gso = skb_shinfo(skb)->gso_type;
4322         struct dma_edesc *tbs_desc = NULL;
4323         struct dma_desc *desc, *first;
4324         struct stmmac_tx_queue *tx_q;
4325         bool has_vlan, set_ic;
4326         int entry, first_tx;
4327         dma_addr_t des;
4328
4329         tx_q = &priv->dma_conf.tx_queue[queue];
4330         first_tx = tx_q->cur_tx;
4331
4332         if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4333                 stmmac_disable_eee_mode(priv);
4334
4335         /* Manage oversized TCP frames for GMAC4 device */
4336         if (skb_is_gso(skb) && priv->tso) {
4337                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4338                         return stmmac_tso_xmit(skb, dev);
4339                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4340                         return stmmac_tso_xmit(skb, dev);
4341         }
4342
4343         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4344                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4345                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4346                                                                 queue));
4347                         /* This is a hard error, log it. */
4348                         netdev_err(priv->dev,
4349                                    "%s: Tx Ring full when queue awake\n",
4350                                    __func__);
4351                 }
4352                 return NETDEV_TX_BUSY;
4353         }
4354
4355         /* Check if VLAN can be inserted by HW */
4356         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4357
4358         entry = tx_q->cur_tx;
4359         first_entry = entry;
4360         WARN_ON(tx_q->tx_skbuff[first_entry]);
4361
4362         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4363
4364         if (likely(priv->extend_desc))
4365                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4366         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4367                 desc = &tx_q->dma_entx[entry].basic;
4368         else
4369                 desc = tx_q->dma_tx + entry;
4370
4371         first = desc;
4372
4373         if (has_vlan)
4374                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4375
4376         enh_desc = priv->plat->enh_desc;
4377         /* To program the descriptors according to the size of the frame */
4378         if (enh_desc)
4379                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4380
4381         if (unlikely(is_jumbo)) {
4382                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4383                 if (unlikely(entry < 0) && (entry != -EINVAL))
4384                         goto dma_map_err;
4385         }
4386
4387         for (i = 0; i < nfrags; i++) {
4388                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4389                 int len = skb_frag_size(frag);
4390                 bool last_segment = (i == (nfrags - 1));
4391
4392                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4393                 WARN_ON(tx_q->tx_skbuff[entry]);
4394
4395                 if (likely(priv->extend_desc))
4396                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4397                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4398                         desc = &tx_q->dma_entx[entry].basic;
4399                 else
4400                         desc = tx_q->dma_tx + entry;
4401
4402                 des = skb_frag_dma_map(priv->device, frag, 0, len,
4403                                        DMA_TO_DEVICE);
4404                 if (dma_mapping_error(priv->device, des))
4405                         goto dma_map_err; /* should reuse desc w/o issues */
4406
4407                 tx_q->tx_skbuff_dma[entry].buf = des;
4408
4409                 stmmac_set_desc_addr(priv, desc, des);
4410
4411                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4412                 tx_q->tx_skbuff_dma[entry].len = len;
4413                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4414                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4415
4416                 /* Prepare the descriptor and set the own bit too */
4417                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4418                                 priv->mode, 1, last_segment, skb->len);
4419         }
4420
4421         /* Only the last descriptor gets to point to the skb. */
4422         tx_q->tx_skbuff[entry] = skb;
4423         tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4424
4425         /* According to the coalesce parameter, the IC bit for the latest
4426          * segment is reset and the timer re-started to clean the tx status.
4427          * This approach takes care of the fragments: desc is the first
4428          * element in case of no SG.
4429          */
4430         tx_packets = (entry + 1) - first_tx;
4431         tx_q->tx_count_frames += tx_packets;
4432
4433         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4434                 set_ic = true;
4435         else if (!priv->tx_coal_frames[queue])
4436                 set_ic = false;
4437         else if (tx_packets > priv->tx_coal_frames[queue])
4438                 set_ic = true;
4439         else if ((tx_q->tx_count_frames %
4440                   priv->tx_coal_frames[queue]) < tx_packets)
4441                 set_ic = true;
4442         else
4443                 set_ic = false;
4444
4445         if (set_ic) {
4446                 if (likely(priv->extend_desc))
4447                         desc = &tx_q->dma_etx[entry].basic;
4448                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4449                         desc = &tx_q->dma_entx[entry].basic;
4450                 else
4451                         desc = &tx_q->dma_tx[entry];
4452
4453                 tx_q->tx_count_frames = 0;
4454                 stmmac_set_tx_ic(priv, desc);
4455                 priv->xstats.tx_set_ic_bit++;
4456         }
4457
4458         /* We've used all descriptors we need for this skb, however,
4459          * advance cur_tx so that it references a fresh descriptor.
4460          * ndo_start_xmit will fill this descriptor the next time it's
4461          * called and stmmac_tx_clean may clean up to this descriptor.
4462          */
4463         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4464         tx_q->cur_tx = entry;
4465
4466         if (netif_msg_pktdata(priv)) {
4467                 netdev_dbg(priv->dev,
4468                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4469                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4470                            entry, first, nfrags);
4471
4472                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4473                 print_pkt(skb->data, skb->len);
4474         }
4475
4476         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4477                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4478                           __func__);
4479                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4480         }
4481
4482         dev->stats.tx_bytes += skb->len;
4483
4484         if (priv->sarc_type)
4485                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4486
4487         skb_tx_timestamp(skb);
4488
4489         /* Ready to fill the first descriptor and set the OWN bit w/o any
4490          * problems because all the descriptors are actually ready to be
4491          * passed to the DMA engine.
4492          */
4493         if (likely(!is_jumbo)) {
4494                 bool last_segment = (nfrags == 0);
4495
4496                 des = dma_map_single(priv->device, skb->data,
4497                                      nopaged_len, DMA_TO_DEVICE);
4498                 if (dma_mapping_error(priv->device, des))
4499                         goto dma_map_err;
4500
4501                 tx_q->tx_skbuff_dma[first_entry].buf = des;
4502                 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4503                 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4504
4505                 stmmac_set_desc_addr(priv, first, des);
4506
4507                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4508                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4509
4510                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4511                              priv->hwts_tx_en)) {
4512                         /* declare that device is doing timestamping */
4513                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4514                         stmmac_enable_tx_timestamp(priv, first);
4515                 }
4516
4517                 /* Prepare the first descriptor setting the OWN bit too */
4518                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4519                                 csum_insertion, priv->mode, 0, last_segment,
4520                                 skb->len);
4521         }
4522
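        /* When TBS (time-based scheduling) is enabled for this queue, the
         * enhanced descriptor carries a launch time taken from skb->tstamp,
         * telling the MAC when the frame may be sent out on the wire.
         */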
4523         if (tx_q->tbs & STMMAC_TBS_EN) {
4524                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4525
4526                 tbs_desc = &tx_q->dma_entx[first_entry];
4527                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4528         }
4529
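        /* Hand the first descriptor to the DMA only now, once the fragment
         * descriptors and the optional TBS launch time are fully programmed,
         * so the engine never sees a partially built chain.
         */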
4530         stmmac_set_tx_owner(priv, first);
4531
4532         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4533
4534         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4535
4536         stmmac_flush_tx_descriptors(priv, queue);
4537         stmmac_tx_timer_arm(priv, queue);
4538
4539         return NETDEV_TX_OK;
4540
4541 dma_map_err:
4542         netdev_err(priv->dev, "Tx DMA map failed\n");
4543         dev_kfree_skb(skb);
4544         priv->dev->stats.tx_dropped++;
4545         return NETDEV_TX_OK;
4546 }
4547
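/* Software emulation of VLAN RX offload: if the frame carries a VLAN tag
 * and the corresponding RX offload feature is enabled, pop the tag from
 * the packet data and record it in the skb via __vlan_hwaccel_put_tag().
 */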
4548 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4549 {
4550         struct vlan_ethhdr *veth;
4551         __be16 vlan_proto;
4552         u16 vlanid;
4553
4554         veth = (struct vlan_ethhdr *)skb->data;
4555         vlan_proto = veth->h_vlan_proto;
4556
4557         if ((vlan_proto == htons(ETH_P_8021Q) &&
4558              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4559             (vlan_proto == htons(ETH_P_8021AD) &&
4560              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4561                 /* pop the vlan tag */
4562                 vlanid = ntohs(veth->h_vlan_TCI);
4563                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4564                 skb_pull(skb, VLAN_HLEN);
4565                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4566         }
4567 }
4568
4569 /**
4570  * stmmac_rx_refill - refill the used preallocated RX buffers
4571  * @priv: driver private structure
4572  * @queue: RX queue index
4573  * Description : reallocate the RX buffers used by the zero-copy
4574  * reception process.
4575  */
4576 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4577 {
4578         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4579         int dirty = stmmac_rx_dirty(priv, queue);
4580         unsigned int entry = rx_q->dirty_rx;
4581         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4582
4583         if (priv->dma_cap.addr64 <= 32)
4584                 gfp |= GFP_DMA32;
4585
4586         while (dirty-- > 0) {
4587                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4588                 struct dma_desc *p;
4589                 bool use_rx_wd;
4590
4591                 if (priv->extend_desc)
4592                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4593                 else
4594                         p = rx_q->dma_rx + entry;
4595
4596                 if (!buf->page) {
4597                         buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4598                         if (!buf->page)
4599                                 break;
4600                 }
4601
4602                 if (priv->sph && !buf->sec_page) {
4603                         buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4604                         if (!buf->sec_page)
4605                                 break;
4606
4607                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4608                 }
4609
4610                 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4611
4612                 stmmac_set_desc_addr(priv, p, buf->addr);
4613                 if (priv->sph)
4614                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4615                 else
4616                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4617                 stmmac_refill_desc3(priv, rx_q, p);
4618
4619                 rx_q->rx_count_frames++;
4620                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4621                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4622                         rx_q->rx_count_frames = 0;
4623
4624                 use_rx_wd = !priv->rx_coal_frames[queue];
4625                 use_rx_wd |= rx_q->rx_count_frames > 0;
4626                 if (!priv->use_riwt)
4627                         use_rx_wd = false;
4628
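                /* All descriptor fields must be visible before ownership is
                 * handed back to the DMA; use_rx_wd lets completion signalling
                 * rely on the RX watchdog (RIWT) instead of a per-descriptor
                 * interrupt.
                 */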
4629                 dma_wmb();
4630                 stmmac_set_rx_owner(priv, p, use_rx_wd);
4631
4632                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4633         }
4634         rx_q->dirty_rx = entry;
4635         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4636                             (rx_q->dirty_rx * sizeof(struct dma_desc));
4637         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4638 }
4639
4640 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4641                                        struct dma_desc *p,
4642                                        int status, unsigned int len)
4643 {
4644         unsigned int plen = 0, hlen = 0;
4645         int coe = priv->hw->rx_csum;
4646
4647         /* Not first descriptor, buffer is always zero */
4648         if (priv->sph && len)
4649                 return 0;
4650
4651         /* First descriptor, get split header length */
4652         stmmac_get_rx_header_len(priv, p, &hlen);
4653         if (priv->sph && hlen) {
4654                 priv->xstats.rx_split_hdr_pkt_n++;
4655                 return hlen;
4656         }
4657
4658         /* First descriptor, not last descriptor and not split header */
4659         if (status & rx_not_ls)
4660                 return priv->dma_conf.dma_buf_sz;
4661
4662         plen = stmmac_get_rx_frame_len(priv, p, coe);
4663
4664         /* First descriptor and last descriptor and not split header */
4665         return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4666 }
4667
4668 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4669                                        struct dma_desc *p,
4670                                        int status, unsigned int len)
4671 {
4672         int coe = priv->hw->rx_csum;
4673         unsigned int plen = 0;
4674
4675         /* Not split header, buffer is not available */
4676         if (!priv->sph)
4677                 return 0;
4678
4679         /* Not last descriptor */
4680         if (status & rx_not_ls)
4681                 return priv->dma_conf.dma_buf_sz;
4682
4683         plen = stmmac_get_rx_frame_len(priv, p, coe);
4684
4685         /* Last descriptor */
4686         return plen - len;
4687 }
4688
4689 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4690                                 struct xdp_frame *xdpf, bool dma_map)
4691 {
4692         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4693         unsigned int entry = tx_q->cur_tx;
4694         struct dma_desc *tx_desc;
4695         dma_addr_t dma_addr;
4696         bool set_ic;
4697
4698         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4699                 return STMMAC_XDP_CONSUMED;
4700
4701         if (likely(priv->extend_desc))
4702                 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4703         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4704                 tx_desc = &tx_q->dma_entx[entry].basic;
4705         else
4706                 tx_desc = tx_q->dma_tx + entry;
4707
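        /* Two mapping cases: frames coming from ndo_xdp_xmit (dma_map true)
         * need a fresh DMA mapping of their data, while XDP_TX frames reuse
         * the page_pool mapping of the RX page and only need the offset of
         * the frame data within the page plus a sync for the device.
         */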
4708         if (dma_map) {
4709                 dma_addr = dma_map_single(priv->device, xdpf->data,
4710                                           xdpf->len, DMA_TO_DEVICE);
4711                 if (dma_mapping_error(priv->device, dma_addr))
4712                         return STMMAC_XDP_CONSUMED;
4713
4714                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4715         } else {
4716                 struct page *page = virt_to_page(xdpf->data);
4717
4718                 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4719                            xdpf->headroom;
4720                 dma_sync_single_for_device(priv->device, dma_addr,
4721                                            xdpf->len, DMA_BIDIRECTIONAL);
4722
4723                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4724         }
4725
4726         tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4727         tx_q->tx_skbuff_dma[entry].map_as_page = false;
4728         tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4729         tx_q->tx_skbuff_dma[entry].last_segment = true;
4730         tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4731
4732         tx_q->xdpf[entry] = xdpf;
4733
4734         stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4735
4736         stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4737                                true, priv->mode, true, true,
4738                                xdpf->len);
4739
4740         tx_q->tx_count_frames++;
4741
4742         if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4743                 set_ic = true;
4744         else
4745                 set_ic = false;
4746
4747         if (set_ic) {
4748                 tx_q->tx_count_frames = 0;
4749                 stmmac_set_tx_ic(priv, tx_desc);
4750                 priv->xstats.tx_set_ic_bit++;
4751         }
4752
4753         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4754
4755         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4756         tx_q->cur_tx = entry;
4757
4758         return STMMAC_XDP_TX;
4759 }
4760
4761 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4762                                    int cpu)
4763 {
4764         int index = cpu;
4765
4766         if (unlikely(index < 0))
4767                 index = 0;
4768
4769         while (index >= priv->plat->tx_queues_to_use)
4770                 index -= priv->plat->tx_queues_to_use;
4771
4772         return index;
4773 }
4774
4775 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4776                                 struct xdp_buff *xdp)
4777 {
4778         struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4779         int cpu = smp_processor_id();
4780         struct netdev_queue *nq;
4781         int queue;
4782         int res;
4783
4784         if (unlikely(!xdpf))
4785                 return STMMAC_XDP_CONSUMED;
4786
4787         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4788         nq = netdev_get_tx_queue(priv->dev, queue);
4789
4790         __netif_tx_lock(nq, cpu);
4791         /* Avoids TX time-out as we are sharing with slow path */
4792         txq_trans_cond_update(nq);
4793
4794         res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4795         if (res == STMMAC_XDP_TX)
4796                 stmmac_flush_tx_descriptors(priv, queue);
4797
4798         __netif_tx_unlock(nq);
4799
4800         return res;
4801 }
4802
4803 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4804                                  struct bpf_prog *prog,
4805                                  struct xdp_buff *xdp)
4806 {
4807         u32 act;
4808         int res;
4809
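        /* Run the XDP program and translate its verdict into the driver's
         * STMMAC_XDP_* flags; TX and REDIRECT results are propagated as flags
         * so stmmac_finalize_xdp_rx() can arm the TX timer and flush redirects
         * once per NAPI run.
         */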
4810         act = bpf_prog_run_xdp(prog, xdp);
4811         switch (act) {
4812         case XDP_PASS:
4813                 res = STMMAC_XDP_PASS;
4814                 break;
4815         case XDP_TX:
4816                 res = stmmac_xdp_xmit_back(priv, xdp);
4817                 break;
4818         case XDP_REDIRECT:
4819                 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4820                         res = STMMAC_XDP_CONSUMED;
4821                 else
4822                         res = STMMAC_XDP_REDIRECT;
4823                 break;
4824         default:
4825                 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4826                 fallthrough;
4827         case XDP_ABORTED:
4828                 trace_xdp_exception(priv->dev, prog, act);
4829                 fallthrough;
4830         case XDP_DROP:
4831                 res = STMMAC_XDP_CONSUMED;
4832                 break;
4833         }
4834
4835         return res;
4836 }
4837
4838 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4839                                            struct xdp_buff *xdp)
4840 {
4841         struct bpf_prog *prog;
4842         int res;
4843
4844         prog = READ_ONCE(priv->xdp_prog);
4845         if (!prog) {
4846                 res = STMMAC_XDP_PASS;
4847                 goto out;
4848         }
4849
4850         res = __stmmac_xdp_run_prog(priv, prog, xdp);
4851 out:
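        /* Encode the verdict as an ERR_PTR: XDP_PASS maps to NULL so the
         * caller builds an skb from the buffer, any other result is detected
         * with IS_ERR() and decoded via -PTR_ERR().
         */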
4852         return ERR_PTR(-res);
4853 }
4854
4855 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4856                                    int xdp_status)
4857 {
4858         int cpu = smp_processor_id();
4859         int queue;
4860
4861         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4862
4863         if (xdp_status & STMMAC_XDP_TX)
4864                 stmmac_tx_timer_arm(priv, queue);
4865
4866         if (xdp_status & STMMAC_XDP_REDIRECT)
4867                 xdp_do_flush();
4868 }
4869
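/* Build an skb copy of an XSK zero-copy RX buffer (data and XDP metadata)
 * so the frame can be passed up the stack while the caller returns the
 * original buffer to the XSK pool.
 */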
4870 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4871                                                struct xdp_buff *xdp)
4872 {
4873         unsigned int metasize = xdp->data - xdp->data_meta;
4874         unsigned int datasize = xdp->data_end - xdp->data;
4875         struct sk_buff *skb;
4876
4877         skb = __napi_alloc_skb(&ch->rxtx_napi,
4878                                xdp->data_end - xdp->data_hard_start,
4879                                GFP_ATOMIC | __GFP_NOWARN);
4880         if (unlikely(!skb))
4881                 return NULL;
4882
4883         skb_reserve(skb, xdp->data - xdp->data_hard_start);
4884         memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4885         if (metasize)
4886                 skb_metadata_set(skb, metasize);
4887
4888         return skb;
4889 }
4890
4891 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4892                                    struct dma_desc *p, struct dma_desc *np,
4893                                    struct xdp_buff *xdp)
4894 {
4895         struct stmmac_channel *ch = &priv->channel[queue];
4896         unsigned int len = xdp->data_end - xdp->data;
4897         enum pkt_hash_types hash_type;
4898         int coe = priv->hw->rx_csum;
4899         struct sk_buff *skb;
4900         u32 hash;
4901
4902         skb = stmmac_construct_skb_zc(ch, xdp);
4903         if (!skb) {
4904                 priv->dev->stats.rx_dropped++;
4905                 return;
4906         }
4907
4908         stmmac_get_rx_hwtstamp(priv, p, np, skb);
4909         stmmac_rx_vlan(priv->dev, skb);
4910         skb->protocol = eth_type_trans(skb, priv->dev);
4911
4912         if (unlikely(!coe))
4913                 skb_checksum_none_assert(skb);
4914         else
4915                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4916
4917         if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4918                 skb_set_hash(skb, hash, hash_type);
4919
4920         skb_record_rx_queue(skb, queue);
4921         napi_gro_receive(&ch->rxtx_napi, skb);
4922
4923         priv->dev->stats.rx_packets++;
4924         priv->dev->stats.rx_bytes += len;
4925 }
4926
4927 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4928 {
4929         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4930         unsigned int entry = rx_q->dirty_rx;
4931         struct dma_desc *rx_desc = NULL;
4932         bool ret = true;
4933
4934         budget = min(budget, stmmac_rx_dirty(priv, queue));
4935
4936         while (budget-- > 0 && entry != rx_q->cur_rx) {
4937                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4938                 dma_addr_t dma_addr;
4939                 bool use_rx_wd;
4940
4941                 if (!buf->xdp) {
4942                         buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4943                         if (!buf->xdp) {
4944                                 ret = false;
4945                                 break;
4946                         }
4947                 }
4948
4949                 if (priv->extend_desc)
4950                         rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4951                 else
4952                         rx_desc = rx_q->dma_rx + entry;
4953
4954                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4955                 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4956                 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4957                 stmmac_refill_desc3(priv, rx_q, rx_desc);
4958
4959                 rx_q->rx_count_frames++;
4960                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4961                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4962                         rx_q->rx_count_frames = 0;
4963
4964                 use_rx_wd = !priv->rx_coal_frames[queue];
4965                 use_rx_wd |= rx_q->rx_count_frames > 0;
4966                 if (!priv->use_riwt)
4967                         use_rx_wd = false;
4968
4969                 dma_wmb();
4970                 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4971
4972                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4973         }
4974
4975         if (rx_desc) {
4976                 rx_q->dirty_rx = entry;
4977                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4978                                      (rx_q->dirty_rx * sizeof(struct dma_desc));
4979                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4980         }
4981
4982         return ret;
4983 }
4984
4985 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4986 {
4987         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4988         unsigned int count = 0, error = 0, len = 0;
4989         int dirty = stmmac_rx_dirty(priv, queue);
4990         unsigned int next_entry = rx_q->cur_rx;
4991         unsigned int desc_size;
4992         struct bpf_prog *prog;
4993         bool failure = false;
4994         int xdp_status = 0;
4995         int status = 0;
4996
4997         if (netif_msg_rx_status(priv)) {
4998                 void *rx_head;
4999
5000                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5001                 if (priv->extend_desc) {
5002                         rx_head = (void *)rx_q->dma_erx;
5003                         desc_size = sizeof(struct dma_extended_desc);
5004                 } else {
5005                         rx_head = (void *)rx_q->dma_rx;
5006                         desc_size = sizeof(struct dma_desc);
5007                 }
5008
5009                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5010                                     rx_q->dma_rx_phy, desc_size);
5011         }
5012         while (count < limit) {
5013                 struct stmmac_rx_buffer *buf;
5014                 unsigned int buf1_len = 0;
5015                 struct dma_desc *np, *p;
5016                 int entry;
5017                 int res;
5018
5019                 if (!count && rx_q->state_saved) {
5020                         error = rx_q->state.error;
5021                         len = rx_q->state.len;
5022                 } else {
5023                         rx_q->state_saved = false;
5024                         error = 0;
5025                         len = 0;
5026                 }
5027
5028                 if (count >= limit)
5029                         break;
5030
5031 read_again:
5032                 buf1_len = 0;
5033                 entry = next_entry;
5034                 buf = &rx_q->buf_pool[entry];
5035
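                /* Refill from the XSK pool in batches of at least
                 * STMMAC_RX_FILL_BATCH used descriptors, so the RX tail
                 * pointer is not rewritten for every single frame.
                 */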
5036                 if (dirty >= STMMAC_RX_FILL_BATCH) {
5037                         failure = failure ||
5038                                   !stmmac_rx_refill_zc(priv, queue, dirty);
5039                         dirty = 0;
5040                 }
5041
5042                 if (priv->extend_desc)
5043                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5044                 else
5045                         p = rx_q->dma_rx + entry;
5046
5047                 /* read the status of the incoming frame */
5048                 status = stmmac_rx_status(priv, &priv->dev->stats,
5049                                           &priv->xstats, p);
5050                 /* check if still owned by the DMA, otherwise go ahead */
5051                 if (unlikely(status & dma_own))
5052                         break;
5053
5054                 /* Prefetch the next RX descriptor */
5055                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5056                                                 priv->dma_conf.dma_rx_size);
5057                 next_entry = rx_q->cur_rx;
5058
5059                 if (priv->extend_desc)
5060                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5061                 else
5062                         np = rx_q->dma_rx + next_entry;
5063
5064                 prefetch(np);
5065
5066                 /* Ensure a valid XSK buffer before proceeding */
5067                 if (!buf->xdp)
5068                         break;
5069
5070                 if (priv->extend_desc)
5071                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5072                                                   &priv->xstats,
5073                                                   rx_q->dma_erx + entry);
5074                 if (unlikely(status == discard_frame)) {
5075                         xsk_buff_free(buf->xdp);
5076                         buf->xdp = NULL;
5077                         dirty++;
5078                         error = 1;
5079                         if (!priv->hwts_rx_en)
5080                                 priv->dev->stats.rx_errors++;
5081                 }
5082
5083                 if (unlikely(error && (status & rx_not_ls)))
5084                         goto read_again;
5085                 if (unlikely(error)) {
5086                         count++;
5087                         continue;
5088                 }
5089
5090                 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5091                 if (likely(status & rx_not_ls)) {
5092                         xsk_buff_free(buf->xdp);
5093                         buf->xdp = NULL;
5094                         dirty++;
5095                         count++;
5096                         goto read_again;
5097                 }
5098
5099                 /* XDP ZC frames only support primary buffers for now */
5100                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5101                 len += buf1_len;
5102
5103                 /* ACS is disabled; strip manually. */
5104                 if (likely(!(status & rx_not_ls))) {
5105                         buf1_len -= ETH_FCS_LEN;
5106                         len -= ETH_FCS_LEN;
5107                 }
5108
5109                 /* RX buffer is good and fits into an XSK pool buffer */
5110                 buf->xdp->data_end = buf->xdp->data + buf1_len;
5111                 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5112
5113                 prog = READ_ONCE(priv->xdp_prog);
5114                 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5115
5116                 switch (res) {
5117                 case STMMAC_XDP_PASS:
5118                         stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5119                         xsk_buff_free(buf->xdp);
5120                         break;
5121                 case STMMAC_XDP_CONSUMED:
5122                         xsk_buff_free(buf->xdp);
5123                         priv->dev->stats.rx_dropped++;
5124                         break;
5125                 case STMMAC_XDP_TX:
5126                 case STMMAC_XDP_REDIRECT:
5127                         xdp_status |= res;
5128                         break;
5129                 }
5130
5131                 buf->xdp = NULL;
5132                 dirty++;
5133                 count++;
5134         }
5135
5136         if (status & rx_not_ls) {
5137                 rx_q->state_saved = true;
5138                 rx_q->state.error = error;
5139                 rx_q->state.len = len;
5140         }
5141
5142         stmmac_finalize_xdp_rx(priv, xdp_status);
5143
5144         priv->xstats.rx_pkt_n += count;
5145         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5146
5147         if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5148                 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5149                         xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5150                 else
5151                         xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5152
5153                 return (int)count;
5154         }
5155
5156         return failure ? limit : (int)count;
5157 }
5158
5159 /**
5160  * stmmac_rx - manage the receive process
5161  * @priv: driver private structure
5162  * @limit: napi budget
5163  * @queue: RX queue index.
5164  * Description : this is the function called by the napi poll method.
5165  * It gets all the frames inside the ring.
5166  */
5167 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5168 {
5169         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5170         struct stmmac_channel *ch = &priv->channel[queue];
5171         unsigned int count = 0, error = 0, len = 0;
5172         int status = 0, coe = priv->hw->rx_csum;
5173         unsigned int next_entry = rx_q->cur_rx;
5174         enum dma_data_direction dma_dir;
5175         unsigned int desc_size;
5176         struct sk_buff *skb = NULL;
5177         struct xdp_buff xdp;
5178         int xdp_status = 0;
5179         int buf_sz;
5180
5181         dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5182         buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5183
5184         if (netif_msg_rx_status(priv)) {
5185                 void *rx_head;
5186
5187                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5188                 if (priv->extend_desc) {
5189                         rx_head = (void *)rx_q->dma_erx;
5190                         desc_size = sizeof(struct dma_extended_desc);
5191                 } else {
5192                         rx_head = (void *)rx_q->dma_rx;
5193                         desc_size = sizeof(struct dma_desc);
5194                 }
5195
5196                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5197                                     rx_q->dma_rx_phy, desc_size);
5198         }
5199         while (count < limit) {
5200                 unsigned int buf1_len = 0, buf2_len = 0;
5201                 enum pkt_hash_types hash_type;
5202                 struct stmmac_rx_buffer *buf;
5203                 struct dma_desc *np, *p;
5204                 int entry;
5205                 u32 hash;
5206
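                /* A frame spanning several descriptors may be cut short by
                 * the NAPI budget; in that case the partially built skb and
                 * the accumulated error/length state were saved in rx_q->state
                 * at the end of the previous poll and are restored here.
                 */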
5207                 if (!count && rx_q->state_saved) {
5208                         skb = rx_q->state.skb;
5209                         error = rx_q->state.error;
5210                         len = rx_q->state.len;
5211                 } else {
5212                         rx_q->state_saved = false;
5213                         skb = NULL;
5214                         error = 0;
5215                         len = 0;
5216                 }
5217
5218                 if (count >= limit)
5219                         break;
5220
5221 read_again:
5222                 buf1_len = 0;
5223                 buf2_len = 0;
5224                 entry = next_entry;
5225                 buf = &rx_q->buf_pool[entry];
5226
5227                 if (priv->extend_desc)
5228                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5229                 else
5230                         p = rx_q->dma_rx + entry;
5231
5232                 /* read the status of the incoming frame */
5233                 status = stmmac_rx_status(priv, &priv->dev->stats,
5234                                 &priv->xstats, p);
5235                 /* check if still owned by the DMA, otherwise go ahead */
5236                 if (unlikely(status & dma_own))
5237                         break;
5238
5239                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5240                                                 priv->dma_conf.dma_rx_size);
5241                 next_entry = rx_q->cur_rx;
5242
5243                 if (priv->extend_desc)
5244                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5245                 else
5246                         np = rx_q->dma_rx + next_entry;
5247
5248                 prefetch(np);
5249
5250                 if (priv->extend_desc)
5251                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5252                                         &priv->xstats, rx_q->dma_erx + entry);
5253                 if (unlikely(status == discard_frame)) {
5254                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5255                         buf->page = NULL;
5256                         error = 1;
5257                         if (!priv->hwts_rx_en)
5258                                 priv->dev->stats.rx_errors++;
5259                 }
5260
5261                 if (unlikely(error && (status & rx_not_ls)))
5262                         goto read_again;
5263                 if (unlikely(error)) {
5264                         dev_kfree_skb(skb);
5265                         skb = NULL;
5266                         count++;
5267                         continue;
5268                 }
5269
5270                 /* Buffer is good. Go on. */
5271
5272                 prefetch(page_address(buf->page) + buf->page_offset);
5273                 if (buf->sec_page)
5274                         prefetch(page_address(buf->sec_page));
5275
5276                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5277                 len += buf1_len;
5278                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5279                 len += buf2_len;
5280
5281                 /* ACS is disabled; strip manually. */
5282                 if (likely(!(status & rx_not_ls))) {
5283                         if (buf2_len) {
5284                                 buf2_len -= ETH_FCS_LEN;
5285                                 len -= ETH_FCS_LEN;
5286                         } else if (buf1_len) {
5287                                 buf1_len -= ETH_FCS_LEN;
5288                                 len -= ETH_FCS_LEN;
5289                         }
5290                 }
5291
5292                 if (!skb) {
5293                         unsigned int pre_len, sync_len;
5294
5295                         dma_sync_single_for_cpu(priv->device, buf->addr,
5296                                                 buf1_len, dma_dir);
5297
5298                         xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5299                         xdp_prepare_buff(&xdp, page_address(buf->page),
5300                                          buf->page_offset, buf1_len, false);
5301
5302                         pre_len = xdp.data_end - xdp.data_hard_start -
5303                                   buf->page_offset;
5304                         skb = stmmac_xdp_run_prog(priv, &xdp);
5305                         /* Due to xdp_adjust_tail: the DMA sync for_device
5306                          * must cover the max length the CPU touched
5307                          */
5308                         sync_len = xdp.data_end - xdp.data_hard_start -
5309                                    buf->page_offset;
5310                         sync_len = max(sync_len, pre_len);
5311
5312                         /* For verdicts other than XDP_PASS */
5313                         if (IS_ERR(skb)) {
5314                                 unsigned int xdp_res = -PTR_ERR(skb);
5315
5316                                 if (xdp_res & STMMAC_XDP_CONSUMED) {
5317                                         page_pool_put_page(rx_q->page_pool,
5318                                                            virt_to_head_page(xdp.data),
5319                                                            sync_len, true);
5320                                         buf->page = NULL;
5321                                         priv->dev->stats.rx_dropped++;
5322
5323                                         /* Clear skb, as it holds the XDP
5324                                          * verdict rather than a real skb.
5325                                          */
5326                                         skb = NULL;
5327
5328                                         if (unlikely((status & rx_not_ls)))
5329                                                 goto read_again;
5330
5331                                         count++;
5332                                         continue;
5333                                 } else if (xdp_res & (STMMAC_XDP_TX |
5334                                                       STMMAC_XDP_REDIRECT)) {
5335                                         xdp_status |= xdp_res;
5336                                         buf->page = NULL;
5337                                         skb = NULL;
5338                                         count++;
5339                                         continue;
5340                                 }
5341                         }
5342                 }
5343
5344                 if (!skb) {
5345                         /* XDP program may expand or reduce tail */
5346                         buf1_len = xdp.data_end - xdp.data;
5347
5348                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5349                         if (!skb) {
5350                                 priv->dev->stats.rx_dropped++;
5351                                 count++;
5352                                 goto drain_data;
5353                         }
5354
5355                         /* XDP program may adjust header */
5356                         skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5357                         skb_put(skb, buf1_len);
5358
5359                         /* Data payload copied into SKB, page ready for recycle */
5360                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5361                         buf->page = NULL;
5362                 } else if (buf1_len) {
5363                         dma_sync_single_for_cpu(priv->device, buf->addr,
5364                                                 buf1_len, dma_dir);
5365                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5366                                         buf->page, buf->page_offset, buf1_len,
5367                                         priv->dma_conf.dma_buf_sz);
5368
5369                         /* Data payload appended into SKB */
5370                         page_pool_release_page(rx_q->page_pool, buf->page);
5371                         buf->page = NULL;
5372                 }
5373
5374                 if (buf2_len) {
5375                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5376                                                 buf2_len, dma_dir);
5377                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5378                                         buf->sec_page, 0, buf2_len,
5379                                         priv->dma_conf.dma_buf_sz);
5380
5381                         /* Data payload appended into SKB */
5382                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
5383                         buf->sec_page = NULL;
5384                 }
5385
5386 drain_data:
5387                 if (likely(status & rx_not_ls))
5388                         goto read_again;
5389                 if (!skb)
5390                         continue;
5391
5392                 /* Got entire packet into SKB. Finish it. */
5393
5394                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5395                 stmmac_rx_vlan(priv->dev, skb);
5396                 skb->protocol = eth_type_trans(skb, priv->dev);
5397
5398                 if (unlikely(!coe))
5399                         skb_checksum_none_assert(skb);
5400                 else
5401                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5402
5403                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5404                         skb_set_hash(skb, hash, hash_type);
5405
5406                 skb_record_rx_queue(skb, queue);
5407                 napi_gro_receive(&ch->rx_napi, skb);
5408                 skb = NULL;
5409
5410                 priv->dev->stats.rx_packets++;
5411                 priv->dev->stats.rx_bytes += len;
5412                 count++;
5413         }
5414
5415         if (status & rx_not_ls || skb) {
5416                 rx_q->state_saved = true;
5417                 rx_q->state.skb = skb;
5418                 rx_q->state.error = error;
5419                 rx_q->state.len = len;
5420         }
5421
5422         stmmac_finalize_xdp_rx(priv, xdp_status);
5423
5424         stmmac_rx_refill(priv, queue);
5425
5426         priv->xstats.rx_pkt_n += count;
5427         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5428
5429         return count;
5430 }
5431
5432 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5433 {
5434         struct stmmac_channel *ch =
5435                 container_of(napi, struct stmmac_channel, rx_napi);
5436         struct stmmac_priv *priv = ch->priv_data;
5437         u32 chan = ch->index;
5438         int work_done;
5439
5440         priv->xstats.napi_poll++;
5441
5442         work_done = stmmac_rx(priv, budget, chan);
5443         if (work_done < budget && napi_complete_done(napi, work_done)) {
5444                 unsigned long flags;
5445
5446                 spin_lock_irqsave(&ch->lock, flags);
5447                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5448                 spin_unlock_irqrestore(&ch->lock, flags);
5449         }
5450
5451         return work_done;
5452 }
5453
5454 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5455 {
5456         struct stmmac_channel *ch =
5457                 container_of(napi, struct stmmac_channel, tx_napi);
5458         struct stmmac_priv *priv = ch->priv_data;
5459         u32 chan = ch->index;
5460         int work_done;
5461
5462         priv->xstats.napi_poll++;
5463
5464         work_done = stmmac_tx_clean(priv, budget, chan);
5465         work_done = min(work_done, budget);
5466
5467         if (work_done < budget && napi_complete_done(napi, work_done)) {
5468                 unsigned long flags;
5469
5470                 spin_lock_irqsave(&ch->lock, flags);
5471                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5472                 spin_unlock_irqrestore(&ch->lock, flags);
5473         }
5474
5475         return work_done;
5476 }
5477
5478 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5479 {
5480         struct stmmac_channel *ch =
5481                 container_of(napi, struct stmmac_channel, rxtx_napi);
5482         struct stmmac_priv *priv = ch->priv_data;
5483         int rx_done, tx_done, rxtx_done;
5484         u32 chan = ch->index;
5485
5486         priv->xstats.napi_poll++;
5487
5488         tx_done = stmmac_tx_clean(priv, budget, chan);
5489         tx_done = min(tx_done, budget);
5490
5491         rx_done = stmmac_rx_zc(priv, budget, chan);
5492
5493         rxtx_done = max(tx_done, rx_done);
5494
5495         /* If either TX or RX work is not complete, return budget
5496          * and keep polling
5497          */
5498         if (rxtx_done >= budget)
5499                 return budget;
5500
5501         /* all work done, exit the polling mode */
5502         if (napi_complete_done(napi, rxtx_done)) {
5503                 unsigned long flags;
5504
5505                 spin_lock_irqsave(&ch->lock, flags);
5506                 /* Both RX and TX work are complete,
5507                  * so enable both RX & TX IRQs.
5508                  */
5509                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5510                 spin_unlock_irqrestore(&ch->lock, flags);
5511         }
5512
5513         return min(rxtx_done, budget - 1);
5514 }
5515
5516 /**
5517  *  stmmac_tx_timeout
5518  *  @dev : Pointer to net device structure
5519  *  @txqueue: the index of the hanging transmit queue
5520  *  Description: this function is called when a packet transmission fails to
5521  *   complete within a reasonable time. The driver will mark the error in the
5522  *   netdev structure and arrange for the device to be reset to a sane state
5523  *   in order to transmit a new packet.
5524  */
5525 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5526 {
5527         struct stmmac_priv *priv = netdev_priv(dev);
5528
5529         stmmac_global_err(priv);
5530 }
5531
5532 /**
5533  *  stmmac_set_rx_mode - entry point for multicast addressing
5534  *  @dev : pointer to the device structure
5535  *  Description:
5536  *  This function is a driver entry point which gets called by the kernel
5537  *  whenever multicast addresses must be enabled/disabled.
5538  *  Return value:
5539  *  void.
5540  */
5541 static void stmmac_set_rx_mode(struct net_device *dev)
5542 {
5543         struct stmmac_priv *priv = netdev_priv(dev);
5544
5545         stmmac_set_filter(priv, priv->hw, dev);
5546 }
5547
5548 /**
5549  *  stmmac_change_mtu - entry point to change MTU size for the device.
5550  *  @dev : device pointer.
5551  *  @new_mtu : the new MTU size for the device.
5552  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5553  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5554  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5555  *  Return value:
5556  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5557  *  file on failure.
5558  */
5559 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5560 {
5561         struct stmmac_priv *priv = netdev_priv(dev);
5562         int txfifosz = priv->plat->tx_fifo_size;
5563         struct stmmac_dma_conf *dma_conf;
5564         const int mtu = new_mtu;
5565         int ret;
5566
5567         if (txfifosz == 0)
5568                 txfifosz = priv->dma_cap.tx_fifo_size;
5569
5570         txfifosz /= priv->plat->tx_queues_to_use;
5571
5572         if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5573                 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5574                 return -EINVAL;
5575         }
5576
5577         new_mtu = STMMAC_ALIGN(new_mtu);
5578
5579         /* Reject the new MTU if the FIFO is too small or the MTU is too large */
5580         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5581                 return -EINVAL;
5582
5583         if (netif_running(dev)) {
5584                 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5585                 /* Try to allocate the new DMA conf with the new mtu */
5586                 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5587                 if (IS_ERR(dma_conf)) {
5588                         netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5589                                    mtu);
5590                         return PTR_ERR(dma_conf);
5591                 }
5592
5593                 stmmac_release(dev);
5594
5595                 ret = __stmmac_open(dev, dma_conf);
5596                 kfree(dma_conf);
5597                 if (ret) {
5598                         netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5599                         return ret;
5600                 }
5601
5602                 stmmac_set_rx_mode(dev);
5603         }
5604
5605         dev->mtu = mtu;
5606         netdev_update_features(dev);
5607
5608         return 0;
5609 }
5610
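/**
 *  stmmac_fix_features - adjust the requested netdev feature set
 *  @dev: device pointer
 *  @features: features requested by the stack
 *  Description: drops RX/TX checksum offload flags that the hardware (or the
 *  current MTU, on devices with buggy Jumbo frame support) cannot handle and
 *  updates the driver TSO state according to the requested features.
 */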
5611 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5612                                              netdev_features_t features)
5613 {
5614         struct stmmac_priv *priv = netdev_priv(dev);
5615
5616         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5617                 features &= ~NETIF_F_RXCSUM;
5618
5619         if (!priv->plat->tx_coe)
5620                 features &= ~NETIF_F_CSUM_MASK;
5621
5622         /* Some GMAC devices have buggy Jumbo frame support that
5623          * requires the Tx COE to be disabled for oversized frames
5624          * (due to limited buffer sizes). In this case we disable
5625          * the TX csum insertion in the TDES and do not use SF.
5626          */
5627         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5628                 features &= ~NETIF_F_CSUM_MASK;
5629
5630         /* Disable tso if asked by ethtool */
5631         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5632                 if (features & NETIF_F_TSO)
5633                         priv->tso = true;
5634                 else
5635                         priv->tso = false;
5636         }
5637
5638         return features;
5639 }
5640
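/**
 *  stmmac_set_features - apply a new netdev feature set
 *  @netdev: device pointer
 *  @features: features to be enabled
 *  Description: programs the RX checksum engine according to NETIF_F_RXCSUM
 *  and, if the Split Header capability is present, reprograms SPH on every
 *  RX channel (SPH is only enabled while RX checksum offload is active).
 */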
5641 static int stmmac_set_features(struct net_device *netdev,
5642                                netdev_features_t features)
5643 {
5644         struct stmmac_priv *priv = netdev_priv(netdev);
5645
5646         /* Keep the COE Type if checksum offload is supported */
5647         if (features & NETIF_F_RXCSUM)
5648                 priv->hw->rx_csum = priv->plat->rx_coe;
5649         else
5650                 priv->hw->rx_csum = 0;
5651         /* No check is needed because rx_coe was set before and it will be
5652          * fixed in case of an issue.
5653          */
5654         stmmac_rx_ipc(priv, priv->hw);
5655
5656         if (priv->sph_cap) {
5657                 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5658                 u32 chan;
5659
5660                 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5661                         stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5662         }
5663
5664         return 0;
5665 }
5666
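/**
 *  stmmac_fpe_event_status - handle Frame Preemption (FPE) handshake events
 *  @priv: driver private structure
 *  @status: FPE_EVENT_* bits reported by the hardware
 *  Description: updates the local and link-partner FPE states according to
 *  the verify/response mPackets seen on the link and schedules the FPE
 *  workqueue task to complete the handshake.
 */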
5667 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5668 {
5669         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5670         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5671         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5672         bool *hs_enable = &fpe_cfg->hs_enable;
5673
5674         if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5675                 return;
5676
5677         /* If LP has sent verify mPacket, LP is FPE capable */
5678         if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5679                 if (*lp_state < FPE_STATE_CAPABLE)
5680                         *lp_state = FPE_STATE_CAPABLE;
5681
5682                 /* If the user has requested FPE enable, respond quickly */
5683                 if (*hs_enable)
5684                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5685                                                 MPACKET_RESPONSE);
5686         }
5687
5688         /* If Local has sent verify mPacket, Local is FPE capable */
5689         if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5690                 if (*lo_state < FPE_STATE_CAPABLE)
5691                         *lo_state = FPE_STATE_CAPABLE;
5692         }
5693
5694         /* If LP has sent response mPacket, LP is entering FPE ON */
5695         if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5696                 *lp_state = FPE_STATE_ENTERING_ON;
5697
5698         /* If Local has sent response mPacket, Local is entering FPE ON */
5699         if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5700                 *lo_state = FPE_STATE_ENTERING_ON;
5701
5702         if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5703             !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5704             priv->fpe_wq) {
5705                 queue_work(priv->fpe_wq, &priv->fpe_task);
5706         }
5707 }
5708
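/**
 *  stmmac_common_interrupt - handle the MAC-level (non-DMA) interrupt sources
 *  @priv: driver private structure
 *  Description: services the interrupt sources shared by all channels:
 *  wake-up events, EST and FPE status, LPI entry/exit, per-queue MTL status,
 *  PCS link changes and the timestamp interrupt.
 */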
5709 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5710 {
5711         u32 rx_cnt = priv->plat->rx_queues_to_use;
5712         u32 tx_cnt = priv->plat->tx_queues_to_use;
5713         u32 queues_count;
5714         u32 queue;
5715         bool xmac;
5716
5717         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5718         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5719
5720         if (priv->irq_wake)
5721                 pm_wakeup_event(priv->device, 0);
5722
5723         if (priv->dma_cap.estsel)
5724                 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5725                                       &priv->xstats, tx_cnt);
5726
5727         if (priv->dma_cap.fpesel) {
5728                 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5729                                                    priv->dev);
5730
5731                 stmmac_fpe_event_status(priv, status);
5732         }
5733
5734         /* To handle GMAC own interrupts */
5735         if ((priv->plat->has_gmac) || xmac) {
5736                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5737
5738                 if (unlikely(status)) {
5739                         /* For LPI we need to save the tx status */
5740                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5741                                 priv->tx_path_in_lpi_mode = true;
5742                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5743                                 priv->tx_path_in_lpi_mode = false;
5744                 }
5745
5746                 for (queue = 0; queue < queues_count; queue++) {
5747                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
5748                                                             queue);
5749                 }
5750
5751                 /* PCS link status */
5752                 if (priv->hw->pcs) {
5753                         if (priv->xstats.pcs_link)
5754                                 netif_carrier_on(priv->dev);
5755                         else
5756                                 netif_carrier_off(priv->dev);
5757                 }
5758
5759                 stmmac_timestamp_interrupt(priv, priv);
5760         }
5761 }
5762
5763 /**
5764  *  stmmac_interrupt - main ISR
5765  *  @irq: interrupt number.
5766  *  @dev_id: to pass the net device pointer.
5767  *  Description: this is the main driver interrupt service routine.
5768  *  It can call:
5769  *  o DMA service routine (to manage incoming frame reception and transmission
5770  *    status)
5771  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5772  *    interrupts.
5773  */
5774 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5775 {
5776         struct net_device *dev = (struct net_device *)dev_id;
5777         struct stmmac_priv *priv = netdev_priv(dev);
5778
5779         /* Check if adapter is up */
5780         if (test_bit(STMMAC_DOWN, &priv->state))
5781                 return IRQ_HANDLED;
5782
5783         /* Check if a fatal error happened */
5784         if (stmmac_safety_feat_interrupt(priv))
5785                 return IRQ_HANDLED;
5786
5787         /* To handle Common interrupts */
5788         stmmac_common_interrupt(priv);
5789
5790         /* To handle DMA interrupts */
5791         stmmac_dma_interrupt(priv);
5792
5793         return IRQ_HANDLED;
5794 }
5795
5796 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5797 {
5798         struct net_device *dev = (struct net_device *)dev_id;
5799         struct stmmac_priv *priv = netdev_priv(dev);
5800
5801         if (unlikely(!dev)) {
5802                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5803                 return IRQ_NONE;
5804         }
5805
5806         /* Check if adapter is up */
5807         if (test_bit(STMMAC_DOWN, &priv->state))
5808                 return IRQ_HANDLED;
5809
5810         /* To handle Common interrupts */
5811         stmmac_common_interrupt(priv);
5812
5813         return IRQ_HANDLED;
5814 }
5815
5816 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5817 {
5818         struct net_device *dev = (struct net_device *)dev_id;
5819         struct stmmac_priv *priv = netdev_priv(dev);
5820
5821         if (unlikely(!dev)) {
5822                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5823                 return IRQ_NONE;
5824         }
5825
5826         /* Check if adapter is up */
5827         if (test_bit(STMMAC_DOWN, &priv->state))
5828                 return IRQ_HANDLED;
5829
5830         /* Check if a fatal error happened */
5831         stmmac_safety_feat_interrupt(priv);
5832
5833         return IRQ_HANDLED;
5834 }
5835
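/**
 *  stmmac_msi_intr_tx - per-channel TX MSI interrupt handler
 *  @irq: interrupt number
 *  @data: pointer to the TX queue that raised the interrupt
 *  Description: schedules the TX NAPI of the channel and, on TX errors,
 *  either bumps the DMA threshold or starts a TX error recovery.
 */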
5836 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5837 {
5838         struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5839         struct stmmac_dma_conf *dma_conf;
5840         int chan = tx_q->queue_index;
5841         struct stmmac_priv *priv;
5842         int status;
5843
5844         dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5845         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5846
5847         if (unlikely(!data)) {
5848                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5849                 return IRQ_NONE;
5850         }
5851
5852         /* Check if adapter is up */
5853         if (test_bit(STMMAC_DOWN, &priv->state))
5854                 return IRQ_HANDLED;
5855
5856         status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5857
5858         if (unlikely(status & tx_hard_error_bump_tc)) {
5859                 /* Try to bump up the dma threshold on this failure */
5860                 stmmac_bump_dma_threshold(priv, chan);
5861         } else if (unlikely(status == tx_hard_error)) {
5862                 stmmac_tx_err(priv, chan);
5863         }
5864
5865         return IRQ_HANDLED;
5866 }
5867
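/**
 *  stmmac_msi_intr_rx - per-channel RX MSI interrupt handler
 *  @irq: interrupt number
 *  @data: pointer to the RX queue that raised the interrupt
 *  Description: schedules the RX NAPI of the channel that raised the
 *  interrupt.
 */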
5868 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5869 {
5870         struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5871         struct stmmac_dma_conf *dma_conf;
5872         int chan = rx_q->queue_index;
5873         struct stmmac_priv *priv;
5874
5875         dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5876         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5877
5878         if (unlikely(!data)) {
5879                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5880                 return IRQ_NONE;
5881         }
5882
5883         /* Check if adapter is up */
5884         if (test_bit(STMMAC_DOWN, &priv->state))
5885                 return IRQ_HANDLED;
5886
5887         stmmac_napi_check(priv, chan, DMA_DIR_RX);
5888
5889         return IRQ_HANDLED;
5890 }
5891
5892 #ifdef CONFIG_NET_POLL_CONTROLLER
5893 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5894  * to allow network I/O with interrupts disabled.
5895  */
5896 static void stmmac_poll_controller(struct net_device *dev)
5897 {
5898         struct stmmac_priv *priv = netdev_priv(dev);
5899         int i;
5900
5901         /* If adapter is down, do nothing */
5902         if (test_bit(STMMAC_DOWN, &priv->state))
5903                 return;
5904
5905         if (priv->plat->multi_msi_en) {
5906                 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5907                         stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
5908
5909                 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5910                         stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
5911         } else {
5912                 disable_irq(dev->irq);
5913                 stmmac_interrupt(dev->irq, dev);
5914                 enable_irq(dev->irq);
5915         }
5916 }
5917 #endif
5918
5919 /**
5920  *  stmmac_ioctl - Entry point for the Ioctl
5921  *  @dev: Device pointer.
5922  *  @rq: An IOCTL-specific structure that can contain a pointer to
5923  *  a proprietary structure used to pass information to the driver.
5924  *  @cmd: IOCTL command
5925  *  Description:
5926  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5927  */
5928 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5929 {
5930         struct stmmac_priv *priv = netdev_priv(dev);
5931         int ret = -EOPNOTSUPP;
5932
5933         if (!netif_running(dev))
5934                 return -EINVAL;
5935
5936         switch (cmd) {
5937         case SIOCGMIIPHY:
5938         case SIOCGMIIREG:
5939         case SIOCSMIIREG:
5940                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5941                 break;
5942         case SIOCSHWTSTAMP:
5943                 ret = stmmac_hwtstamp_set(dev, rq);
5944                 break;
5945         case SIOCGHWTSTAMP:
5946                 ret = stmmac_hwtstamp_get(dev, rq);
5947                 break;
5948         default:
5949                 break;
5950         }
5951
5952         return ret;
5953 }
5954
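/**
 *  stmmac_setup_tc_block_cb - flow block callback for tc offload
 *  @type: classifier type
 *  @type_data: classifier data
 *  @cb_priv: driver private structure
 *  Description: offloads cls_u32 and flower classifiers to the hardware,
 *  temporarily disabling the queues while the filters are being updated.
 */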
5955 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5956                                     void *cb_priv)
5957 {
5958         struct stmmac_priv *priv = cb_priv;
5959         int ret = -EOPNOTSUPP;
5960
5961         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5962                 return ret;
5963
5964         __stmmac_disable_all_queues(priv);
5965
5966         switch (type) {
5967         case TC_SETUP_CLSU32:
5968                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5969                 break;
5970         case TC_SETUP_CLSFLOWER:
5971                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
5972                 break;
5973         default:
5974                 break;
5975         }
5976
5977         stmmac_enable_all_queues(priv);
5978         return ret;
5979 }
5980
5981 static LIST_HEAD(stmmac_block_cb_list);
5982
5983 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5984                            void *type_data)
5985 {
5986         struct stmmac_priv *priv = netdev_priv(ndev);
5987
5988         switch (type) {
5989         case TC_SETUP_BLOCK:
5990                 return flow_block_cb_setup_simple(type_data,
5991                                                   &stmmac_block_cb_list,
5992                                                   stmmac_setup_tc_block_cb,
5993                                                   priv, priv, true);
5994         case TC_SETUP_QDISC_CBS:
5995                 return stmmac_tc_setup_cbs(priv, priv, type_data);
5996         case TC_SETUP_QDISC_TAPRIO:
5997                 return stmmac_tc_setup_taprio(priv, priv, type_data);
5998         case TC_SETUP_QDISC_ETF:
5999                 return stmmac_tc_setup_etf(priv, priv, type_data);
6000         default:
6001                 return -EOPNOTSUPP;
6002         }
6003 }
6004
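/**
 *  stmmac_select_queue - select the TX queue for a given skb
 *  @dev: device pointer
 *  @skb: buffer to be transmitted
 *  @sb_dev: subordinate device
 *  Description: TSO/USO traffic is always steered to queue 0, the only queue
 *  guaranteed to be TSO/USO capable; everything else uses the default
 *  netdev_pick_tx() selection.
 */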
6005 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6006                                struct net_device *sb_dev)
6007 {
6008         int gso = skb_shinfo(skb)->gso_type;
6009
6010         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6011                 /*
6012                  * There is no way to determine the number of TSO/USO
6013                  * capable Queues. Let's always use Queue 0
6014                  * because if TSO/USO is supported then at least this
6015                  * one will be capable.
6016                  */
6017                 return 0;
6018         }
6019
6020         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6021 }
6022
6023 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6024 {
6025         struct stmmac_priv *priv = netdev_priv(ndev);
6026         int ret = 0;
6027
6028         ret = pm_runtime_resume_and_get(priv->device);
6029         if (ret < 0)
6030                 return ret;
6031
6032         ret = eth_mac_addr(ndev, addr);
6033         if (ret)
6034                 goto set_mac_error;
6035
6036         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6037
6038 set_mac_error:
6039         pm_runtime_put(priv->device);
6040
6041         return ret;
6042 }
6043
6044 #ifdef CONFIG_DEBUG_FS
6045 static struct dentry *stmmac_fs_dir;
6046
6047 static void sysfs_display_ring(void *head, int size, int extend_desc,
6048                                struct seq_file *seq, dma_addr_t dma_phy_addr)
6049 {
6050         int i;
6051         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6052         struct dma_desc *p = (struct dma_desc *)head;
6053         dma_addr_t dma_addr;
6054
6055         for (i = 0; i < size; i++) {
6056                 if (extend_desc) {
6057                         dma_addr = dma_phy_addr + i * sizeof(*ep);
6058                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6059                                    i, &dma_addr,
6060                                    le32_to_cpu(ep->basic.des0),
6061                                    le32_to_cpu(ep->basic.des1),
6062                                    le32_to_cpu(ep->basic.des2),
6063                                    le32_to_cpu(ep->basic.des3));
6064                         ep++;
6065                 } else {
6066                         dma_addr = dma_phy_addr + i * sizeof(*p);
6067                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6068                                    i, &dma_addr,
6069                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6070                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6071                         p++;
6072                 }
6073                 seq_printf(seq, "\n");
6074         }
6075 }
6076
6077 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6078 {
6079         struct net_device *dev = seq->private;
6080         struct stmmac_priv *priv = netdev_priv(dev);
6081         u32 rx_count = priv->plat->rx_queues_to_use;
6082         u32 tx_count = priv->plat->tx_queues_to_use;
6083         u32 queue;
6084
6085         if ((dev->flags & IFF_UP) == 0)
6086                 return 0;
6087
6088         for (queue = 0; queue < rx_count; queue++) {
6089                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6090
6091                 seq_printf(seq, "RX Queue %d:\n", queue);
6092
6093                 if (priv->extend_desc) {
6094                         seq_printf(seq, "Extended descriptor ring:\n");
6095                         sysfs_display_ring((void *)rx_q->dma_erx,
6096                                            priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6097                 } else {
6098                         seq_printf(seq, "Descriptor ring:\n");
6099                         sysfs_display_ring((void *)rx_q->dma_rx,
6100                                            priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6101                 }
6102         }
6103
6104         for (queue = 0; queue < tx_count; queue++) {
6105                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6106
6107                 seq_printf(seq, "TX Queue %d:\n", queue);
6108
6109                 if (priv->extend_desc) {
6110                         seq_printf(seq, "Extended descriptor ring:\n");
6111                         sysfs_display_ring((void *)tx_q->dma_etx,
6112                                            priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6113                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6114                         seq_printf(seq, "Descriptor ring:\n");
6115                         sysfs_display_ring((void *)tx_q->dma_tx,
6116                                            priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6117                 }
6118         }
6119
6120         return 0;
6121 }
6122 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6123
6124 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6125 {
6126         struct net_device *dev = seq->private;
6127         struct stmmac_priv *priv = netdev_priv(dev);
6128
6129         if (!priv->hw_cap_support) {
6130                 seq_printf(seq, "DMA HW features not supported\n");
6131                 return 0;
6132         }
6133
6134         seq_printf(seq, "==============================\n");
6135         seq_printf(seq, "\tDMA HW features\n");
6136         seq_printf(seq, "==============================\n");
6137
6138         seq_printf(seq, "\t10/100 Mbps: %s\n",
6139                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6140         seq_printf(seq, "\t1000 Mbps: %s\n",
6141                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
6142         seq_printf(seq, "\tHalf duplex: %s\n",
6143                    (priv->dma_cap.half_duplex) ? "Y" : "N");
6144         seq_printf(seq, "\tHash Filter: %s\n",
6145                    (priv->dma_cap.hash_filter) ? "Y" : "N");
6146         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6147                    (priv->dma_cap.multi_addr) ? "Y" : "N");
6148         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6149                    (priv->dma_cap.pcs) ? "Y" : "N");
6150         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6151                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
6152         seq_printf(seq, "\tPMT Remote wake up: %s\n",
6153                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6154         seq_printf(seq, "\tPMT Magic Frame: %s\n",
6155                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6156         seq_printf(seq, "\tRMON module: %s\n",
6157                    (priv->dma_cap.rmon) ? "Y" : "N");
6158         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6159                    (priv->dma_cap.time_stamp) ? "Y" : "N");
6160         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6161                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
6162         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6163                    (priv->dma_cap.eee) ? "Y" : "N");
6164         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6165         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6166                    (priv->dma_cap.tx_coe) ? "Y" : "N");
6167         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6168                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6169                            (priv->dma_cap.rx_coe) ? "Y" : "N");
6170         } else {
6171                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6172                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6173                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6174                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6175         }
6176         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6177                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6178         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6179                    priv->dma_cap.number_rx_channel);
6180         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6181                    priv->dma_cap.number_tx_channel);
6182         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6183                    priv->dma_cap.number_rx_queues);
6184         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6185                    priv->dma_cap.number_tx_queues);
6186         seq_printf(seq, "\tEnhanced descriptors: %s\n",
6187                    (priv->dma_cap.enh_desc) ? "Y" : "N");
6188         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6189         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6190         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6191         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6192         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6193                    priv->dma_cap.pps_out_num);
6194         seq_printf(seq, "\tSafety Features: %s\n",
6195                    priv->dma_cap.asp ? "Y" : "N");
6196         seq_printf(seq, "\tFlexible RX Parser: %s\n",
6197                    priv->dma_cap.frpsel ? "Y" : "N");
6198         seq_printf(seq, "\tEnhanced Addressing: %d\n",
6199                    priv->dma_cap.addr64);
6200         seq_printf(seq, "\tReceive Side Scaling: %s\n",
6201                    priv->dma_cap.rssen ? "Y" : "N");
6202         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6203                    priv->dma_cap.vlhash ? "Y" : "N");
6204         seq_printf(seq, "\tSplit Header: %s\n",
6205                    priv->dma_cap.sphen ? "Y" : "N");
6206         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6207                    priv->dma_cap.vlins ? "Y" : "N");
6208         seq_printf(seq, "\tDouble VLAN: %s\n",
6209                    priv->dma_cap.dvlan ? "Y" : "N");
6210         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6211                    priv->dma_cap.l3l4fnum);
6212         seq_printf(seq, "\tARP Offloading: %s\n",
6213                    priv->dma_cap.arpoffsel ? "Y" : "N");
6214         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6215                    priv->dma_cap.estsel ? "Y" : "N");
6216         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6217                    priv->dma_cap.fpesel ? "Y" : "N");
6218         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6219                    priv->dma_cap.tbssel ? "Y" : "N");
6220         return 0;
6221 }
6222 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6223
6224 /* Use network device events to rename debugfs file entries.
6225  */
6226 static int stmmac_device_event(struct notifier_block *unused,
6227                                unsigned long event, void *ptr)
6228 {
6229         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6230         struct stmmac_priv *priv = netdev_priv(dev);
6231
6232         if (dev->netdev_ops != &stmmac_netdev_ops)
6233                 goto done;
6234
6235         switch (event) {
6236         case NETDEV_CHANGENAME:
6237                 if (priv->dbgfs_dir)
6238                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6239                                                          priv->dbgfs_dir,
6240                                                          stmmac_fs_dir,
6241                                                          dev->name);
6242                 break;
6243         }
6244 done:
6245         return NOTIFY_DONE;
6246 }
6247
6248 static struct notifier_block stmmac_notifier = {
6249         .notifier_call = stmmac_device_event,
6250 };
6251
6252 static void stmmac_init_fs(struct net_device *dev)
6253 {
6254         struct stmmac_priv *priv = netdev_priv(dev);
6255
6256         rtnl_lock();
6257
6258         /* Create per netdev entries */
6259         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6260
6261         /* Entry to report DMA RX/TX rings */
6262         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6263                             &stmmac_rings_status_fops);
6264
6265         /* Entry to report the DMA HW features */
6266         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6267                             &stmmac_dma_cap_fops);
6268
6269         rtnl_unlock();
6270 }
6271
6272 static void stmmac_exit_fs(struct net_device *dev)
6273 {
6274         struct stmmac_priv *priv = netdev_priv(dev);
6275
6276         debugfs_remove_recursive(priv->dbgfs_dir);
6277 }
6278 #endif /* CONFIG_DEBUG_FS */
6279
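/**
 *  stmmac_vid_crc32_le - compute the CRC32 (little endian) of a VLAN ID
 *  @vid_le: VLAN ID in little endian
 *  Description: bit-wise CRC32 over the 12 VID bits, used to build the
 *  VLAN hash filter programmed into the MAC.
 */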
6280 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6281 {
6282         unsigned char *data = (unsigned char *)&vid_le;
6283         unsigned char data_byte = 0;
6284         u32 crc = ~0x0;
6285         u32 temp = 0;
6286         int i, bits;
6287
6288         bits = get_bitmask_order(VLAN_VID_MASK);
6289         for (i = 0; i < bits; i++) {
6290                 if ((i % 8) == 0)
6291                         data_byte = data[i / 8];
6292
6293                 temp = ((crc & 1) ^ data_byte) & 1;
6294                 crc >>= 1;
6295                 data_byte >>= 1;
6296
6297                 if (temp)
6298                         crc ^= 0xedb88320;
6299         }
6300
6301         return crc;
6302 }
6303
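/**
 *  stmmac_vlan_update - update the HW VLAN filtering
 *  @priv: driver private structure
 *  @is_double: true for double (S-TAG) VLAN
 *  Description: rebuilds the 16-bin VLAN hash from the active VLANs. If the
 *  core has no VLAN hash support, it falls back to perfect match filtering,
 *  which can only accommodate a single VID besides VID 0.
 */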
6304 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6305 {
6306         u32 crc, hash = 0;
6307         __le16 pmatch = 0;
6308         int count = 0;
6309         u16 vid = 0;
6310
6311         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6312                 __le16 vid_le = cpu_to_le16(vid);
6313                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6314                 hash |= (1 << crc);
6315                 count++;
6316         }
6317
6318         if (!priv->dma_cap.vlhash) {
6319                 if (count > 2) /* VID = 0 always passes filter */
6320                         return -EOPNOTSUPP;
6321
6322                 pmatch = cpu_to_le16(vid);
6323                 hash = 0;
6324         }
6325
6326         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6327 }
6328
6329 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6330 {
6331         struct stmmac_priv *priv = netdev_priv(ndev);
6332         bool is_double = false;
6333         int ret;
6334
6335         if (be16_to_cpu(proto) == ETH_P_8021AD)
6336                 is_double = true;
6337
6338         set_bit(vid, priv->active_vlans);
6339         ret = stmmac_vlan_update(priv, is_double);
6340         if (ret) {
6341                 clear_bit(vid, priv->active_vlans);
6342                 return ret;
6343         }
6344
6345         if (priv->hw->num_vlan) {
6346                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6347                 if (ret)
6348                         return ret;
6349         }
6350
6351         return 0;
6352 }
6353
6354 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6355 {
6356         struct stmmac_priv *priv = netdev_priv(ndev);
6357         bool is_double = false;
6358         int ret;
6359
6360         ret = pm_runtime_resume_and_get(priv->device);
6361         if (ret < 0)
6362                 return ret;
6363
6364         if (be16_to_cpu(proto) == ETH_P_8021AD)
6365                 is_double = true;
6366
6367         clear_bit(vid, priv->active_vlans);
6368
6369         if (priv->hw->num_vlan) {
6370                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6371                 if (ret)
6372                         goto del_vlan_error;
6373         }
6374
6375         ret = stmmac_vlan_update(priv, is_double);
6376
6377 del_vlan_error:
6378         pm_runtime_put(priv->device);
6379
6380         return ret;
6381 }
6382
6383 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6384 {
6385         struct stmmac_priv *priv = netdev_priv(dev);
6386
6387         switch (bpf->command) {
6388         case XDP_SETUP_PROG:
6389                 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6390         case XDP_SETUP_XSK_POOL:
6391                 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6392                                              bpf->xsk.queue_id);
6393         default:
6394                 return -EOPNOTSUPP;
6395         }
6396 }
6397
6398 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6399                            struct xdp_frame **frames, u32 flags)
6400 {
6401         struct stmmac_priv *priv = netdev_priv(dev);
6402         int cpu = smp_processor_id();
6403         struct netdev_queue *nq;
6404         int i, nxmit = 0;
6405         int queue;
6406
6407         if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6408                 return -ENETDOWN;
6409
6410         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6411                 return -EINVAL;
6412
6413         queue = stmmac_xdp_get_tx_queue(priv, cpu);
6414         nq = netdev_get_tx_queue(priv->dev, queue);
6415
6416         __netif_tx_lock(nq, cpu);
6417         /* Avoids TX time-out as we are sharing with slow path */
6418         txq_trans_cond_update(nq);
6419
6420         for (i = 0; i < num_frames; i++) {
6421                 int res;
6422
6423                 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6424                 if (res == STMMAC_XDP_CONSUMED)
6425                         break;
6426
6427                 nxmit++;
6428         }
6429
6430         if (flags & XDP_XMIT_FLUSH) {
6431                 stmmac_flush_tx_descriptors(priv, queue);
6432                 stmmac_tx_timer_arm(priv, queue);
6433         }
6434
6435         __netif_tx_unlock(nq);
6436
6437         return nxmit;
6438 }
6439
6440 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6441 {
6442         struct stmmac_channel *ch = &priv->channel[queue];
6443         unsigned long flags;
6444
6445         spin_lock_irqsave(&ch->lock, flags);
6446         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6447         spin_unlock_irqrestore(&ch->lock, flags);
6448
6449         stmmac_stop_rx_dma(priv, queue);
6450         __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6451 }
6452
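/**
 *  stmmac_enable_rx_queue - (re)enable an RX queue at runtime
 *  @priv: driver private structure
 *  @queue: RX queue index
 *  Description: reallocates and reinitializes the RX descriptor ring,
 *  reprograms the RX DMA channel (tail pointer and buffer size, taking an
 *  attached XSK pool into account), restarts RX DMA and unmasks the RX DMA
 *  interrupt.
 */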
6453 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6454 {
6455         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6456         struct stmmac_channel *ch = &priv->channel[queue];
6457         unsigned long flags;
6458         u32 buf_size;
6459         int ret;
6460
6461         ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6462         if (ret) {
6463                 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6464                 return;
6465         }
6466
6467         ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6468         if (ret) {
6469                 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6470                 netdev_err(priv->dev, "Failed to init RX desc.\n");
6471                 return;
6472         }
6473
6474         stmmac_reset_rx_queue(priv, queue);
6475         stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6476
6477         stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6478                             rx_q->dma_rx_phy, rx_q->queue_index);
6479
6480         rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6481                              sizeof(struct dma_desc));
6482         stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6483                                rx_q->rx_tail_addr, rx_q->queue_index);
6484
6485         if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6486                 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6487                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6488                                       buf_size,
6489                                       rx_q->queue_index);
6490         } else {
6491                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6492                                       priv->dma_conf.dma_buf_sz,
6493                                       rx_q->queue_index);
6494         }
6495
6496         stmmac_start_rx_dma(priv, queue);
6497
6498         spin_lock_irqsave(&ch->lock, flags);
6499         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6500         spin_unlock_irqrestore(&ch->lock, flags);
6501 }
6502
6503 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6504 {
6505         struct stmmac_channel *ch = &priv->channel[queue];
6506         unsigned long flags;
6507
6508         spin_lock_irqsave(&ch->lock, flags);
6509         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6510         spin_unlock_irqrestore(&ch->lock, flags);
6511
6512         stmmac_stop_tx_dma(priv, queue);
6513         __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6514 }
6515
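/**
 *  stmmac_enable_tx_queue - (re)enable a TX queue at runtime
 *  @priv: driver private structure
 *  @queue: TX queue index
 *  Description: reallocates and reinitializes the TX descriptor ring,
 *  reprograms the TX DMA channel (including TBS when available), restarts
 *  TX DMA and unmasks the TX DMA interrupt.
 */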
6516 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6517 {
6518         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6519         struct stmmac_channel *ch = &priv->channel[queue];
6520         unsigned long flags;
6521         int ret;
6522
6523         ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6524         if (ret) {
6525                 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6526                 return;
6527         }
6528
6529         ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6530         if (ret) {
6531                 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6532                 netdev_err(priv->dev, "Failed to init TX desc.\n");
6533                 return;
6534         }
6535
6536         stmmac_reset_tx_queue(priv, queue);
6537         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6538
6539         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6540                             tx_q->dma_tx_phy, tx_q->queue_index);
6541
6542         if (tx_q->tbs & STMMAC_TBS_AVAIL)
6543                 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6544
6545         tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6546         stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6547                                tx_q->tx_tail_addr, tx_q->queue_index);
6548
6549         stmmac_start_tx_dma(priv, queue);
6550
6551         spin_lock_irqsave(&ch->lock, flags);
6552         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6553         spin_unlock_irqrestore(&ch->lock, flags);
6554 }
6555
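/**
 *  stmmac_xdp_release - tear down the data path for an XDP reconfiguration
 *  @dev: device pointer
 *  Description: counterpart of stmmac_xdp_open(). Stops TX, disables NAPI,
 *  cancels the TX coalescing timers, frees the IRQs, stops all DMA channels,
 *  releases the descriptor resources and disables the MAC.
 */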
6556 void stmmac_xdp_release(struct net_device *dev)
6557 {
6558         struct stmmac_priv *priv = netdev_priv(dev);
6559         u32 chan;
6560
6561         /* Ensure tx function is not running */
6562         netif_tx_disable(dev);
6563
6564         /* Disable NAPI process */
6565         stmmac_disable_all_queues(priv);
6566
6567         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6568                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6569
6570         /* Free the IRQ lines */
6571         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6572
6573         /* Stop TX/RX DMA channels */
6574         stmmac_stop_all_dma(priv);
6575
6576         /* Release and free the Rx/Tx resources */
6577         free_dma_desc_resources(priv, &priv->dma_conf);
6578
6579         /* Disable the MAC Rx/Tx */
6580         stmmac_mac_set(priv, priv->ioaddr, false);
6581
6582         /* set trans_start so we don't get spurious
6583          * watchdogs during reset
6584          */
6585         netif_trans_update(dev);
6586         netif_carrier_off(dev);
6587 }
6588
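/**
 *  stmmac_xdp_open - bring the data path back up after an XDP reconfiguration
 *  @dev: device pointer
 *  Description: allocates and initializes the DMA descriptor rings, programs
 *  every RX/TX DMA channel (including XSK pool buffer sizes and Split
 *  Header), enables the MAC, requests the IRQs and re-enables NAPI and the
 *  TX queues.
 */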
6589 int stmmac_xdp_open(struct net_device *dev)
6590 {
6591         struct stmmac_priv *priv = netdev_priv(dev);
6592         u32 rx_cnt = priv->plat->rx_queues_to_use;
6593         u32 tx_cnt = priv->plat->tx_queues_to_use;
6594         u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6595         struct stmmac_rx_queue *rx_q;
6596         struct stmmac_tx_queue *tx_q;
6597         u32 buf_size;
6598         bool sph_en;
6599         u32 chan;
6600         int ret;
6601
6602         ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6603         if (ret < 0) {
6604                 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6605                            __func__);
6606                 goto dma_desc_error;
6607         }
6608
6609         ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6610         if (ret < 0) {
6611                 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6612                            __func__);
6613                 goto init_error;
6614         }
6615
6616         /* DMA CSR Channel configuration */
6617         for (chan = 0; chan < dma_csr_ch; chan++) {
6618                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6619                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6620         }
6621
6622         /* Adjust Split header */
6623         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6624
6625         /* DMA RX Channel Configuration */
6626         for (chan = 0; chan < rx_cnt; chan++) {
6627                 rx_q = &priv->dma_conf.rx_queue[chan];
6628
6629                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6630                                     rx_q->dma_rx_phy, chan);
6631
6632                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6633                                      (rx_q->buf_alloc_num *
6634                                       sizeof(struct dma_desc));
6635                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6636                                        rx_q->rx_tail_addr, chan);
6637
6638                 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6639                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6640                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6641                                               buf_size,
6642                                               rx_q->queue_index);
6643                 } else {
6644                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6645                                               priv->dma_conf.dma_buf_sz,
6646                                               rx_q->queue_index);
6647                 }
6648
6649                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6650         }
6651
6652         /* DMA TX Channel Configuration */
6653         for (chan = 0; chan < tx_cnt; chan++) {
6654                 tx_q = &priv->dma_conf.tx_queue[chan];
6655
6656                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6657                                     tx_q->dma_tx_phy, chan);
6658
6659                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6660                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6661                                        tx_q->tx_tail_addr, chan);
6662
6663                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6664                 tx_q->txtimer.function = stmmac_tx_timer;
6665         }
6666
6667         /* Enable the MAC Rx/Tx */
6668         stmmac_mac_set(priv, priv->ioaddr, true);
6669
6670         /* Start Rx & Tx DMA Channels */
6671         stmmac_start_all_dma(priv);
6672
6673         ret = stmmac_request_irq(dev);
6674         if (ret)
6675                 goto irq_error;
6676
6677         /* Enable NAPI process */
6678         stmmac_enable_all_queues(priv);
6679         netif_carrier_on(dev);
6680         netif_tx_start_all_queues(dev);
6681         stmmac_enable_all_dma_irq(priv);
6682
6683         return 0;
6684
6685 irq_error:
6686         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6687                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6688
6689         stmmac_hw_teardown(dev);
6690 init_error:
6691         free_dma_desc_resources(priv, &priv->dma_conf);
6692 dma_desc_error:
6693         return ret;
6694 }
6695
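/**
 *  stmmac_xsk_wakeup - ndo_xsk_wakeup callback for AF_XDP sockets
 *  @dev: device pointer
 *  @queue: index of the queue to kick
 *  @flags: XDP_WAKEUP_* flags
 *  Description: validates the queue and its XSK pool and, since EQoS has no
 *  per-channel software interrupt, directly schedules the RX/TX NAPI of the
 *  channel if it is not already running.
 */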
6696 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6697 {
6698         struct stmmac_priv *priv = netdev_priv(dev);
6699         struct stmmac_rx_queue *rx_q;
6700         struct stmmac_tx_queue *tx_q;
6701         struct stmmac_channel *ch;
6702
6703         if (test_bit(STMMAC_DOWN, &priv->state) ||
6704             !netif_carrier_ok(priv->dev))
6705                 return -ENETDOWN;
6706
6707         if (!stmmac_xdp_is_enabled(priv))
6708                 return -EINVAL;
6709
6710         if (queue >= priv->plat->rx_queues_to_use ||
6711             queue >= priv->plat->tx_queues_to_use)
6712                 return -EINVAL;
6713
6714         rx_q = &priv->dma_conf.rx_queue[queue];
6715         tx_q = &priv->dma_conf.tx_queue[queue];
6716         ch = &priv->channel[queue];
6717
6718         if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6719                 return -EINVAL;
6720
6721         if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6722                 /* EQoS does not have per-DMA channel SW interrupt,
6723                  * so we schedule the RX/TX NAPI straight away.
6724                  */
6725                 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6726                         __napi_schedule(&ch->rxtx_napi);
6727         }
6728
6729         return 0;
6730 }
6731
6732 static const struct net_device_ops stmmac_netdev_ops = {
6733         .ndo_open = stmmac_open,
6734         .ndo_start_xmit = stmmac_xmit,
6735         .ndo_stop = stmmac_release,
6736         .ndo_change_mtu = stmmac_change_mtu,
6737         .ndo_fix_features = stmmac_fix_features,
6738         .ndo_set_features = stmmac_set_features,
6739         .ndo_set_rx_mode = stmmac_set_rx_mode,
6740         .ndo_tx_timeout = stmmac_tx_timeout,
6741         .ndo_eth_ioctl = stmmac_ioctl,
6742         .ndo_setup_tc = stmmac_setup_tc,
6743         .ndo_select_queue = stmmac_select_queue,
6744 #ifdef CONFIG_NET_POLL_CONTROLLER
6745         .ndo_poll_controller = stmmac_poll_controller,
6746 #endif
6747         .ndo_set_mac_address = stmmac_set_mac_address,
6748         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6749         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6750         .ndo_bpf = stmmac_bpf,
6751         .ndo_xdp_xmit = stmmac_xdp_xmit,
6752         .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6753 };
6754
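/**
 *  stmmac_reset_subtask - reset the adapter from the service task
 *  @priv: driver private structure
 *  Description: if a reset was requested and the interface is not already
 *  down, closes and reopens the netdev under rtnl to bring the adapter back
 *  to a sane state.
 */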
6755 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6756 {
6757         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6758                 return;
6759         if (test_bit(STMMAC_DOWN, &priv->state))
6760                 return;
6761
6762         netdev_err(priv->dev, "Reset adapter.\n");
6763
6764         rtnl_lock();
6765         netif_trans_update(priv->dev);
6766         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6767                 usleep_range(1000, 2000);
6768
6769         set_bit(STMMAC_DOWN, &priv->state);
6770         dev_close(priv->dev);
6771         dev_open(priv->dev, NULL);
6772         clear_bit(STMMAC_DOWN, &priv->state);
6773         clear_bit(STMMAC_RESETING, &priv->state);
6774         rtnl_unlock();
6775 }
6776
6777 static void stmmac_service_task(struct work_struct *work)
6778 {
6779         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6780                         service_task);
6781
6782         stmmac_reset_subtask(priv);
6783         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6784 }
6785
6786 /**
6787  *  stmmac_hw_init - Init the MAC device
6788  *  @priv: driver private structure
6789  *  Description: this function is to configure the MAC device according to
6790  *  some platform parameters or the HW capability register. It prepares the
6791  *  driver to use either ring or chain modes and to setup either enhanced or
6792  *  normal descriptors.
6793  */
6794 static int stmmac_hw_init(struct stmmac_priv *priv)
6795 {
6796         int ret;
6797
6798         /* dwmac-sun8i only works in chain mode */
6799         if (priv->plat->has_sun8i)
6800                 chain_mode = 1;
6801         priv->chain_mode = chain_mode;
6802
6803         /* Initialize HW Interface */
6804         ret = stmmac_hwif_init(priv);
6805         if (ret)
6806                 return ret;
6807
6808         /* Get the HW capability (GMAC cores newer than 3.50a) */
6809         priv->hw_cap_support = stmmac_get_hw_features(priv);
6810         if (priv->hw_cap_support) {
6811                 dev_info(priv->device, "DMA HW capability register supported\n");
6812
6813                 /* We can override some gmac/dma configuration fields
6814                  * (e.g. enh_desc, tx_coe) that are passed through the
6815                  * platform with the values from the HW capability
6816                  * register (if supported).
6817                  */
6818                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
6819                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6820                                 !priv->plat->use_phy_wol;
6821                 priv->hw->pmt = priv->plat->pmt;
6822                 if (priv->dma_cap.hash_tb_sz) {
6823                         priv->hw->multicast_filter_bins =
6824                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
6825                         priv->hw->mcast_bits_log2 =
6826                                         ilog2(priv->hw->multicast_filter_bins);
6827                 }
6828
6829                 /* TXCOE doesn't work in thresh DMA mode */
6830                 if (priv->plat->force_thresh_dma_mode)
6831                         priv->plat->tx_coe = 0;
6832                 else
6833                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
6834
6835                 /* In case of GMAC4 rx_coe is from HW cap register. */
6836                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
6837
6838                 if (priv->dma_cap.rx_coe_type2)
6839                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6840                 else if (priv->dma_cap.rx_coe_type1)
6841                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6842
6843         } else {
6844                 dev_info(priv->device, "No HW DMA feature register supported\n");
6845         }
6846
6847         if (priv->plat->rx_coe) {
6848                 priv->hw->rx_csum = priv->plat->rx_coe;
6849                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6850                 if (priv->synopsys_id < DWMAC_CORE_4_00)
6851                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6852         }
6853         if (priv->plat->tx_coe)
6854                 dev_info(priv->device, "TX Checksum insertion supported\n");
6855
6856         if (priv->plat->pmt) {
6857                 dev_info(priv->device, "Wake-Up On LAN supported\n");
6858                 device_set_wakeup_capable(priv->device, 1);
6859         }
6860
6861         if (priv->dma_cap.tsoen)
6862                 dev_info(priv->device, "TSO supported\n");
6863
6864         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6865         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6866
6867         /* Run HW quirks, if any */
6868         if (priv->hwif_quirks) {
6869                 ret = priv->hwif_quirks(priv);
6870                 if (ret)
6871                         return ret;
6872         }
6873
6874         /* Rx Watchdog is available in cores newer than 3.40.
6875          * In some cases, for example on buggy HW, this feature
6876          * has to be disabled; this can be done by setting the
6877          * riwt_off field in the platform data.
6878          */
6879         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6880             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6881                 priv->use_riwt = 1;
6882                 dev_info(priv->device,
6883                          "Enable RX Mitigation via HW Watchdog Timer\n");
6884         }
6885
6886         return 0;
6887 }
6888
6889 static void stmmac_napi_add(struct net_device *dev)
6890 {
6891         struct stmmac_priv *priv = netdev_priv(dev);
6892         u32 queue, maxq;
6893
6894         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6895
6896         for (queue = 0; queue < maxq; queue++) {
6897                 struct stmmac_channel *ch = &priv->channel[queue];
6898
6899                 ch->priv_data = priv;
6900                 ch->index = queue;
6901                 spin_lock_init(&ch->lock);
6902
6903                 if (queue < priv->plat->rx_queues_to_use) {
6904                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
6905                 }
6906                 if (queue < priv->plat->tx_queues_to_use) {
6907                         netif_napi_add_tx(dev, &ch->tx_napi,
6908                                           stmmac_napi_poll_tx);
6909                 }
6910                 if (queue < priv->plat->rx_queues_to_use &&
6911                     queue < priv->plat->tx_queues_to_use) {
6912                         netif_napi_add(dev, &ch->rxtx_napi,
6913                                        stmmac_napi_poll_rxtx);
6914                 }
6915         }
6916 }
6917
6918 static void stmmac_napi_del(struct net_device *dev)
6919 {
6920         struct stmmac_priv *priv = netdev_priv(dev);
6921         u32 queue, maxq;
6922
6923         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6924
6925         for (queue = 0; queue < maxq; queue++) {
6926                 struct stmmac_channel *ch = &priv->channel[queue];
6927
6928                 if (queue < priv->plat->rx_queues_to_use)
6929                         netif_napi_del(&ch->rx_napi);
6930                 if (queue < priv->plat->tx_queues_to_use)
6931                         netif_napi_del(&ch->tx_napi);
6932                 if (queue < priv->plat->rx_queues_to_use &&
6933                     queue < priv->plat->tx_queues_to_use) {
6934                         netif_napi_del(&ch->rxtx_napi);
6935                 }
6936         }
6937 }
6938
6939 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6940 {
6941         struct stmmac_priv *priv = netdev_priv(dev);
6942         int ret = 0;
6943
6944         if (netif_running(dev))
6945                 stmmac_release(dev);
6946
6947         stmmac_napi_del(dev);
6948
6949         priv->plat->rx_queues_to_use = rx_cnt;
6950         priv->plat->tx_queues_to_use = tx_cnt;
6951
6952         stmmac_napi_add(dev);
6953
6954         if (netif_running(dev))
6955                 ret = stmmac_open(dev);
6956
6957         return ret;
6958 }
6959
6960 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6961 {
6962         struct stmmac_priv *priv = netdev_priv(dev);
6963         int ret = 0;
6964
6965         if (netif_running(dev))
6966                 stmmac_release(dev);
6967
6968         priv->dma_conf.dma_rx_size = rx_size;
6969         priv->dma_conf.dma_tx_size = tx_size;
6970
6971         if (netif_running(dev))
6972                 ret = stmmac_open(dev);
6973
6974         return ret;
6975 }
6976
6977 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
6978 static void stmmac_fpe_lp_task(struct work_struct *work)
6979 {
6980         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6981                                                 fpe_task);
6982         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6983         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6984         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6985         bool *hs_enable = &fpe_cfg->hs_enable;
6986         bool *enable = &fpe_cfg->enable;
6987         int retries = 20;
6988
6989         while (retries-- > 0) {
6990                 /* Bail out immediately if FPE handshake is OFF */
6991                 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6992                         break;
6993
6994                 if (*lo_state == FPE_STATE_ENTERING_ON &&
6995                     *lp_state == FPE_STATE_ENTERING_ON) {
6996                         stmmac_fpe_configure(priv, priv->ioaddr,
6997                                              priv->plat->tx_queues_to_use,
6998                                              priv->plat->rx_queues_to_use,
6999                                              *enable);
7000
7001                         netdev_info(priv->dev, "configured FPE\n");
7002
7003                         *lo_state = FPE_STATE_ON;
7004                         *lp_state = FPE_STATE_ON;
7005                         netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7006                         break;
7007                 }
7008
7009                 if ((*lo_state == FPE_STATE_CAPABLE ||
7010                      *lo_state == FPE_STATE_ENTERING_ON) &&
7011                      *lp_state != FPE_STATE_ON) {
7012                         netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7013                                     *lo_state, *lp_state);
7014                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7015                                                 MPACKET_VERIFY);
7016                 }
7017                 /* Sleep then retry */
7018                 msleep(500);
7019         }
7020
7021         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7022 }
7023
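/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to stop it
 * Description: when enabling, a verify mPacket is sent to the link partner;
 * when disabling, the local and link-partner FPE states are reset to OFF.
 */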
7024 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7025 {
7026         if (priv->plat->fpe_cfg->hs_enable != enable) {
7027                 if (enable) {
7028                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7029                                                 MPACKET_VERIFY);
7030                 } else {
7031                         priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7032                         priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7033                 }
7034
7035                 priv->plat->fpe_cfg->hs_enable = enable;
7036         }
7037 }
7038
7039 /**
7040  * stmmac_dvr_probe
7041  * @device: device pointer
7042  * @plat_dat: platform data pointer
7043  * @res: stmmac resource pointer
7044  * Description: this is the main probe function, used to allocate the
7045  * net_device (via alloc_etherdev) and the driver private structure.
7046  * Return:
7047  * 0 on success, a negative errno otherwise.
7048  */
7049 int stmmac_dvr_probe(struct device *device,
7050                      struct plat_stmmacenet_data *plat_dat,
7051                      struct stmmac_resources *res)
7052 {
7053         struct net_device *ndev = NULL;
7054         struct stmmac_priv *priv;
7055         u32 rxq;
7056         int i, ret = 0;
7057
7058         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7059                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7060         if (!ndev)
7061                 return -ENOMEM;
7062
7063         SET_NETDEV_DEV(ndev, device);
7064
7065         priv = netdev_priv(ndev);
7066         priv->device = device;
7067         priv->dev = ndev;
7068
7069         stmmac_set_ethtool_ops(ndev);
7070         priv->pause = pause;
7071         priv->plat = plat_dat;
7072         priv->ioaddr = res->addr;
7073         priv->dev->base_addr = (unsigned long)res->addr;
7074         priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
7075
7076         priv->dev->irq = res->irq;
7077         priv->wol_irq = res->wol_irq;
7078         priv->lpi_irq = res->lpi_irq;
7079         priv->sfty_ce_irq = res->sfty_ce_irq;
7080         priv->sfty_ue_irq = res->sfty_ue_irq;
7081         for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7082                 priv->rx_irq[i] = res->rx_irq[i];
7083         for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7084                 priv->tx_irq[i] = res->tx_irq[i];
7085
7086         if (!is_zero_ether_addr(res->mac))
7087                 eth_hw_addr_set(priv->dev, res->mac);
7088
7089         dev_set_drvdata(device, priv->dev);
7090
7091         /* Verify driver arguments */
7092         stmmac_verify_args();
7093
7094         priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7095         if (!priv->af_xdp_zc_qps)
7096                 return -ENOMEM;
7097
7098         /* Allocate workqueue */
7099         priv->wq = create_singlethread_workqueue("stmmac_wq");
7100         if (!priv->wq) {
7101                 dev_err(priv->device, "failed to create workqueue\n");
7102                 ret = -ENOMEM;
7103                 goto error_wq_init;
7104         }
7105
7106         INIT_WORK(&priv->service_task, stmmac_service_task);
7107
7108         /* Initialize the Link Partner FPE handshake work item */
7109         INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7110
7111         /* Override with the kernel parameter if supplied. XXX CRS XXX:
7112          * this needs to support multiple instances.
7113          */
7114         if ((phyaddr >= 0) && (phyaddr <= 31))
7115                 priv->plat->phy_addr = phyaddr;
7116
7117         if (priv->plat->stmmac_rst) {
7118                 ret = reset_control_assert(priv->plat->stmmac_rst);
7119                 reset_control_deassert(priv->plat->stmmac_rst);
7120                 /* Some reset controllers provide only a reset callback
7121                  * instead of the assert + deassert callback pair.
7122                  */
7123                 if (ret == -ENOTSUPP)
7124                         reset_control_reset(priv->plat->stmmac_rst);
7125         }
7126
7127         ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7128         if (ret == -ENOTSUPP)
7129                 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7130                         ERR_PTR(ret));
7131
7132         /* Init MAC and get the capabilities */
7133         ret = stmmac_hw_init(priv);
7134         if (ret)
7135                 goto error_hw_init;
7136
7137         /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7138          */
7139         if (priv->synopsys_id < DWMAC_CORE_5_20)
7140                 priv->plat->dma_cfg->dche = false;
7141
7142         stmmac_check_ether_addr(priv);
7143
7144         ndev->netdev_ops = &stmmac_netdev_ops;
7145
7146         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7147                             NETIF_F_RXCSUM;
7148
7149         ret = stmmac_tc_init(priv, priv);
7150         if (!ret)
7151                 ndev->hw_features |= NETIF_F_HW_TC;
7153
7154         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
7155                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7156                 if (priv->plat->has_gmac4)
7157                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7158                 priv->tso = true;
7159                 dev_info(priv->device, "TSO feature enabled\n");
7160         }
7161
7162         if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
7163                 ndev->hw_features |= NETIF_F_GRO;
7164                 priv->sph_cap = true;
7165                 priv->sph = priv->sph_cap;
7166                 dev_info(priv->device, "SPH feature enabled\n");
7167         }
7168
7169         /* The current IP register MAC_HW_Feature1[ADDR64] only defines
7170          * 32/40/64-bit widths, but some SoCs use other widths: the i.MX8MP, for
7171          * example, supports 34 bits, which maps to 40 bits in MAC_HW_Feature1[ADDR64].
7172          * So overwrite dma_cap.addr64 according to the real HW design.
7173          */
7174         if (priv->plat->addr64)
7175                 priv->dma_cap.addr64 = priv->plat->addr64;
7176
7177         if (priv->dma_cap.addr64) {
7178                 ret = dma_set_mask_and_coherent(device,
7179                                 DMA_BIT_MASK(priv->dma_cap.addr64));
7180                 if (!ret) {
7181                         dev_info(priv->device, "Using %d bits DMA width\n",
7182                                  priv->dma_cap.addr64);
7183
7184                         /*
7185                          * If more than 32 bits can be addressed, make sure to
7186                          * enable enhanced addressing mode.
7187                          */
7188                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7189                                 priv->plat->dma_cfg->eame = true;
7190                 } else {
7191                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7192                         if (ret) {
7193                                 dev_err(priv->device, "Failed to set DMA Mask\n");
7194                                 goto error_hw_init;
7195                         }
7196
7197                         priv->dma_cap.addr64 = 32;
7198                 }
7199         }
7200
7201         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7202         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7203 #ifdef STMMAC_VLAN_TAG_USED
7204         /* Both mac100 and gmac support receive VLAN tag detection */
7205         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7206         if (priv->dma_cap.vlhash) {
7207                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7208                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7209         }
7210         if (priv->dma_cap.vlins) {
7211                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7212                 if (priv->dma_cap.dvlan)
7213                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7214         }
7215 #endif
7216         priv->msg_enable = netif_msg_init(debug, default_msg_level);
7217
7218         /* Initialize RSS */
7219         rxq = priv->plat->rx_queues_to_use;
7220         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7221         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7222                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7223
7224         if (priv->dma_cap.rssen && priv->plat->rss_en)
7225                 ndev->features |= NETIF_F_RXHASH;
7226
7227         /* MTU range: 46 - hw-specific max */
7228         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7229         if (priv->plat->has_xgmac)
7230                 ndev->max_mtu = XGMAC_JUMBO_LEN;
7231         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7232                 ndev->max_mtu = JUMBO_LEN;
7233         else
7234                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7235         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7236          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7237          */
7238         if ((priv->plat->maxmtu < ndev->max_mtu) &&
7239             (priv->plat->maxmtu >= ndev->min_mtu))
7240                 ndev->max_mtu = priv->plat->maxmtu;
7241         else if (priv->plat->maxmtu < ndev->min_mtu)
7242                 dev_warn(priv->device,
7243                          "%s: warning: maxmtu having invalid value (%d)\n",
7244                          __func__, priv->plat->maxmtu);
7245
7246         if (flow_ctrl)
7247                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
7248
7249         /* Setup channels NAPI */
7250         stmmac_napi_add(ndev);
7251
7252         mutex_init(&priv->lock);
7253
7254         /* If a specific clk_csr value is passed from the platform, the
7255          * CSR Clock Range selection is fixed and cannot be changed at
7256          * run-time.
7257          * Otherwise the driver will try to set the MDC clock dynamically,
7258          * according to the actual csr clock input.
7259          */
7260         if (priv->plat->clk_csr >= 0)
7261                 priv->clk_csr = priv->plat->clk_csr;
7262         else
7263                 stmmac_clk_csr_set(priv);
7264
7265         stmmac_check_pcs_mode(priv);
7266
7267         pm_runtime_get_noresume(device);
7268         pm_runtime_set_active(device);
7269         if (!pm_runtime_enabled(device))
7270                 pm_runtime_enable(device);
7271
7272         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7273             priv->hw->pcs != STMMAC_PCS_RTBI) {
7274                 /* MDIO bus Registration */
7275                 ret = stmmac_mdio_register(ndev);
7276                 if (ret < 0) {
7277                         dev_err_probe(priv->device, ret,
7278                                       "%s: MDIO bus (id: %d) registration failed\n",
7279                                       __func__, priv->plat->bus_id);
7280                         goto error_mdio_register;
7281                 }
7282         }
7283
7284         if (priv->plat->speed_mode_2500)
7285                 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7286
7287         if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7288                 ret = stmmac_xpcs_setup(priv->mii);
7289                 if (ret)
7290                         goto error_xpcs_setup;
7291         }
7292
7293         ret = stmmac_phy_setup(priv);
7294         if (ret) {
7295                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7296                 goto error_phy_setup;
7297         }
7298
7299         ret = register_netdev(ndev);
7300         if (ret) {
7301                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7302                         __func__, ret);
7303                 goto error_netdev_register;
7304         }
7305
7306 #ifdef CONFIG_DEBUG_FS
7307         stmmac_init_fs(ndev);
7308 #endif
7309
7310         if (priv->plat->dump_debug_regs)
7311                 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7312
7313         /* Let pm_runtime_put() disable the clocks.
7314          * If CONFIG_PM is not enabled, the clocks will stay powered.
7315          */
7316         pm_runtime_put(device);
7317
7318         return ret;
7319
7320 error_netdev_register:
7321         phylink_destroy(priv->phylink);
7322 error_xpcs_setup:
7323 error_phy_setup:
7324         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7325             priv->hw->pcs != STMMAC_PCS_RTBI)
7326                 stmmac_mdio_unregister(ndev);
7327 error_mdio_register:
7328         stmmac_napi_del(ndev);
7329 error_hw_init:
7330         destroy_workqueue(priv->wq);
7331 error_wq_init:
7332         bitmap_free(priv->af_xdp_zc_qps);
7333
7334         return ret;
7335 }
7336 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7337
7338 /**
7339  * stmmac_dvr_remove
7340  * @dev: device pointer
7341  * Description: this function resets the TX/RX processes, disables the MAC
7342  * RX/TX, changes the link status and releases the DMA descriptor rings.
7343  */
7344 int stmmac_dvr_remove(struct device *dev)
7345 {
7346         struct net_device *ndev = dev_get_drvdata(dev);
7347         struct stmmac_priv *priv = netdev_priv(ndev);
7348
7349         netdev_info(priv->dev, "%s: removing driver\n", __func__);
7350
7351         pm_runtime_get_sync(dev);
7352
7353         stmmac_stop_all_dma(priv);
7354         stmmac_mac_set(priv, priv->ioaddr, false);
7355         netif_carrier_off(ndev);
7356         unregister_netdev(ndev);
7357
7358         /* Serdes power down needs to happen after the VLAN filter
7359          * is deleted, which is triggered by unregister_netdev().
7360          */
7361         if (priv->plat->serdes_powerdown)
7362                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7363
7364 #ifdef CONFIG_DEBUG_FS
7365         stmmac_exit_fs(ndev);
7366 #endif
7367         phylink_destroy(priv->phylink);
7368         if (priv->plat->stmmac_rst)
7369                 reset_control_assert(priv->plat->stmmac_rst);
7370         reset_control_assert(priv->plat->stmmac_ahb_rst);
7371         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7372             priv->hw->pcs != STMMAC_PCS_RTBI)
7373                 stmmac_mdio_unregister(ndev);
7374         destroy_workqueue(priv->wq);
7375         mutex_destroy(&priv->lock);
7376         bitmap_free(priv->af_xdp_zc_qps);
7377
7378         pm_runtime_disable(dev);
7379         pm_runtime_put_noidle(dev);
7380
7381         return 0;
7382 }
7383 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7384
7385 /**
7386  * stmmac_suspend - suspend callback
7387  * @dev: device pointer
7388  * Description: this function suspends the device; it is called by the
7389  * platform driver to stop the network queues, program the PMT register
7390  * (for WoL) and release the driver resources.
7391  */
7392 int stmmac_suspend(struct device *dev)
7393 {
7394         struct net_device *ndev = dev_get_drvdata(dev);
7395         struct stmmac_priv *priv = netdev_priv(ndev);
7396         u32 chan;
7397
7398         if (!ndev || !netif_running(ndev))
7399                 return 0;
7400
7401         mutex_lock(&priv->lock);
7402
7403         netif_device_detach(ndev);
7404
7405         stmmac_disable_all_queues(priv);
7406
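        /* Stop the per-queue TX coalescing timers */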
7407         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7408                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7409
7410         if (priv->eee_enabled) {
7411                 priv->tx_path_in_lpi_mode = false;
7412                 del_timer_sync(&priv->eee_ctrl_timer);
7413         }
7414
7415         /* Stop TX/RX DMA */
7416         stmmac_stop_all_dma(priv);
7417
7418         if (priv->plat->serdes_powerdown)
7419                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7420
7421         /* Enable Power down mode by programming the PMT regs */
7422         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7423                 stmmac_pmt(priv, priv->hw, priv->wolopts);
7424                 priv->irq_wake = 1;
7425         } else {
7426                 stmmac_mac_set(priv, priv->ioaddr, false);
7427                 pinctrl_pm_select_sleep_state(priv->device);
7428         }
7429
7430         mutex_unlock(&priv->lock);
7431
7432         rtnl_lock();
7433         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7434                 phylink_suspend(priv->phylink, true);
7435         } else {
7436                 if (device_may_wakeup(priv->device))
7437                         phylink_speed_down(priv->phylink, false);
7438                 phylink_suspend(priv->phylink, false);
7439         }
7440         rtnl_unlock();
7441
7442         if (priv->dma_cap.fpesel) {
7443                 /* Disable FPE */
7444                 stmmac_fpe_configure(priv, priv->ioaddr,
7445                                      priv->plat->tx_queues_to_use,
7446                                      priv->plat->rx_queues_to_use, false);
7447
7448                 stmmac_fpe_handshake(priv, false);
7449                 stmmac_fpe_stop_wq(priv);
7450         }
7451
7452         priv->speed = SPEED_UNKNOWN;
7453         return 0;
7454 }
7455 EXPORT_SYMBOL_GPL(stmmac_suspend);
7456
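/**
 * stmmac_reset_rx_queue - reset the software state of an RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 */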
7457 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7458 {
7459         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7460
7461         rx_q->cur_rx = 0;
7462         rx_q->dirty_rx = 0;
7463 }
7464
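/**
 * stmmac_reset_tx_queue - reset the software state of a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: clears the descriptor indexes and the cached MSS value and
 * resets the backing netdev TX queue (BQL state).
 */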
7465 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7466 {
7467         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7468
7469         tx_q->cur_tx = 0;
7470         tx_q->dirty_tx = 0;
7471         tx_q->mss = 0;
7472
7473         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7474 }
7475
7476 /**
7477  * stmmac_reset_queues_param - reset queue parameters
7478  * @priv: driver private structure
7479  */
7480 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7481 {
7482         u32 rx_cnt = priv->plat->rx_queues_to_use;
7483         u32 tx_cnt = priv->plat->tx_queues_to_use;
7484         u32 queue;
7485
7486         for (queue = 0; queue < rx_cnt; queue++)
7487                 stmmac_reset_rx_queue(priv, queue);
7488
7489         for (queue = 0; queue < tx_cnt; queue++)
7490                 stmmac_reset_tx_queue(priv, queue);
7491 }
7492
7493 /**
7494  * stmmac_resume - resume callback
7495  * @dev: device pointer
7496  * Description: on resume, this function is invoked to set up the DMA and the
7497  * core in a usable state.
7498  */
7499 int stmmac_resume(struct device *dev)
7500 {
7501         struct net_device *ndev = dev_get_drvdata(dev);
7502         struct stmmac_priv *priv = netdev_priv(ndev);
7503         int ret;
7504
7505         if (!netif_running(ndev))
7506                 return 0;
7507
7508         /* The Power Down bit in the PMT register is cleared
7509          * automatically as soon as a magic packet or a Wake-up frame
7510          * is received. It is still better to clear this bit manually,
7511          * because it can cause problems when the resume is triggered
7512          * by another device (e.g. a serial console).
7513          */
7514         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7515                 mutex_lock(&priv->lock);
7516                 stmmac_pmt(priv, priv->hw, 0);
7517                 mutex_unlock(&priv->lock);
7518                 priv->irq_wake = 0;
7519         } else {
7520                 pinctrl_pm_select_default_state(priv->device);
7521                 /* reset the phy so that it's ready */
7522                 if (priv->mii)
7523                         stmmac_mdio_reset(priv->mii);
7524         }
7525
7526         if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
7527                 ret = priv->plat->serdes_powerup(ndev,
7528                                                  priv->plat->bsp_priv);
7529
7530                 if (ret < 0)
7531                         return ret;
7532         }
7533
7534         rtnl_lock();
7535         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7536                 phylink_resume(priv->phylink);
7537         } else {
7538                 phylink_resume(priv->phylink);
7539                 if (device_may_wakeup(priv->device))
7540                         phylink_speed_up(priv->phylink);
7541         }
7542         rtnl_unlock();
7543
7544         rtnl_lock();
7545         mutex_lock(&priv->lock);
7546
7547         stmmac_reset_queues_param(priv);
7548
7549         stmmac_free_tx_skbufs(priv);
7550         stmmac_clear_descriptors(priv, &priv->dma_conf);
7551
7552         stmmac_hw_setup(ndev, false);
7553         stmmac_init_coalesce(priv);
7554         stmmac_set_rx_mode(ndev);
7555
7556         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7557
7558         stmmac_enable_all_queues(priv);
7559         stmmac_enable_all_dma_irq(priv);
7560
7561         mutex_unlock(&priv->lock);
7562         rtnl_unlock();
7563
7564         netif_device_attach(ndev);
7565
7566         return 0;
7567 }
7568 EXPORT_SYMBOL_GPL(stmmac_resume);
7569
7570 #ifndef MODULE
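/**
 * stmmac_cmdline_opt - parse the stmmaceth= kernel command line options
 * @str: comma-separated list of "option:value" pairs, e.g.
 *       stmmaceth=debug:16,phyaddr:1,watchdog:4000
 * Description: only used when the driver is built in; malformed values are
 * reported and otherwise ignored.
 */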
7571 static int __init stmmac_cmdline_opt(char *str)
7572 {
7573         char *opt;
7574
7575         if (!str || !*str)
7576                 return 1;
7577         while ((opt = strsep(&str, ",")) != NULL) {
7578                 if (!strncmp(opt, "debug:", 6)) {
7579                         if (kstrtoint(opt + 6, 0, &debug))
7580                                 goto err;
7581                 } else if (!strncmp(opt, "phyaddr:", 8)) {
7582                         if (kstrtoint(opt + 8, 0, &phyaddr))
7583                                 goto err;
7584                 } else if (!strncmp(opt, "buf_sz:", 7)) {
7585                         if (kstrtoint(opt + 7, 0, &buf_sz))
7586                                 goto err;
7587                 } else if (!strncmp(opt, "tc:", 3)) {
7588                         if (kstrtoint(opt + 3, 0, &tc))
7589                                 goto err;
7590                 } else if (!strncmp(opt, "watchdog:", 9)) {
7591                         if (kstrtoint(opt + 9, 0, &watchdog))
7592                                 goto err;
7593                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7594                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
7595                                 goto err;
7596                 } else if (!strncmp(opt, "pause:", 6)) {
7597                         if (kstrtoint(opt + 6, 0, &pause))
7598                                 goto err;
7599                 } else if (!strncmp(opt, "eee_timer:", 10)) {
7600                         if (kstrtoint(opt + 10, 0, &eee_timer))
7601                                 goto err;
7602                 } else if (!strncmp(opt, "chain_mode:", 11)) {
7603                         if (kstrtoint(opt + 11, 0, &chain_mode))
7604                                 goto err;
7605                 }
7606         }
7607         return 1;
7608
7609 err:
7610         pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
7611         return 1;
7612 }
7613
7614 __setup("stmmaceth=", stmmac_cmdline_opt);
7615 #endif /* MODULE */
7616
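/* Module init/exit only set up and tear down the debugfs support; the devices
 * themselves are registered through stmmac_dvr_probe(), called by the
 * bus-specific glue drivers.
 */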
7617 static int __init stmmac_init(void)
7618 {
7619 #ifdef CONFIG_DEBUG_FS
7620         /* Create debugfs main directory if it doesn't exist yet */
7621         if (!stmmac_fs_dir)
7622                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7623         register_netdevice_notifier(&stmmac_notifier);
7624 #endif
7625
7626         return 0;
7627 }
7628
7629 static void __exit stmmac_exit(void)
7630 {
7631 #ifdef CONFIG_DEBUG_FS
7632         unregister_netdevice_notifier(&stmmac_notifier);
7633         debugfs_remove_recursive(stmmac_fs_dir);
7634 #endif
7635 }
7636
7637 module_init(stmmac_init)
7638 module_exit(stmmac_exit)
7639
7640 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7641 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7642 MODULE_LICENSE("GPL");