drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52
53 /* As long as the interface is active, we keep the timestamping counter enabled
54  * with fine resolution and binary rollover. This avoids non-monotonic behavior
55  * (clock jumps) when changing timestamping settings at runtime.
56  */
57 #define STMMAC_HWTS_ACTIVE      (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58                                  PTP_TCR_TSCTRLSSR)
59
60 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
62
63 /* Module parameters */
64 #define TX_TIMEO        5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76
77 #define STMMAC_TX_THRESH(x)     ((x)->dma_conf.dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x)     ((x)->dma_conf.dma_rx_size / 4)
79
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX        256
82 #define STMMAC_TX_XSK_AVAIL             16
83 #define STMMAC_RX_FILL_BATCH            16
84
85 #define STMMAC_XDP_PASS         0
86 #define STMMAC_XDP_CONSUMED     BIT(0)
87 #define STMMAC_XDP_TX           BIT(1)
88 #define STMMAC_XDP_REDIRECT     BIT(2)
89
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102
103 #define DEFAULT_BUFSIZE 1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107
108 #define STMMAC_RX_COPYBREAK     256
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
112                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER        1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but allows the user to force use of the chain instead of the ring
122  */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139                                           u32 rxmode, u32 chan);
140
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151         int ret = 0;
152
153         if (enabled) {
154                 ret = clk_prepare_enable(priv->plat->stmmac_clk);
155                 if (ret)
156                         return ret;
157                 ret = clk_prepare_enable(priv->plat->pclk);
158                 if (ret) {
159                         clk_disable_unprepare(priv->plat->stmmac_clk);
160                         return ret;
161                 }
162                 if (priv->plat->clks_config) {
163                         ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164                         if (ret) {
165                                 clk_disable_unprepare(priv->plat->stmmac_clk);
166                                 clk_disable_unprepare(priv->plat->pclk);
167                                 return ret;
168                         }
169                 }
170         } else {
171                 clk_disable_unprepare(priv->plat->stmmac_clk);
172                 clk_disable_unprepare(priv->plat->pclk);
173                 if (priv->plat->clks_config)
174                         priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175         }
176
177         return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
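/* Note on the helper above: on enable it brings up stmmac_clk, then pclk,
 * then the optional platform clks_config() callback, unwinding the clocks it
 * already enabled if a later step fails; on disable it shuts all of them
 * down. It is exported so that other stmmac components can reuse it.
 */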
180
181 /**
182  * stmmac_verify_args - verify the driver parameters.
183  * Description: it checks the driver parameters and sets a default in case of
184  * errors.
185  */
186 static void stmmac_verify_args(void)
187 {
188         if (unlikely(watchdog < 0))
189                 watchdog = TX_TIMEO;
190         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
191                 buf_sz = DEFAULT_BUFSIZE;
192         if (unlikely(flow_ctrl > 1))
193                 flow_ctrl = FLOW_AUTO;
194         else if (likely(flow_ctrl < 0))
195                 flow_ctrl = FLOW_OFF;
196         if (unlikely((pause < 0) || (pause > 0xffff)))
197                 pause = PAUSE_TIME;
198         if (eee_timer < 0)
199                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
200 }
201
202 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
203 {
204         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
205         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
207         u32 queue;
208
209         for (queue = 0; queue < maxq; queue++) {
210                 struct stmmac_channel *ch = &priv->channel[queue];
211
212                 if (stmmac_xdp_is_enabled(priv) &&
213                     test_bit(queue, priv->af_xdp_zc_qps)) {
214                         napi_disable(&ch->rxtx_napi);
215                         continue;
216                 }
217
218                 if (queue < rx_queues_cnt)
219                         napi_disable(&ch->rx_napi);
220                 if (queue < tx_queues_cnt)
221                         napi_disable(&ch->tx_napi);
222         }
223 }
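/* Note: a queue that has an AF_XDP zero-copy pool registered uses a single
 * combined rxtx NAPI context, so only ch->rxtx_napi is disabled for it; all
 * other queues disable their separate RX and/or TX NAPI instances.
 */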
224
225 /**
226  * stmmac_disable_all_queues - Disable all queues
227  * @priv: driver private structure
228  */
229 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232         struct stmmac_rx_queue *rx_q;
233         u32 queue;
234
235         /* synchronize_rcu() needed for pending XDP buffers to drain */
236         for (queue = 0; queue < rx_queues_cnt; queue++) {
237                 rx_q = &priv->dma_conf.rx_queue[queue];
238                 if (rx_q->xsk_pool) {
239                         synchronize_rcu();
240                         break;
241                 }
242         }
243
244         __stmmac_disable_all_queues(priv);
245 }
246
247 /**
248  * stmmac_enable_all_queues - Enable all queues
249  * @priv: driver private structure
250  */
251 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
252 {
253         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
254         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
255         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
256         u32 queue;
257
258         for (queue = 0; queue < maxq; queue++) {
259                 struct stmmac_channel *ch = &priv->channel[queue];
260
261                 if (stmmac_xdp_is_enabled(priv) &&
262                     test_bit(queue, priv->af_xdp_zc_qps)) {
263                         napi_enable(&ch->rxtx_napi);
264                         continue;
265                 }
266
267                 if (queue < rx_queues_cnt)
268                         napi_enable(&ch->rx_napi);
269                 if (queue < tx_queues_cnt)
270                         napi_enable(&ch->tx_napi);
271         }
272 }
273
274 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
275 {
276         if (!test_bit(STMMAC_DOWN, &priv->state) &&
277             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
278                 queue_work(priv->wq, &priv->service_task);
279 }
280
281 static void stmmac_global_err(struct stmmac_priv *priv)
282 {
283         netif_carrier_off(priv->dev);
284         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
285         stmmac_service_event_schedule(priv);
286 }
287
288 /**
289  * stmmac_clk_csr_set - dynamically set the MDC clock
290  * @priv: driver private structure
291  * Description: this is to dynamically set the MDC clock according to the csr
292  * clock input.
293  * Note:
294  *      If a specific clk_csr value is passed from the platform,
295  *      the CSR Clock Range selection cannot be changed at run-time
296  *      and is fixed (as reported in the driver documentation).
297  *      Otherwise the driver will try to set the MDC clock
298  *      dynamically according to the actual clock input.
299  */
300 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
301 {
302         u32 clk_rate;
303
304         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
305
306         /* The platform-provided default clk_csr is assumed valid
307          * for all cases except the ones listed below.
308          * For values higher than the IEEE 802.3 specified frequency
309          * we cannot estimate the proper divider because the frequency
310          * of clk_csr_i is not known, so the default divider is left
311          * unchanged.
312          */
313         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
314                 if (clk_rate < CSR_F_35M)
315                         priv->clk_csr = STMMAC_CSR_20_35M;
316                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
317                         priv->clk_csr = STMMAC_CSR_35_60M;
318                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
319                         priv->clk_csr = STMMAC_CSR_60_100M;
320                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
321                         priv->clk_csr = STMMAC_CSR_100_150M;
322                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
323                         priv->clk_csr = STMMAC_CSR_150_250M;
324                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
325                         priv->clk_csr = STMMAC_CSR_250_300M;
326         }
327
328         if (priv->plat->has_sun8i) {
329                 if (clk_rate > 160000000)
330                         priv->clk_csr = 0x03;
331                 else if (clk_rate > 80000000)
332                         priv->clk_csr = 0x02;
333                 else if (clk_rate > 40000000)
334                         priv->clk_csr = 0x01;
335                 else
336                         priv->clk_csr = 0;
337         }
338
339         if (priv->plat->has_xgmac) {
340                 if (clk_rate > 400000000)
341                         priv->clk_csr = 0x5;
342                 else if (clk_rate > 350000000)
343                         priv->clk_csr = 0x4;
344                 else if (clk_rate > 300000000)
345                         priv->clk_csr = 0x3;
346                 else if (clk_rate > 250000000)
347                         priv->clk_csr = 0x2;
348                 else if (clk_rate > 150000000)
349                         priv->clk_csr = 0x1;
350                 else
351                         priv->clk_csr = 0x0;
352         }
353 }
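/* Illustrative example (values assumed, not from the code above): a 75 MHz
 * csr clock falls in the 60-100 MHz range, so priv->clk_csr becomes
 * STMMAC_CSR_60_100M and MDC is generated from csr_clk with the divider that
 * range selects in the MAC (typically /42 on dwmac cores).
 */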
354
355 static void print_pkt(unsigned char *buf, int len)
356 {
357         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
358         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
359 }
360
361 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
362 {
363         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
364         u32 avail;
365
366         if (tx_q->dirty_tx > tx_q->cur_tx)
367                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
368         else
369                 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
370
371         return avail;
372 }
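/* Illustrative example (values assumed): with dma_tx_size = 512,
 * cur_tx = 10 and dirty_tx = 5, avail = 512 - 10 + 5 - 1 = 506. One
 * descriptor is always kept unused so that cur_tx == dirty_tx unambiguously
 * means the ring is empty.
 */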
373
374 /**
375  * stmmac_rx_dirty - Get RX queue dirty
376  * @priv: driver private structure
377  * @queue: RX queue index
378  */
379 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
380 {
381         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
382         u32 dirty;
383
384         if (rx_q->dirty_rx <= rx_q->cur_rx)
385                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
386         else
387                 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
388
389         return dirty;
390 }
391
392 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
393 {
394         int tx_lpi_timer;
395
396         /* Clear/set the SW EEE timer flag based on LPI ET enablement */
397         priv->eee_sw_timer_en = en ? 0 : 1;
398         tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
399         stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
400 }
401
402 /**
403  * stmmac_enable_eee_mode - check and enter LPI mode
404  * @priv: driver private structure
405  * Description: this function verifies that all TX queues are idle and then
406  * enters LPI mode when EEE is in use.
407  */
408 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
409 {
410         u32 tx_cnt = priv->plat->tx_queues_to_use;
411         u32 queue;
412
413         /* check if all TX queues have the work finished */
414         for (queue = 0; queue < tx_cnt; queue++) {
415                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
416
417                 if (tx_q->dirty_tx != tx_q->cur_tx)
418                         return -EBUSY; /* still unfinished work */
419         }
420
421         /* Check and enter in LPI mode */
422         if (!priv->tx_path_in_lpi_mode)
423                 stmmac_set_eee_mode(priv, priv->hw,
424                                 priv->plat->en_tx_lpi_clockgating);
425         return 0;
426 }
427
428 /**
429  * stmmac_disable_eee_mode - disable and exit from LPI mode
430  * @priv: driver private structure
431  * Description: this function exits and disables EEE when the LPI state
432  * is active. It is called from the xmit path.
433  */
434 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
435 {
436         if (!priv->eee_sw_timer_en) {
437                 stmmac_lpi_entry_timer_config(priv, 0);
438                 return;
439         }
440
441         stmmac_reset_eee_mode(priv, priv->hw);
442         del_timer_sync(&priv->eee_ctrl_timer);
443         priv->tx_path_in_lpi_mode = false;
444 }
445
446 /**
447  * stmmac_eee_ctrl_timer - EEE TX SW timer.
448  * @t:  timer_list struct containing private info
449  * Description:
450  *  if there is no data transfer and we are not already in the LPI state,
451  *  then the MAC transmitter can be moved to the LPI state.
452  */
453 static void stmmac_eee_ctrl_timer(struct timer_list *t)
454 {
455         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
456
457         if (stmmac_enable_eee_mode(priv))
458                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
459 }
460
461 /**
462  * stmmac_eee_init - init EEE
463  * @priv: driver private structure
464  * Description:
465  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
466  *  can also manage EEE, this function enables the LPI state and starts
467  *  the related timer.
468  */
469 bool stmmac_eee_init(struct stmmac_priv *priv)
470 {
471         int eee_tw_timer = priv->eee_tw_timer;
472
473         /* When using the PCS we cannot deal with the phy registers at this
474          * stage, so extra features like EEE are not supported.
475          */
476         if (priv->hw->pcs == STMMAC_PCS_TBI ||
477             priv->hw->pcs == STMMAC_PCS_RTBI)
478                 return false;
479
480         /* Check if MAC core supports the EEE feature. */
481         if (!priv->dma_cap.eee)
482                 return false;
483
484         mutex_lock(&priv->lock);
485
486         /* Check if it needs to be deactivated */
487         if (!priv->eee_active) {
488                 if (priv->eee_enabled) {
489                         netdev_dbg(priv->dev, "disable EEE\n");
490                         stmmac_lpi_entry_timer_config(priv, 0);
491                         del_timer_sync(&priv->eee_ctrl_timer);
492                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
493                         if (priv->hw->xpcs)
494                                 xpcs_config_eee(priv->hw->xpcs,
495                                                 priv->plat->mult_fact_100ns,
496                                                 false);
497                 }
498                 mutex_unlock(&priv->lock);
499                 return false;
500         }
501
502         if (priv->eee_active && !priv->eee_enabled) {
503                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
504                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
505                                      eee_tw_timer);
506                 if (priv->hw->xpcs)
507                         xpcs_config_eee(priv->hw->xpcs,
508                                         priv->plat->mult_fact_100ns,
509                                         true);
510         }
511
512         if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
513                 del_timer_sync(&priv->eee_ctrl_timer);
514                 priv->tx_path_in_lpi_mode = false;
515                 stmmac_lpi_entry_timer_config(priv, 1);
516         } else {
517                 stmmac_lpi_entry_timer_config(priv, 0);
518                 mod_timer(&priv->eee_ctrl_timer,
519                           STMMAC_LPI_T(priv->tx_lpi_timer));
520         }
521
522         mutex_unlock(&priv->lock);
523         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
524         return true;
525 }
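/* Note: stmmac_eee_init() returns true only when EEE is active and has been
 * (re)armed, either via the HW LPI entry timer (GMAC4 with a tx_lpi_timer not
 * above STMMAC_ET_MAX) or via the SW eee_ctrl_timer; when eee_active is false
 * it tears the timers down and returns false.
 */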
526
527 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
528  * @priv: driver private structure
529  * @p : descriptor pointer
530  * @skb : the socket buffer
531  * Description :
532  * This function reads the timestamp from the descriptor, passes it to the
533  * stack and also performs some sanity checks.
534  */
535 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
536                                    struct dma_desc *p, struct sk_buff *skb)
537 {
538         struct skb_shared_hwtstamps shhwtstamp;
539         bool found = false;
540         u64 ns = 0;
541
542         if (!priv->hwts_tx_en)
543                 return;
544
545         /* exit if skb doesn't support hw tstamp */
546         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
547                 return;
548
549         /* check tx tstamp status */
550         if (stmmac_get_tx_timestamp_status(priv, p)) {
551                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
552                 found = true;
553         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
554                 found = true;
555         }
556
557         if (found) {
558                 ns -= priv->plat->cdc_error_adj;
559
560                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
561                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
562
563                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
564                 /* pass tstamp to stack */
565                 skb_tstamp_tx(skb, &shhwtstamp);
566         }
567 }
568
569 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
570  * @priv: driver private structure
571  * @p : descriptor pointer
572  * @np : next descriptor pointer
573  * @skb : the socket buffer
574  * Description :
575  * This function reads the received packet's timestamp from the descriptor
576  * and passes it to the stack. It also performs some sanity checks.
577  */
578 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
579                                    struct dma_desc *np, struct sk_buff *skb)
580 {
581         struct skb_shared_hwtstamps *shhwtstamp = NULL;
582         struct dma_desc *desc = p;
583         u64 ns = 0;
584
585         if (!priv->hwts_rx_en)
586                 return;
587         /* For GMAC4, the valid timestamp is from CTX next desc. */
588         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
589                 desc = np;
590
591         /* Check if timestamp is available */
592         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
593                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
594
595                 ns -= priv->plat->cdc_error_adj;
596
597                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
598                 shhwtstamp = skb_hwtstamps(skb);
599                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
600                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
601         } else  {
602                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
603         }
604 }
605
606 /**
607  *  stmmac_hwtstamp_set - control hardware timestamping.
608  *  @dev: device pointer.
609  *  @ifr: An IOCTL specific structure, that can contain a pointer to
610  *  a proprietary structure used to pass information to the driver.
611  *  Description:
612  *  This function configures the MAC to enable/disable both outgoing (TX)
613  *  and incoming (RX) packet timestamping based on user input.
614  *  Return Value:
615  *  0 on success and an appropriate negative error code on failure.
616  */
617 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
618 {
619         struct stmmac_priv *priv = netdev_priv(dev);
620         struct hwtstamp_config config;
621         u32 ptp_v2 = 0;
622         u32 tstamp_all = 0;
623         u32 ptp_over_ipv4_udp = 0;
624         u32 ptp_over_ipv6_udp = 0;
625         u32 ptp_over_ethernet = 0;
626         u32 snap_type_sel = 0;
627         u32 ts_master_en = 0;
628         u32 ts_event_en = 0;
629
630         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
631                 netdev_alert(priv->dev, "No support for HW time stamping\n");
632                 priv->hwts_tx_en = 0;
633                 priv->hwts_rx_en = 0;
634
635                 return -EOPNOTSUPP;
636         }
637
638         if (copy_from_user(&config, ifr->ifr_data,
639                            sizeof(config)))
640                 return -EFAULT;
641
642         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
643                    __func__, config.flags, config.tx_type, config.rx_filter);
644
645         if (config.tx_type != HWTSTAMP_TX_OFF &&
646             config.tx_type != HWTSTAMP_TX_ON)
647                 return -ERANGE;
648
649         if (priv->adv_ts) {
650                 switch (config.rx_filter) {
651                 case HWTSTAMP_FILTER_NONE:
652                         /* do not time stamp any incoming packets */
653                         config.rx_filter = HWTSTAMP_FILTER_NONE;
654                         break;
655
656                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
657                         /* PTP v1, UDP, any kind of event packet */
658                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
659                         /* 'xmac' hardware can support Sync, Pdelay_Req and
660                          * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
661                          * This leaves Delay_Req timestamps out.
662                          * Enable all events *and* general purpose message
663                          * timestamping.
664                          */
665                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
666                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
667                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
668                         break;
669
670                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
671                         /* PTP v1, UDP, Sync packet */
672                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
673                         /* take time stamp for SYNC messages only */
674                         ts_event_en = PTP_TCR_TSEVNTENA;
675
676                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678                         break;
679
680                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
681                         /* PTP v1, UDP, Delay_req packet */
682                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
683                         /* take time stamp for Delay_Req messages only */
684                         ts_master_en = PTP_TCR_TSMSTRENA;
685                         ts_event_en = PTP_TCR_TSEVNTENA;
686
687                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
688                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
689                         break;
690
691                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
692                         /* PTP v2, UDP, any kind of event packet */
693                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
694                         ptp_v2 = PTP_TCR_TSVER2ENA;
695                         /* take time stamp for all event messages */
696                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
697
698                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
699                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
700                         break;
701
702                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
703                         /* PTP v2, UDP, Sync packet */
704                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
705                         ptp_v2 = PTP_TCR_TSVER2ENA;
706                         /* take time stamp for SYNC messages only */
707                         ts_event_en = PTP_TCR_TSEVNTENA;
708
709                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
710                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
711                         break;
712
713                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
714                         /* PTP v2, UDP, Delay_req packet */
715                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
716                         ptp_v2 = PTP_TCR_TSVER2ENA;
717                         /* take time stamp for Delay_Req messages only */
718                         ts_master_en = PTP_TCR_TSMSTRENA;
719                         ts_event_en = PTP_TCR_TSEVNTENA;
720
721                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
722                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
723                         break;
724
725                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
726                         /* PTP v2/802.1AS, any layer, any kind of event packet */
727                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
728                         ptp_v2 = PTP_TCR_TSVER2ENA;
729                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
730                         if (priv->synopsys_id < DWMAC_CORE_4_10)
731                                 ts_event_en = PTP_TCR_TSEVNTENA;
732                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
733                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
734                         ptp_over_ethernet = PTP_TCR_TSIPENA;
735                         break;
736
737                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
738                         /* PTP v2/802.1AS, any layer, Sync packet */
739                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
740                         ptp_v2 = PTP_TCR_TSVER2ENA;
741                         /* take time stamp for SYNC messages only */
742                         ts_event_en = PTP_TCR_TSEVNTENA;
743
744                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
745                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
746                         ptp_over_ethernet = PTP_TCR_TSIPENA;
747                         break;
748
749                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
750                         /* PTP v2/802.1AS, any layer, Delay_req packet */
751                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
752                         ptp_v2 = PTP_TCR_TSVER2ENA;
753                         /* take time stamp for Delay_Req messages only */
754                         ts_master_en = PTP_TCR_TSMSTRENA;
755                         ts_event_en = PTP_TCR_TSEVNTENA;
756
757                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
758                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
759                         ptp_over_ethernet = PTP_TCR_TSIPENA;
760                         break;
761
762                 case HWTSTAMP_FILTER_NTP_ALL:
763                 case HWTSTAMP_FILTER_ALL:
764                         /* time stamp any incoming packet */
765                         config.rx_filter = HWTSTAMP_FILTER_ALL;
766                         tstamp_all = PTP_TCR_TSENALL;
767                         break;
768
769                 default:
770                         return -ERANGE;
771                 }
772         } else {
773                 switch (config.rx_filter) {
774                 case HWTSTAMP_FILTER_NONE:
775                         config.rx_filter = HWTSTAMP_FILTER_NONE;
776                         break;
777                 default:
778                         /* PTP v1, UDP, any kind of event packet */
779                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
780                         break;
781                 }
782         }
783         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
784         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
785
786         priv->systime_flags = STMMAC_HWTS_ACTIVE;
787
788         if (priv->hwts_tx_en || priv->hwts_rx_en) {
789                 priv->systime_flags |= tstamp_all | ptp_v2 |
790                                        ptp_over_ethernet | ptp_over_ipv6_udp |
791                                        ptp_over_ipv4_udp | ts_event_en |
792                                        ts_master_en | snap_type_sel;
793         }
794
795         stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
796
797         memcpy(&priv->tstamp_config, &config, sizeof(config));
798
799         return copy_to_user(ifr->ifr_data, &config,
800                             sizeof(config)) ? -EFAULT : 0;
801 }
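/* Usage sketch (userspace side, assumed and not part of this driver): a PTP
 * daemon such as ptp4l reaches this handler through the SIOCSHWTSTAMP ioctl,
 * e.g. by filling a struct hwtstamp_config with tx_type = HWTSTAMP_TX_ON and
 * rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT and passing it via ifreq.ifr_data.
 */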
802
803 /**
804  *  stmmac_hwtstamp_get - read hardware timestamping.
805  *  @dev: device pointer.
806  *  @ifr: An IOCTL specific structure, that can contain a pointer to
807  *  a proprietary structure used to pass information to the driver.
808  *  Description:
809  *  This function obtains the current hardware timestamping settings
810  *  as requested.
811  */
812 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
813 {
814         struct stmmac_priv *priv = netdev_priv(dev);
815         struct hwtstamp_config *config = &priv->tstamp_config;
816
817         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
818                 return -EOPNOTSUPP;
819
820         return copy_to_user(ifr->ifr_data, config,
821                             sizeof(*config)) ? -EFAULT : 0;
822 }
823
824 /**
825  * stmmac_init_tstamp_counter - init hardware timestamping counter
826  * @priv: driver private structure
827  * @systime_flags: timestamping flags
828  * Description:
829  * Initialize hardware counter for packet timestamping.
830  * This is valid as long as the interface is open and not suspended.
831  * It will be rerun after resuming from suspend, in which case the timestamping
832  * flags updated by stmmac_hwtstamp_set() also need to be restored.
833  */
834 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
835 {
836         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
837         struct timespec64 now;
838         u32 sec_inc = 0;
839         u64 temp = 0;
840
841         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
842                 return -EOPNOTSUPP;
843
844         stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
845         priv->systime_flags = systime_flags;
846
847         /* program Sub Second Increment reg */
848         stmmac_config_sub_second_increment(priv, priv->ptpaddr,
849                                            priv->plat->clk_ptp_rate,
850                                            xmac, &sec_inc);
851         temp = div_u64(1000000000ULL, sec_inc);
852
853         /* Store sub second increment for later use */
854         priv->sub_second_inc = sec_inc;
855
856         /* Calculate the default addend value:
857          * addend = (2^32) / freq_div_ratio,
858          * where freq_div_ratio = 1e9 ns / sec_inc
860          */
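        /* Illustrative numbers (assumed): with clk_ptp_rate = 100 MHz and
         * sec_inc programmed to 20 ns, temp = 1e9 / 20 = 50,000,000 and
         * addend = (50,000,000 << 32) / 100,000,000 = 0x80000000, i.e. the
         * 32-bit accumulator wraps (advancing the sub-second counter by
         * sec_inc) once every two PTP clock cycles on average.
         */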
861         temp = (u64)(temp << 32);
862         priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
863         stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
864
865         /* initialize system time */
866         ktime_get_real_ts64(&now);
867
868         /* lower 32 bits of tv_sec are safe until y2106 */
869         stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
870
871         return 0;
872 }
873 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
874
875 /**
876  * stmmac_init_ptp - init PTP
877  * @priv: driver private structure
878  * Description: this verifies whether the HW supports PTPv1 or PTPv2
879  * by looking at the HW capability register.
880  * This function also registers the ptp driver.
881  */
882 static int stmmac_init_ptp(struct stmmac_priv *priv)
883 {
884         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
885         int ret;
886
887         if (priv->plat->ptp_clk_freq_config)
888                 priv->plat->ptp_clk_freq_config(priv);
889
890         ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
891         if (ret)
892                 return ret;
893
894         priv->adv_ts = 0;
895         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
896         if (xmac && priv->dma_cap.atime_stamp)
897                 priv->adv_ts = 1;
898         /* Dwmac 3.x core with extend_desc can support adv_ts */
899         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
900                 priv->adv_ts = 1;
901
902         if (priv->dma_cap.time_stamp)
903                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
904
905         if (priv->adv_ts)
906                 netdev_info(priv->dev,
907                             "IEEE 1588-2008 Advanced Timestamp supported\n");
908
909         priv->hwts_tx_en = 0;
910         priv->hwts_rx_en = 0;
911
912         return 0;
913 }
914
915 static void stmmac_release_ptp(struct stmmac_priv *priv)
916 {
917         clk_disable_unprepare(priv->plat->clk_ptp_ref);
918         stmmac_ptp_unregister(priv);
919 }
920
921 /**
922  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
923  *  @priv: driver private structure
924  *  @duplex: duplex mode to configure
925  *  Description: It is used for configuring flow control in all queues
926  */
927 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
928 {
929         u32 tx_cnt = priv->plat->tx_queues_to_use;
930
931         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
932                         priv->pause, tx_cnt);
933 }
934
935 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
936                                                  phy_interface_t interface)
937 {
938         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
939
940         if (!priv->hw->xpcs)
941                 return NULL;
942
943         return &priv->hw->xpcs->pcs;
944 }
945
946 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
947                               const struct phylink_link_state *state)
948 {
949         /* Nothing to do, xpcs_config() handles everything */
950 }
951
952 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
953 {
954         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
955         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
956         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
957         bool *hs_enable = &fpe_cfg->hs_enable;
958
959         if (is_up && *hs_enable) {
960                 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
961         } else {
962                 *lo_state = FPE_STATE_OFF;
963                 *lp_state = FPE_STATE_OFF;
964         }
965 }
966
967 static void stmmac_mac_link_down(struct phylink_config *config,
968                                  unsigned int mode, phy_interface_t interface)
969 {
970         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
971
972         stmmac_mac_set(priv, priv->ioaddr, false);
973         priv->eee_active = false;
974         priv->tx_lpi_enabled = false;
975         priv->eee_enabled = stmmac_eee_init(priv);
976         stmmac_set_eee_pls(priv, priv->hw, false);
977
978         if (priv->dma_cap.fpesel)
979                 stmmac_fpe_link_state_handle(priv, false);
980 }
981
982 static void stmmac_mac_link_up(struct phylink_config *config,
983                                struct phy_device *phy,
984                                unsigned int mode, phy_interface_t interface,
985                                int speed, int duplex,
986                                bool tx_pause, bool rx_pause)
987 {
988         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989         u32 ctrl;
990
991         ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
992         ctrl &= ~priv->hw->link.speed_mask;
993
994         if (interface == PHY_INTERFACE_MODE_USXGMII) {
995                 switch (speed) {
996                 case SPEED_10000:
997                         ctrl |= priv->hw->link.xgmii.speed10000;
998                         break;
999                 case SPEED_5000:
1000                         ctrl |= priv->hw->link.xgmii.speed5000;
1001                         break;
1002                 case SPEED_2500:
1003                         ctrl |= priv->hw->link.xgmii.speed2500;
1004                         break;
1005                 default:
1006                         return;
1007                 }
1008         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1009                 switch (speed) {
1010                 case SPEED_100000:
1011                         ctrl |= priv->hw->link.xlgmii.speed100000;
1012                         break;
1013                 case SPEED_50000:
1014                         ctrl |= priv->hw->link.xlgmii.speed50000;
1015                         break;
1016                 case SPEED_40000:
1017                         ctrl |= priv->hw->link.xlgmii.speed40000;
1018                         break;
1019                 case SPEED_25000:
1020                         ctrl |= priv->hw->link.xlgmii.speed25000;
1021                         break;
1022                 case SPEED_10000:
1023                         ctrl |= priv->hw->link.xgmii.speed10000;
1024                         break;
1025                 case SPEED_2500:
1026                         ctrl |= priv->hw->link.speed2500;
1027                         break;
1028                 case SPEED_1000:
1029                         ctrl |= priv->hw->link.speed1000;
1030                         break;
1031                 default:
1032                         return;
1033                 }
1034         } else {
1035                 switch (speed) {
1036                 case SPEED_2500:
1037                         ctrl |= priv->hw->link.speed2500;
1038                         break;
1039                 case SPEED_1000:
1040                         ctrl |= priv->hw->link.speed1000;
1041                         break;
1042                 case SPEED_100:
1043                         ctrl |= priv->hw->link.speed100;
1044                         break;
1045                 case SPEED_10:
1046                         ctrl |= priv->hw->link.speed10;
1047                         break;
1048                 default:
1049                         return;
1050                 }
1051         }
1052
1053         priv->speed = speed;
1054
1055         if (priv->plat->fix_mac_speed)
1056                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1057
1058         if (!duplex)
1059                 ctrl &= ~priv->hw->link.duplex;
1060         else
1061                 ctrl |= priv->hw->link.duplex;
1062
1063         /* Flow Control operation */
1064         if (tx_pause && rx_pause)
1065                 stmmac_mac_flow_ctrl(priv, duplex);
1066
1067         writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1068
1069         stmmac_mac_set(priv, priv->ioaddr, true);
1070         if (phy && priv->dma_cap.eee) {
1071                 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1072                 priv->eee_enabled = stmmac_eee_init(priv);
1073                 priv->tx_lpi_enabled = priv->eee_enabled;
1074                 stmmac_set_eee_pls(priv, priv->hw, true);
1075         }
1076
1077         if (priv->dma_cap.fpesel)
1078                 stmmac_fpe_link_state_handle(priv, true);
1079 }
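/* Note: the link-up handler above picks the speed bits for the active
 * interface (USXGMII, XLGMII or the GMII/RGMII/SGMII default), applies the
 * duplex setting, programs flow control only when both tx_pause and rx_pause
 * are set, re-enables the MAC, and finally refreshes the EEE and FPE state.
 */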
1080
1081 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1082         .validate = phylink_generic_validate,
1083         .mac_select_pcs = stmmac_mac_select_pcs,
1084         .mac_config = stmmac_mac_config,
1085         .mac_link_down = stmmac_mac_link_down,
1086         .mac_link_up = stmmac_mac_link_up,
1087 };
1088
1089 /**
1090  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1091  * @priv: driver private structure
1092  * Description: this is to verify if the HW supports the PCS.
1093  * Description: this verifies whether the HW supports the Physical Coding
1094  * Sublayer (PCS) interface, which can be used when the MAC is configured
1095  * for the TBI, RTBI, or SGMII PHY interface.
1096 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1097 {
1098         int interface = priv->plat->interface;
1099
1100         if (priv->dma_cap.pcs) {
1101                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1102                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1103                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1104                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1105                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1106                         priv->hw->pcs = STMMAC_PCS_RGMII;
1107                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1108                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1109                         priv->hw->pcs = STMMAC_PCS_SGMII;
1110                 }
1111         }
1112 }
1113
1114 /**
1115  * stmmac_init_phy - PHY initialization
1116  * @dev: net device structure
1117  * Description: it initializes the driver's PHY state and attaches the PHY
1118  * to the MAC driver.
1119  *  Return value:
1120  *  0 on success
1121  */
1122 static int stmmac_init_phy(struct net_device *dev)
1123 {
1124         struct stmmac_priv *priv = netdev_priv(dev);
1125         struct fwnode_handle *fwnode;
1126         int ret;
1127
1128         fwnode = of_fwnode_handle(priv->plat->phylink_node);
1129         if (!fwnode)
1130                 fwnode = dev_fwnode(priv->device);
1131
1132         if (fwnode)
1133                 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1134
1135         /* Some DT bindings do not set up the PHY handle. Let's try to
1136          * parse it manually.
1137          */
1138         if (!fwnode || ret) {
1139                 int addr = priv->plat->phy_addr;
1140                 struct phy_device *phydev;
1141
1142                 phydev = mdiobus_get_phy(priv->mii, addr);
1143                 if (!phydev) {
1144                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1145                         return -ENODEV;
1146                 }
1147
1148                 ret = phylink_connect_phy(priv->phylink, phydev);
1149         }
1150
1151         if (!priv->plat->pmt) {
1152                 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1153
1154                 phylink_ethtool_get_wol(priv->phylink, &wol);
1155                 device_set_wakeup_capable(priv->device, !!wol.supported);
1156         }
1157
1158         return ret;
1159 }
1160
1161 static int stmmac_phy_setup(struct stmmac_priv *priv)
1162 {
1163         struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1164         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1165         int max_speed = priv->plat->max_speed;
1166         int mode = priv->plat->phy_interface;
1167         struct phylink *phylink;
1168
1169         priv->phylink_config.dev = &priv->dev->dev;
1170         priv->phylink_config.type = PHYLINK_NETDEV;
1171         if (priv->plat->mdio_bus_data)
1172                 priv->phylink_config.ovr_an_inband =
1173                         mdio_bus_data->xpcs_an_inband;
1174
1175         if (!fwnode)
1176                 fwnode = dev_fwnode(priv->device);
1177
1178         /* Set the platform/firmware specified interface mode */
1179         __set_bit(mode, priv->phylink_config.supported_interfaces);
1180
1181         /* If we have an xpcs, it defines which PHY interfaces are supported. */
1182         if (priv->hw->xpcs)
1183                 xpcs_get_interfaces(priv->hw->xpcs,
1184                                     priv->phylink_config.supported_interfaces);
1185
1186         priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1187                 MAC_10 | MAC_100;
1188
1189         if (!max_speed || max_speed >= 1000)
1190                 priv->phylink_config.mac_capabilities |= MAC_1000;
1191
1192         if (priv->plat->has_gmac4) {
1193                 if (!max_speed || max_speed >= 2500)
1194                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1195         } else if (priv->plat->has_xgmac) {
1196                 if (!max_speed || max_speed >= 2500)
1197                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1198                 if (!max_speed || max_speed >= 5000)
1199                         priv->phylink_config.mac_capabilities |= MAC_5000FD;
1200                 if (!max_speed || max_speed >= 10000)
1201                         priv->phylink_config.mac_capabilities |= MAC_10000FD;
1202                 if (!max_speed || max_speed >= 25000)
1203                         priv->phylink_config.mac_capabilities |= MAC_25000FD;
1204                 if (!max_speed || max_speed >= 40000)
1205                         priv->phylink_config.mac_capabilities |= MAC_40000FD;
1206                 if (!max_speed || max_speed >= 50000)
1207                         priv->phylink_config.mac_capabilities |= MAC_50000FD;
1208                 if (!max_speed || max_speed >= 100000)
1209                         priv->phylink_config.mac_capabilities |= MAC_100000FD;
1210         }
1211
1212         /* Half-duplex can only work with a single queue */
1213         if (priv->plat->tx_queues_to_use > 1)
1214                 priv->phylink_config.mac_capabilities &=
1215                         ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1216
1217         phylink = phylink_create(&priv->phylink_config, fwnode,
1218                                  mode, &stmmac_phylink_mac_ops);
1219         if (IS_ERR(phylink))
1220                 return PTR_ERR(phylink);
1221
1222         priv->phylink = phylink;
1223         return 0;
1224 }
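/* Note: mac_capabilities starts from 10/100 plus symmetric/asymmetric pause,
 * is extended according to max_speed and the core type (GMAC4 adds 2.5G,
 * XGMAC adds 2.5G through 100G full duplex), and the half-duplex modes are
 * removed when more than one TX queue is in use.
 */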
1225
1226 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1227                                     struct stmmac_dma_conf *dma_conf)
1228 {
1229         u32 rx_cnt = priv->plat->rx_queues_to_use;
1230         unsigned int desc_size;
1231         void *head_rx;
1232         u32 queue;
1233
1234         /* Display RX rings */
1235         for (queue = 0; queue < rx_cnt; queue++) {
1236                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1237
1238                 pr_info("\tRX Queue %u rings\n", queue);
1239
1240                 if (priv->extend_desc) {
1241                         head_rx = (void *)rx_q->dma_erx;
1242                         desc_size = sizeof(struct dma_extended_desc);
1243                 } else {
1244                         head_rx = (void *)rx_q->dma_rx;
1245                         desc_size = sizeof(struct dma_desc);
1246                 }
1247
1248                 /* Display RX ring */
1249                 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1250                                     rx_q->dma_rx_phy, desc_size);
1251         }
1252 }
1253
1254 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1255                                     struct stmmac_dma_conf *dma_conf)
1256 {
1257         u32 tx_cnt = priv->plat->tx_queues_to_use;
1258         unsigned int desc_size;
1259         void *head_tx;
1260         u32 queue;
1261
1262         /* Display TX rings */
1263         for (queue = 0; queue < tx_cnt; queue++) {
1264                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1265
1266                 pr_info("\tTX Queue %d rings\n", queue);
1267
1268                 if (priv->extend_desc) {
1269                         head_tx = (void *)tx_q->dma_etx;
1270                         desc_size = sizeof(struct dma_extended_desc);
1271                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1272                         head_tx = (void *)tx_q->dma_entx;
1273                         desc_size = sizeof(struct dma_edesc);
1274                 } else {
1275                         head_tx = (void *)tx_q->dma_tx;
1276                         desc_size = sizeof(struct dma_desc);
1277                 }
1278
1279                 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1280                                     tx_q->dma_tx_phy, desc_size);
1281         }
1282 }
1283
1284 static void stmmac_display_rings(struct stmmac_priv *priv,
1285                                  struct stmmac_dma_conf *dma_conf)
1286 {
1287         /* Display RX ring */
1288         stmmac_display_rx_rings(priv, dma_conf);
1289
1290         /* Display TX ring */
1291         stmmac_display_tx_rings(priv, dma_conf);
1292 }
1293
1294 static int stmmac_set_bfsize(int mtu, int bufsize)
1295 {
1296         int ret = bufsize;
1297
1298         if (mtu >= BUF_SIZE_8KiB)
1299                 ret = BUF_SIZE_16KiB;
1300         else if (mtu >= BUF_SIZE_4KiB)
1301                 ret = BUF_SIZE_8KiB;
1302         else if (mtu >= BUF_SIZE_2KiB)
1303                 ret = BUF_SIZE_4KiB;
1304         else if (mtu > DEFAULT_BUFSIZE)
1305                 ret = BUF_SIZE_2KiB;
1306         else
1307                 ret = DEFAULT_BUFSIZE;
1308
1309         return ret;
1310 }
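/* Illustrative example (values assumed): an MTU of 3000 is >= BUF_SIZE_2KiB
 * but below BUF_SIZE_4KiB, so BUF_SIZE_4KiB is returned; an MTU of 1500 keeps
 * the DEFAULT_BUFSIZE of 1536 bytes.
 */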
1311
1312 /**
1313  * stmmac_clear_rx_descriptors - clear RX descriptors
1314  * @priv: driver private structure
1315  * @dma_conf: structure to take the dma data
1316  * @queue: RX queue index
1317  * Description: this function is called to clear the RX descriptors
1318  * whether basic or extended descriptors are used.
1319  */
1320 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1321                                         struct stmmac_dma_conf *dma_conf,
1322                                         u32 queue)
1323 {
1324         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1325         int i;
1326
1327         /* Clear the RX descriptors */
1328         for (i = 0; i < dma_conf->dma_rx_size; i++)
1329                 if (priv->extend_desc)
1330                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1331                                         priv->use_riwt, priv->mode,
1332                                         (i == dma_conf->dma_rx_size - 1),
1333                                         dma_conf->dma_buf_sz);
1334                 else
1335                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1336                                         priv->use_riwt, priv->mode,
1337                                         (i == dma_conf->dma_rx_size - 1),
1338                                         dma_conf->dma_buf_sz);
1339 }
1340
1341 /**
1342  * stmmac_clear_tx_descriptors - clear tx descriptors
1343  * @priv: driver private structure
1344  * @dma_conf: structure to take the dma data
1345  * @queue: TX queue index.
1346  * Description: this function is called to clear the TX descriptors
1347  * whether basic or extended descriptors are used.
1348  */
1349 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1350                                         struct stmmac_dma_conf *dma_conf,
1351                                         u32 queue)
1352 {
1353         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1354         int i;
1355
1356         /* Clear the TX descriptors */
1357         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1358                 int last = (i == (dma_conf->dma_tx_size - 1));
1359                 struct dma_desc *p;
1360
1361                 if (priv->extend_desc)
1362                         p = &tx_q->dma_etx[i].basic;
1363                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1364                         p = &tx_q->dma_entx[i].basic;
1365                 else
1366                         p = &tx_q->dma_tx[i];
1367
1368                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1369         }
1370 }
1371
1372 /**
1373  * stmmac_clear_descriptors - clear descriptors
1374  * @priv: driver private structure
1375  * @dma_conf: structure to take the dma data
1376  * Description: this function is called to clear the TX and RX descriptors
1377  * whether basic or extended descriptors are in use.
1378  */
1379 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1380                                      struct stmmac_dma_conf *dma_conf)
1381 {
1382         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1383         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1384         u32 queue;
1385
1386         /* Clear the RX descriptors */
1387         for (queue = 0; queue < rx_queue_cnt; queue++)
1388                 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1389
1390         /* Clear the TX descriptors */
1391         for (queue = 0; queue < tx_queue_cnt; queue++)
1392                 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1393 }
1394
1395 /**
1396  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1397  * @priv: driver private structure
1398  * @dma_conf: structure to take the dma data
1399  * @p: descriptor pointer
1400  * @i: descriptor index
1401  * @flags: gfp flag
1402  * @queue: RX queue index
1403  * Description: this function is called to allocate a receive buffer, perform
1404  * the DMA mapping and init the descriptor.
1405  */
1406 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1407                                   struct stmmac_dma_conf *dma_conf,
1408                                   struct dma_desc *p,
1409                                   int i, gfp_t flags, u32 queue)
1410 {
1411         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1412         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1413         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1414
1415         if (priv->dma_cap.addr64 <= 32)
1416                 gfp |= GFP_DMA32;
1417
1418         if (!buf->page) {
1419                 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1420                 if (!buf->page)
1421                         return -ENOMEM;
1422                 buf->page_offset = stmmac_rx_offset(priv);
1423         }
1424
1425         if (priv->sph && !buf->sec_page) {
1426                 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1427                 if (!buf->sec_page)
1428                         return -ENOMEM;
1429
1430                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1431                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1432         } else {
1433                 buf->sec_page = NULL;
1434                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1435         }
1436
1437         buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1438
1439         stmmac_set_desc_addr(priv, p, buf->addr);
1440         if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1441                 stmmac_init_desc3(priv, p);
1442
1443         return 0;
1444 }
1445
1446 /**
1447  * stmmac_free_rx_buffer - free RX dma buffers
1448  * @priv: private structure
1449  * @rx_q: RX queue
1450  * @i: buffer index.
1451  */
1452 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1453                                   struct stmmac_rx_queue *rx_q,
1454                                   int i)
1455 {
1456         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1457
1458         if (buf->page)
1459                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1460         buf->page = NULL;
1461
1462         if (buf->sec_page)
1463                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1464         buf->sec_page = NULL;
1465 }
1466
1467 /**
1468  * stmmac_free_tx_buffer - free TX dma buffers
1469  * @priv: private structure
1470  * @dma_conf: structure to take the dma data
1471  * @queue: TX queue index
1472  * @i: buffer index.
1473  */
1474 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1475                                   struct stmmac_dma_conf *dma_conf,
1476                                   u32 queue, int i)
1477 {
1478         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1479
1480         if (tx_q->tx_skbuff_dma[i].buf &&
1481             tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1482                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1483                         dma_unmap_page(priv->device,
1484                                        tx_q->tx_skbuff_dma[i].buf,
1485                                        tx_q->tx_skbuff_dma[i].len,
1486                                        DMA_TO_DEVICE);
1487                 else
1488                         dma_unmap_single(priv->device,
1489                                          tx_q->tx_skbuff_dma[i].buf,
1490                                          tx_q->tx_skbuff_dma[i].len,
1491                                          DMA_TO_DEVICE);
1492         }
1493
1494         if (tx_q->xdpf[i] &&
1495             (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1496              tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1497                 xdp_return_frame(tx_q->xdpf[i]);
1498                 tx_q->xdpf[i] = NULL;
1499         }
1500
1501         if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1502                 tx_q->xsk_frames_done++;
1503
1504         if (tx_q->tx_skbuff[i] &&
1505             tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1506                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1507                 tx_q->tx_skbuff[i] = NULL;
1508         }
1509
1510         tx_q->tx_skbuff_dma[i].buf = 0;
1511         tx_q->tx_skbuff_dma[i].map_as_page = false;
1512 }
1513
1514 /**
1515  * dma_free_rx_skbufs - free RX dma buffers
1516  * @priv: private structure
1517  * @dma_conf: structure to take the dma data
1518  * @queue: RX queue index
1519  */
1520 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1521                                struct stmmac_dma_conf *dma_conf,
1522                                u32 queue)
1523 {
1524         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1525         int i;
1526
1527         for (i = 0; i < dma_conf->dma_rx_size; i++)
1528                 stmmac_free_rx_buffer(priv, rx_q, i);
1529 }
1530
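/**
 * stmmac_alloc_rx_buffers - allocate RX buffers for a whole ring
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag
 * Description: walk the RX ring and allocate, map and program a buffer
 * for each descriptor via stmmac_init_rx_buffers().
 */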
1531 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1532                                    struct stmmac_dma_conf *dma_conf,
1533                                    u32 queue, gfp_t flags)
1534 {
1535         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1536         int i;
1537
1538         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1539                 struct dma_desc *p;
1540                 int ret;
1541
1542                 if (priv->extend_desc)
1543                         p = &((rx_q->dma_erx + i)->basic);
1544                 else
1545                         p = rx_q->dma_rx + i;
1546
1547                 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1548                                              queue);
1549                 if (ret)
1550                         return ret;
1551
1552                 rx_q->buf_alloc_num++;
1553         }
1554
1555         return 0;
1556 }
1557
1558 /**
1559  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1560  * @priv: private structure
1561  * @dma_conf: structure to take the dma data
1562  * @queue: RX queue index
1563  */
1564 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1565                                 struct stmmac_dma_conf *dma_conf,
1566                                 u32 queue)
1567 {
1568         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1569         int i;
1570
1571         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1572                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1573
1574                 if (!buf->xdp)
1575                         continue;
1576
1577                 xsk_buff_free(buf->xdp);
1578                 buf->xdp = NULL;
1579         }
1580 }
1581
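/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: fill the RX ring with zero-copy (AF_XDP) buffers taken
 * from the queue's XSK buffer pool and program their DMA addresses.
 */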
1582 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1583                                       struct stmmac_dma_conf *dma_conf,
1584                                       u32 queue)
1585 {
1586         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1587         int i;
1588
1589         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1590                 struct stmmac_rx_buffer *buf;
1591                 dma_addr_t dma_addr;
1592                 struct dma_desc *p;
1593
1594                 if (priv->extend_desc)
1595                         p = (struct dma_desc *)(rx_q->dma_erx + i);
1596                 else
1597                         p = rx_q->dma_rx + i;
1598
1599                 buf = &rx_q->buf_pool[i];
1600
1601                 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1602                 if (!buf->xdp)
1603                         return -ENOMEM;
1604
1605                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1606                 stmmac_set_desc_addr(priv, p, dma_addr);
1607                 rx_q->buf_alloc_num++;
1608         }
1609
1610         return 0;
1611 }
1612
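/**
 * stmmac_get_xsk_pool - get the XSK buffer pool bound to a queue
 * @priv: driver private structure
 * @queue: queue index
 * Description: returns the AF_XDP buffer pool registered for this queue,
 * or NULL if XDP is disabled or the queue is not in zero-copy mode.
 */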
1613 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1614 {
1615         if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1616                 return NULL;
1617
1618         return xsk_get_pool_from_qid(priv->dev, queue);
1619 }
1620
1621 /**
1622  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1623  * @priv: driver private structure
1624  * @dma_conf: structure to take the dma data
1625  * @queue: RX queue index
1626  * @flags: gfp flag.
1627  * Description: this function initializes the DMA RX descriptors
1628  * and allocates the socket buffers. It supports the chained and ring
1629  * modes.
1630  */
1631 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1632                                     struct stmmac_dma_conf *dma_conf,
1633                                     u32 queue, gfp_t flags)
1634 {
1635         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1636         int ret;
1637
1638         netif_dbg(priv, probe, priv->dev,
1639                   "(%s) dma_rx_phy=0x%08x\n", __func__,
1640                   (u32)rx_q->dma_rx_phy);
1641
1642         stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1643
1644         xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1645
1646         rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1647
1648         if (rx_q->xsk_pool) {
1649                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1650                                                    MEM_TYPE_XSK_BUFF_POOL,
1651                                                    NULL));
1652                 netdev_info(priv->dev,
1653                             "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1654                             rx_q->queue_index);
1655                 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1656         } else {
1657                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1658                                                    MEM_TYPE_PAGE_POOL,
1659                                                    rx_q->page_pool));
1660                 netdev_info(priv->dev,
1661                             "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1662                             rx_q->queue_index);
1663         }
1664
1665         if (rx_q->xsk_pool) {
1666                 /* RX XDP ZC buffer pool may not be populated, e.g.
1667                  * xdpsock TX-only.
1668                  */
1669                 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1670         } else {
1671                 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1672                 if (ret < 0)
1673                         return -ENOMEM;
1674         }
1675
1676         /* Setup the chained descriptor addresses */
1677         if (priv->mode == STMMAC_CHAIN_MODE) {
1678                 if (priv->extend_desc)
1679                         stmmac_mode_init(priv, rx_q->dma_erx,
1680                                          rx_q->dma_rx_phy,
1681                                          dma_conf->dma_rx_size, 1);
1682                 else
1683                         stmmac_mode_init(priv, rx_q->dma_rx,
1684                                          rx_q->dma_rx_phy,
1685                                          dma_conf->dma_rx_size, 0);
1686         }
1687
1688         return 0;
1689 }
1690
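/**
 * init_dma_rx_desc_rings - init the RX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initialize the RX ring of every queue in use and, on
 * failure, release the buffers already allocated.
 */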
1691 static int init_dma_rx_desc_rings(struct net_device *dev,
1692                                   struct stmmac_dma_conf *dma_conf,
1693                                   gfp_t flags)
1694 {
1695         struct stmmac_priv *priv = netdev_priv(dev);
1696         u32 rx_count = priv->plat->rx_queues_to_use;
1697         int queue;
1698         int ret;
1699
1700         /* RX INITIALIZATION */
1701         netif_dbg(priv, probe, priv->dev,
1702                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1703
1704         for (queue = 0; queue < rx_count; queue++) {
1705                 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1706                 if (ret)
1707                         goto err_init_rx_buffers;
1708         }
1709
1710         return 0;
1711
1712 err_init_rx_buffers:
1713         while (queue >= 0) {
1714                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1715
1716                 if (rx_q->xsk_pool)
1717                         dma_free_rx_xskbufs(priv, dma_conf, queue);
1718                 else
1719                         dma_free_rx_skbufs(priv, dma_conf, queue);
1720
1721                 rx_q->buf_alloc_num = 0;
1722                 rx_q->xsk_pool = NULL;
1723
1724                 queue--;
1725         }
1726
1727         return ret;
1728 }
1729
1730 /**
1731  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1732  * @priv: driver private structure
1733  * @dma_conf: structure to take the dma data
1734  * @queue: TX queue index
1735  * Description: this function initializes the DMA TX descriptors
1736  * and allocates the socket buffers. It supports the chained and ring
1737  * modes.
1738  */
1739 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1740                                     struct stmmac_dma_conf *dma_conf,
1741                                     u32 queue)
1742 {
1743         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1744         int i;
1745
1746         netif_dbg(priv, probe, priv->dev,
1747                   "(%s) dma_tx_phy=0x%08x\n", __func__,
1748                   (u32)tx_q->dma_tx_phy);
1749
1750         /* Setup the chained descriptor addresses */
1751         if (priv->mode == STMMAC_CHAIN_MODE) {
1752                 if (priv->extend_desc)
1753                         stmmac_mode_init(priv, tx_q->dma_etx,
1754                                          tx_q->dma_tx_phy,
1755                                          dma_conf->dma_tx_size, 1);
1756                 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1757                         stmmac_mode_init(priv, tx_q->dma_tx,
1758                                          tx_q->dma_tx_phy,
1759                                          dma_conf->dma_tx_size, 0);
1760         }
1761
1762         tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1763
1764         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1765                 struct dma_desc *p;
1766
1767                 if (priv->extend_desc)
1768                         p = &((tx_q->dma_etx + i)->basic);
1769                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1770                         p = &((tx_q->dma_entx + i)->basic);
1771                 else
1772                         p = tx_q->dma_tx + i;
1773
1774                 stmmac_clear_desc(priv, p);
1775
1776                 tx_q->tx_skbuff_dma[i].buf = 0;
1777                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1778                 tx_q->tx_skbuff_dma[i].len = 0;
1779                 tx_q->tx_skbuff_dma[i].last_segment = false;
1780                 tx_q->tx_skbuff[i] = NULL;
1781         }
1782
1783         return 0;
1784 }
1785
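/**
 * init_dma_tx_desc_rings - init the TX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: initialize the TX descriptors of every queue in use.
 */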
1786 static int init_dma_tx_desc_rings(struct net_device *dev,
1787                                   struct stmmac_dma_conf *dma_conf)
1788 {
1789         struct stmmac_priv *priv = netdev_priv(dev);
1790         u32 tx_queue_cnt;
1791         u32 queue;
1792
1793         tx_queue_cnt = priv->plat->tx_queues_to_use;
1794
1795         for (queue = 0; queue < tx_queue_cnt; queue++)
1796                 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1797
1798         return 0;
1799 }
1800
1801 /**
1802  * init_dma_desc_rings - init the RX/TX descriptor rings
1803  * @dev: net device structure
1804  * @dma_conf: structure to take the dma data
1805  * @flags: gfp flag.
1806  * Description: this function initializes the DMA RX/TX descriptors
1807  * and allocates the socket buffers. It supports the chained and ring
1808  * modes.
1809  */
1810 static int init_dma_desc_rings(struct net_device *dev,
1811                                struct stmmac_dma_conf *dma_conf,
1812                                gfp_t flags)
1813 {
1814         struct stmmac_priv *priv = netdev_priv(dev);
1815         int ret;
1816
1817         ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1818         if (ret)
1819                 return ret;
1820
1821         ret = init_dma_tx_desc_rings(dev, dma_conf);
1822
1823         stmmac_clear_descriptors(priv, dma_conf);
1824
1825         if (netif_msg_hw(priv))
1826                 stmmac_display_rings(priv, dma_conf);
1827
1828         return ret;
1829 }
1830
1831 /**
1832  * dma_free_tx_skbufs - free TX dma buffers
1833  * @priv: private structure
1834  * @dma_conf: structure to take the dma data
1835  * @queue: TX queue index
1836  */
1837 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1838                                struct stmmac_dma_conf *dma_conf,
1839                                u32 queue)
1840 {
1841         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1842         int i;
1843
1844         tx_q->xsk_frames_done = 0;
1845
1846         for (i = 0; i < dma_conf->dma_tx_size; i++)
1847                 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1848
1849         if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1850                 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1851                 tx_q->xsk_frames_done = 0;
1852                 tx_q->xsk_pool = NULL;
1853         }
1854 }
1855
1856 /**
1857  * stmmac_free_tx_skbufs - free TX skb buffers
1858  * @priv: private structure
1859  */
1860 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1861 {
1862         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1863         u32 queue;
1864
1865         for (queue = 0; queue < tx_queue_cnt; queue++)
1866                 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1867 }
1868
1869 /**
1870  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1871  * @priv: private structure
1872  * @dma_conf: structure to take the dma data
1873  * @queue: RX queue index
1874  */
1875 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1876                                          struct stmmac_dma_conf *dma_conf,
1877                                          u32 queue)
1878 {
1879         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1880
1881         /* Release the DMA RX socket buffers */
1882         if (rx_q->xsk_pool)
1883                 dma_free_rx_xskbufs(priv, dma_conf, queue);
1884         else
1885                 dma_free_rx_skbufs(priv, dma_conf, queue);
1886
1887         rx_q->buf_alloc_num = 0;
1888         rx_q->xsk_pool = NULL;
1889
1890         /* Free DMA regions of consistent memory previously allocated */
1891         if (!priv->extend_desc)
1892                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1893                                   sizeof(struct dma_desc),
1894                                   rx_q->dma_rx, rx_q->dma_rx_phy);
1895         else
1896                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1897                                   sizeof(struct dma_extended_desc),
1898                                   rx_q->dma_erx, rx_q->dma_rx_phy);
1899
1900         if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1901                 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1902
1903         kfree(rx_q->buf_pool);
1904         if (rx_q->page_pool)
1905                 page_pool_destroy(rx_q->page_pool);
1906 }
1907
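/**
 * free_dma_rx_desc_resources - free RX dma desc resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */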
1908 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1909                                        struct stmmac_dma_conf *dma_conf)
1910 {
1911         u32 rx_count = priv->plat->rx_queues_to_use;
1912         u32 queue;
1913
1914         /* Free RX queue resources */
1915         for (queue = 0; queue < rx_count; queue++)
1916                 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1917 }
1918
1919 /**
1920  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1921  * @priv: private structure
1922  * @dma_conf: structure to take the dma data
1923  * @queue: TX queue index
1924  */
1925 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1926                                          struct stmmac_dma_conf *dma_conf,
1927                                          u32 queue)
1928 {
1929         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1930         size_t size;
1931         void *addr;
1932
1933         /* Release the DMA TX socket buffers */
1934         dma_free_tx_skbufs(priv, dma_conf, queue);
1935
1936         if (priv->extend_desc) {
1937                 size = sizeof(struct dma_extended_desc);
1938                 addr = tx_q->dma_etx;
1939         } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1940                 size = sizeof(struct dma_edesc);
1941                 addr = tx_q->dma_entx;
1942         } else {
1943                 size = sizeof(struct dma_desc);
1944                 addr = tx_q->dma_tx;
1945         }
1946
1947         size *= dma_conf->dma_tx_size;
1948
1949         dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1950
1951         kfree(tx_q->tx_skbuff_dma);
1952         kfree(tx_q->tx_skbuff);
1953 }
1954
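/**
 * free_dma_tx_desc_resources - free TX dma desc resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */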
1955 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1956                                        struct stmmac_dma_conf *dma_conf)
1957 {
1958         u32 tx_count = priv->plat->tx_queues_to_use;
1959         u32 queue;
1960
1961         /* Free TX queue resources */
1962         for (queue = 0; queue < tx_count; queue++)
1963                 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1964 }
1965
1966 /**
1967  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1968  * @priv: private structure
1969  * @dma_conf: structure to take the dma data
1970  * @queue: RX queue index
1971  * Description: according to which descriptor type is in use (extended or
1972  * basic), this function allocates the resources for the RX path of one
1973  * queue. For example, it pre-allocates the RX buffers (page pool) in
1974  * order to allow the zero-copy mechanism.
1975  */
1976 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1977                                          struct stmmac_dma_conf *dma_conf,
1978                                          u32 queue)
1979 {
1980         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1981         struct stmmac_channel *ch = &priv->channel[queue];
1982         bool xdp_prog = stmmac_xdp_is_enabled(priv);
1983         struct page_pool_params pp_params = { 0 };
1984         unsigned int num_pages;
1985         unsigned int napi_id;
1986         int ret;
1987
1988         rx_q->queue_index = queue;
1989         rx_q->priv_data = priv;
1990
1991         pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1992         pp_params.pool_size = dma_conf->dma_rx_size;
1993         num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
1994         pp_params.order = ilog2(num_pages);
1995         pp_params.nid = dev_to_node(priv->device);
1996         pp_params.dev = priv->device;
1997         pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
1998         pp_params.offset = stmmac_rx_offset(priv);
1999         pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2000
2001         rx_q->page_pool = page_pool_create(&pp_params);
2002         if (IS_ERR(rx_q->page_pool)) {
2003                 ret = PTR_ERR(rx_q->page_pool);
2004                 rx_q->page_pool = NULL;
2005                 return ret;
2006         }
2007
2008         rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2009                                  sizeof(*rx_q->buf_pool),
2010                                  GFP_KERNEL);
2011         if (!rx_q->buf_pool)
2012                 return -ENOMEM;
2013
2014         if (priv->extend_desc) {
2015                 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2016                                                    dma_conf->dma_rx_size *
2017                                                    sizeof(struct dma_extended_desc),
2018                                                    &rx_q->dma_rx_phy,
2019                                                    GFP_KERNEL);
2020                 if (!rx_q->dma_erx)
2021                         return -ENOMEM;
2022
2023         } else {
2024                 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2025                                                   dma_conf->dma_rx_size *
2026                                                   sizeof(struct dma_desc),
2027                                                   &rx_q->dma_rx_phy,
2028                                                   GFP_KERNEL);
2029                 if (!rx_q->dma_rx)
2030                         return -ENOMEM;
2031         }
2032
2033         if (stmmac_xdp_is_enabled(priv) &&
2034             test_bit(queue, priv->af_xdp_zc_qps))
2035                 napi_id = ch->rxtx_napi.napi_id;
2036         else
2037                 napi_id = ch->rx_napi.napi_id;
2038
2039         ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2040                                rx_q->queue_index,
2041                                napi_id);
2042         if (ret) {
2043                 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2044                 return -EINVAL;
2045         }
2046
2047         return 0;
2048 }
2049
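/**
 * alloc_dma_rx_desc_resources - alloc RX resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocate the RX resources of every queue in use; if one
 * queue fails, everything allocated so far is released.
 */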
2050 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2051                                        struct stmmac_dma_conf *dma_conf)
2052 {
2053         u32 rx_count = priv->plat->rx_queues_to_use;
2054         u32 queue;
2055         int ret;
2056
2057         /* RX queues buffers and DMA */
2058         for (queue = 0; queue < rx_count; queue++) {
2059                 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2060                 if (ret)
2061                         goto err_dma;
2062         }
2063
2064         return 0;
2065
2066 err_dma:
2067         free_dma_rx_desc_resources(priv, dma_conf);
2068
2069         return ret;
2070 }
2071
2072 /**
2073  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2074  * @priv: private structure
2075  * @dma_conf: structure to take the dma data
2076  * @queue: TX queue index
2077  * Description: according to which descriptor type is in use (extended or
2078  * basic), this function allocates the resources for the TX path of one
2079  * queue: the tx_skbuff/tx_skbuff_dma arrays and the coherent descriptor
2080  * ring.
2081  */
2082 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2083                                          struct stmmac_dma_conf *dma_conf,
2084                                          u32 queue)
2085 {
2086         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2087         size_t size;
2088         void *addr;
2089
2090         tx_q->queue_index = queue;
2091         tx_q->priv_data = priv;
2092
2093         tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2094                                       sizeof(*tx_q->tx_skbuff_dma),
2095                                       GFP_KERNEL);
2096         if (!tx_q->tx_skbuff_dma)
2097                 return -ENOMEM;
2098
2099         tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2100                                   sizeof(struct sk_buff *),
2101                                   GFP_KERNEL);
2102         if (!tx_q->tx_skbuff)
2103                 return -ENOMEM;
2104
2105         if (priv->extend_desc)
2106                 size = sizeof(struct dma_extended_desc);
2107         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2108                 size = sizeof(struct dma_edesc);
2109         else
2110                 size = sizeof(struct dma_desc);
2111
2112         size *= dma_conf->dma_tx_size;
2113
2114         addr = dma_alloc_coherent(priv->device, size,
2115                                   &tx_q->dma_tx_phy, GFP_KERNEL);
2116         if (!addr)
2117                 return -ENOMEM;
2118
2119         if (priv->extend_desc)
2120                 tx_q->dma_etx = addr;
2121         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2122                 tx_q->dma_entx = addr;
2123         else
2124                 tx_q->dma_tx = addr;
2125
2126         return 0;
2127 }
2128
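/**
 * alloc_dma_tx_desc_resources - alloc TX resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocate the TX resources of every queue in use; if one
 * queue fails, everything allocated so far is released.
 */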
2129 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2130                                        struct stmmac_dma_conf *dma_conf)
2131 {
2132         u32 tx_count = priv->plat->tx_queues_to_use;
2133         u32 queue;
2134         int ret;
2135
2136         /* TX queues buffers and DMA */
2137         for (queue = 0; queue < tx_count; queue++) {
2138                 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2139                 if (ret)
2140                         goto err_dma;
2141         }
2142
2143         return 0;
2144
2145 err_dma:
2146         free_dma_tx_desc_resources(priv, dma_conf);
2147         return ret;
2148 }
2149
2150 /**
2151  * alloc_dma_desc_resources - alloc TX/RX resources.
2152  * @priv: private structure
2153  * @dma_conf: structure to take the dma data
2154  * Description: according to which descriptor type is in use (extended or
2155  * basic), this function allocates the resources for the TX and RX paths.
2156  * In case of reception, for example, it pre-allocates the RX buffers in
2157  * order to allow the zero-copy mechanism.
2158  */
2159 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2160                                     struct stmmac_dma_conf *dma_conf)
2161 {
2162         /* RX Allocation */
2163         int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2164
2165         if (ret)
2166                 return ret;
2167
2168         ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2169
2170         return ret;
2171 }
2172
2173 /**
2174  * free_dma_desc_resources - free dma desc resources
2175  * @priv: private structure
2176  * @dma_conf: structure to take the dma data
2177  */
2178 static void free_dma_desc_resources(struct stmmac_priv *priv,
2179                                     struct stmmac_dma_conf *dma_conf)
2180 {
2181         /* Release the DMA TX socket buffers */
2182         free_dma_tx_desc_resources(priv, dma_conf);
2183
2184         /* Release the DMA RX socket buffers later
2185          * to ensure all pending XDP_TX buffers are returned.
2186          */
2187         free_dma_rx_desc_resources(priv, dma_conf);
2188 }
2189
2190 /**
2191  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2192  *  @priv: driver private structure
2193  *  Description: It is used for enabling the rx queues in the MAC
2194  */
2195 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2196 {
2197         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2198         int queue;
2199         u8 mode;
2200
2201         for (queue = 0; queue < rx_queues_count; queue++) {
2202                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2203                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2204         }
2205 }
2206
2207 /**
2208  * stmmac_start_rx_dma - start RX DMA channel
2209  * @priv: driver private structure
2210  * @chan: RX channel index
2211  * Description:
2212  * This starts a RX DMA channel
2213  */
2214 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2215 {
2216         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2217         stmmac_start_rx(priv, priv->ioaddr, chan);
2218 }
2219
2220 /**
2221  * stmmac_start_tx_dma - start TX DMA channel
2222  * @priv: driver private structure
2223  * @chan: TX channel index
2224  * Description:
2225  * This starts a TX DMA channel
2226  */
2227 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2228 {
2229         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2230         stmmac_start_tx(priv, priv->ioaddr, chan);
2231 }
2232
2233 /**
2234  * stmmac_stop_rx_dma - stop RX DMA channel
2235  * @priv: driver private structure
2236  * @chan: RX channel index
2237  * Description:
2238  * This stops a RX DMA channel
2239  */
2240 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2241 {
2242         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2243         stmmac_stop_rx(priv, priv->ioaddr, chan);
2244 }
2245
2246 /**
2247  * stmmac_stop_tx_dma - stop TX DMA channel
2248  * @priv: driver private structure
2249  * @chan: TX channel index
2250  * Description:
2251  * This stops a TX DMA channel
2252  */
2253 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2254 {
2255         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2256         stmmac_stop_tx(priv, priv->ioaddr, chan);
2257 }
2258
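/**
 * stmmac_enable_all_dma_irq - enable RX/TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: re-enable the RX and TX DMA interrupts of every channel,
 * taking the channel lock around each update.
 */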
2259 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2260 {
2261         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2262         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2263         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2264         u32 chan;
2265
2266         for (chan = 0; chan < dma_csr_ch; chan++) {
2267                 struct stmmac_channel *ch = &priv->channel[chan];
2268                 unsigned long flags;
2269
2270                 spin_lock_irqsave(&ch->lock, flags);
2271                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2272                 spin_unlock_irqrestore(&ch->lock, flags);
2273         }
2274 }
2275
2276 /**
2277  * stmmac_start_all_dma - start all RX and TX DMA channels
2278  * @priv: driver private structure
2279  * Description:
2280  * This starts all the RX and TX DMA channels
2281  */
2282 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2283 {
2284         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2285         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2286         u32 chan = 0;
2287
2288         for (chan = 0; chan < rx_channels_count; chan++)
2289                 stmmac_start_rx_dma(priv, chan);
2290
2291         for (chan = 0; chan < tx_channels_count; chan++)
2292                 stmmac_start_tx_dma(priv, chan);
2293 }
2294
2295 /**
2296  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2297  * @priv: driver private structure
2298  * Description:
2299  * This stops the RX and TX DMA channels
2300  */
2301 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2302 {
2303         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2304         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2305         u32 chan = 0;
2306
2307         for (chan = 0; chan < rx_channels_count; chan++)
2308                 stmmac_stop_rx_dma(priv, chan);
2309
2310         for (chan = 0; chan < tx_channels_count; chan++)
2311                 stmmac_stop_tx_dma(priv, chan);
2312 }
2313
2314 /**
2315  *  stmmac_dma_operation_mode - HW DMA operation mode
2316  *  @priv: driver private structure
2317  *  Description: it is used for configuring the DMA operation mode register in
2318  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2319  */
2320 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2321 {
2322         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2323         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2324         int rxfifosz = priv->plat->rx_fifo_size;
2325         int txfifosz = priv->plat->tx_fifo_size;
2326         u32 txmode = 0;
2327         u32 rxmode = 0;
2328         u32 chan = 0;
2329         u8 qmode = 0;
2330
2331         if (rxfifosz == 0)
2332                 rxfifosz = priv->dma_cap.rx_fifo_size;
2333         if (txfifosz == 0)
2334                 txfifosz = priv->dma_cap.tx_fifo_size;
2335
2336         /* Adjust for real per queue fifo size */
2337         rxfifosz /= rx_channels_count;
2338         txfifosz /= tx_channels_count;
2339
2340         if (priv->plat->force_thresh_dma_mode) {
2341                 txmode = tc;
2342                 rxmode = tc;
2343         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2344                 /*
2345                  * In case of GMAC, SF mode can be enabled
2346                  * to perform the TX COE in HW. This depends on:
2347                  * 1) TX COE being actually supported;
2348                  * 2) there being no buggy Jumbo frame support that
2349                  *    requires not inserting the csum in the TDES.
2350                  */
2351                 txmode = SF_DMA_MODE;
2352                 rxmode = SF_DMA_MODE;
2353                 priv->xstats.threshold = SF_DMA_MODE;
2354         } else {
2355                 txmode = tc;
2356                 rxmode = SF_DMA_MODE;
2357         }
2358
2359         /* configure all channels */
2360         for (chan = 0; chan < rx_channels_count; chan++) {
2361                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2362                 u32 buf_size;
2363
2364                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2365
2366                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2367                                 rxfifosz, qmode);
2368
2369                 if (rx_q->xsk_pool) {
2370                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2371                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2372                                               buf_size,
2373                                               chan);
2374                 } else {
2375                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2376                                               priv->dma_conf.dma_buf_sz,
2377                                               chan);
2378                 }
2379         }
2380
2381         for (chan = 0; chan < tx_channels_count; chan++) {
2382                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2383
2384                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2385                                 txfifosz, qmode);
2386         }
2387 }
2388
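/**
 * stmmac_xdp_xmit_zc - submit XSK (AF_XDP zero-copy) frames for transmission
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: peek descriptors from the XSK pool, program them into the
 * TX ring shared with the slow path and kick the DMA. Returns true when
 * the budget was not exhausted and no XSK TX work is left pending.
 */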
2389 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2390 {
2391         struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2392         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2393         struct xsk_buff_pool *pool = tx_q->xsk_pool;
2394         unsigned int entry = tx_q->cur_tx;
2395         struct dma_desc *tx_desc = NULL;
2396         struct xdp_desc xdp_desc;
2397         bool work_done = true;
2398
2399         /* Avoids TX time-out as we are sharing with slow path */
2400         txq_trans_cond_update(nq);
2401
2402         budget = min(budget, stmmac_tx_avail(priv, queue));
2403
2404         while (budget-- > 0) {
2405                 dma_addr_t dma_addr;
2406                 bool set_ic;
2407
2408                 /* We are sharing with the slow path and stop XSK TX desc submission when
2409                  * the available TX ring space is less than the threshold.
2410                  */
2411                 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2412                     !netif_carrier_ok(priv->dev)) {
2413                         work_done = false;
2414                         break;
2415                 }
2416
2417                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2418                         break;
2419
2420                 if (likely(priv->extend_desc))
2421                         tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2422                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2423                         tx_desc = &tx_q->dma_entx[entry].basic;
2424                 else
2425                         tx_desc = tx_q->dma_tx + entry;
2426
2427                 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2428                 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2429
2430                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2431
2432                 /* To return the XDP buffer to the XSK pool, we simply call
2433                  * xsk_tx_completed(), so we don't need to fill up
2434                  * 'buf' and 'xdpf'.
2435                  */
2436                 tx_q->tx_skbuff_dma[entry].buf = 0;
2437                 tx_q->xdpf[entry] = NULL;
2438
2439                 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2440                 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2441                 tx_q->tx_skbuff_dma[entry].last_segment = true;
2442                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2443
2444                 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2445
2446                 tx_q->tx_count_frames++;
2447
2448                 if (!priv->tx_coal_frames[queue])
2449                         set_ic = false;
2450                 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2451                         set_ic = true;
2452                 else
2453                         set_ic = false;
2454
2455                 if (set_ic) {
2456                         tx_q->tx_count_frames = 0;
2457                         stmmac_set_tx_ic(priv, tx_desc);
2458                         priv->xstats.tx_set_ic_bit++;
2459                 }
2460
2461                 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2462                                        true, priv->mode, true, true,
2463                                        xdp_desc.len);
2464
2465                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2466
2467                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2468                 entry = tx_q->cur_tx;
2469         }
2470
2471         if (tx_desc) {
2472                 stmmac_flush_tx_descriptors(priv, queue);
2473                 xsk_tx_release(pool);
2474         }
2475
2476         /* Return true if both of the following conditions are met
2477          *  a) TX Budget is still available
2478          *  b) work_done = true when XSK TX desc peek is empty (no more
2479          *     pending XSK TX for transmission)
2480          */
2481         return !!budget && work_done;
2482 }
2483
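/**
 * stmmac_bump_dma_threshold - raise the TX DMA threshold of a channel
 * @priv: driver private structure
 * @chan: channel index
 * Description: when threshold (non-SF) mode is in use, increase tc by 64
 * and reprogram the DMA operation mode, typically after a TX error that
 * suggests the current threshold is too low.
 */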
2484 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2485 {
2486         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2487                 tc += 64;
2488
2489                 if (priv->plat->force_thresh_dma_mode)
2490                         stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2491                 else
2492                         stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2493                                                       chan);
2494
2495                 priv->xstats.threshold = tc;
2496         }
2497 }
2498
2499 /**
2500  * stmmac_tx_clean - to manage the transmission completion
2501  * @priv: driver private structure
2502  * @budget: napi budget limiting this function's packet handling
2503  * @queue: TX queue index
2504  * Description: it reclaims the transmit resources after transmission completes.
2505  */
2506 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2507 {
2508         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2509         unsigned int bytes_compl = 0, pkts_compl = 0;
2510         unsigned int entry, xmits = 0, count = 0;
2511
2512         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2513
2514         priv->xstats.tx_clean++;
2515
2516         tx_q->xsk_frames_done = 0;
2517
2518         entry = tx_q->dirty_tx;
2519
2520         /* Try to clean all TX complete frame in 1 shot */
2521         while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2522                 struct xdp_frame *xdpf;
2523                 struct sk_buff *skb;
2524                 struct dma_desc *p;
2525                 int status;
2526
2527                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2528                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2529                         xdpf = tx_q->xdpf[entry];
2530                         skb = NULL;
2531                 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2532                         xdpf = NULL;
2533                         skb = tx_q->tx_skbuff[entry];
2534                 } else {
2535                         xdpf = NULL;
2536                         skb = NULL;
2537                 }
2538
2539                 if (priv->extend_desc)
2540                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2541                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2542                         p = &tx_q->dma_entx[entry].basic;
2543                 else
2544                         p = tx_q->dma_tx + entry;
2545
2546                 status = stmmac_tx_status(priv, &priv->dev->stats,
2547                                 &priv->xstats, p, priv->ioaddr);
2548                 /* Check if the descriptor is owned by the DMA */
2549                 if (unlikely(status & tx_dma_own))
2550                         break;
2551
2552                 count++;
2553
2554                 /* Make sure descriptor fields are read after reading
2555                  * the own bit.
2556                  */
2557                 dma_rmb();
2558
2559                 /* Just consider the last segment and ...*/
2560                 if (likely(!(status & tx_not_ls))) {
2561                         /* ... verify the status error condition */
2562                         if (unlikely(status & tx_err)) {
2563                                 priv->dev->stats.tx_errors++;
2564                                 if (unlikely(status & tx_err_bump_tc))
2565                                         stmmac_bump_dma_threshold(priv, queue);
2566                         } else {
2567                                 priv->dev->stats.tx_packets++;
2568                                 priv->xstats.tx_pkt_n++;
2569                                 priv->xstats.txq_stats[queue].tx_pkt_n++;
2570                         }
2571                         if (skb)
2572                                 stmmac_get_tx_hwtstamp(priv, p, skb);
2573                 }
2574
2575                 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2576                            tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2577                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2578                                 dma_unmap_page(priv->device,
2579                                                tx_q->tx_skbuff_dma[entry].buf,
2580                                                tx_q->tx_skbuff_dma[entry].len,
2581                                                DMA_TO_DEVICE);
2582                         else
2583                                 dma_unmap_single(priv->device,
2584                                                  tx_q->tx_skbuff_dma[entry].buf,
2585                                                  tx_q->tx_skbuff_dma[entry].len,
2586                                                  DMA_TO_DEVICE);
2587                         tx_q->tx_skbuff_dma[entry].buf = 0;
2588                         tx_q->tx_skbuff_dma[entry].len = 0;
2589                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2590                 }
2591
2592                 stmmac_clean_desc3(priv, tx_q, p);
2593
2594                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2595                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2596
2597                 if (xdpf &&
2598                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2599                         xdp_return_frame_rx_napi(xdpf);
2600                         tx_q->xdpf[entry] = NULL;
2601                 }
2602
2603                 if (xdpf &&
2604                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2605                         xdp_return_frame(xdpf);
2606                         tx_q->xdpf[entry] = NULL;
2607                 }
2608
2609                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2610                         tx_q->xsk_frames_done++;
2611
2612                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2613                         if (likely(skb)) {
2614                                 pkts_compl++;
2615                                 bytes_compl += skb->len;
2616                                 dev_consume_skb_any(skb);
2617                                 tx_q->tx_skbuff[entry] = NULL;
2618                         }
2619                 }
2620
2621                 stmmac_release_tx_desc(priv, p, priv->mode);
2622
2623                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2624         }
2625         tx_q->dirty_tx = entry;
2626
2627         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2628                                   pkts_compl, bytes_compl);
2629
2630         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2631                                                                 queue))) &&
2632             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2633
2634                 netif_dbg(priv, tx_done, priv->dev,
2635                           "%s: restart transmit\n", __func__);
2636                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2637         }
2638
2639         if (tx_q->xsk_pool) {
2640                 bool work_done;
2641
2642                 if (tx_q->xsk_frames_done)
2643                         xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2644
2645                 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2646                         xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2647
2648                 /* For XSK TX, we try to send as many as possible.
2649                  * If XSK work done (XSK TX desc empty and budget still
2650                  * available), return "budget - 1" to reenable TX IRQ.
2651                  * Else, return "budget" to make NAPI continue polling.
2652                  */
2653                 work_done = stmmac_xdp_xmit_zc(priv, queue,
2654                                                STMMAC_XSK_TX_BUDGET_MAX);
2655                 if (work_done)
2656                         xmits = budget - 1;
2657                 else
2658                         xmits = budget;
2659         }
2660
2661         if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2662             priv->eee_sw_timer_en) {
2663                 if (stmmac_enable_eee_mode(priv))
2664                         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2665         }
2666
2667         /* We still have pending packets, let's call for a new scheduling */
2668         if (tx_q->dirty_tx != tx_q->cur_tx)
2669                 hrtimer_start(&tx_q->txtimer,
2670                               STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2671                               HRTIMER_MODE_REL);
2672
2673         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2674
2675         /* Combine decisions from TX clean and XSK TX */
2676         return max(count, xmits);
2677 }
2678
2679 /**
2680  * stmmac_tx_err - to manage the tx error
2681  * @priv: driver private structure
2682  * @chan: channel index
2683  * Description: it cleans the descriptors and restarts the transmission
2684  * in case of transmission errors.
2685  */
2686 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2687 {
2688         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2689
2690         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2691
2692         stmmac_stop_tx_dma(priv, chan);
2693         dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2694         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2695         stmmac_reset_tx_queue(priv, chan);
2696         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2697                             tx_q->dma_tx_phy, chan);
2698         stmmac_start_tx_dma(priv, chan);
2699
2700         priv->dev->stats.tx_errors++;
2701         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2702 }
2703
2704 /**
2705  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2706  *  @priv: driver private structure
2707  *  @txmode: TX operating mode
2708  *  @rxmode: RX operating mode
2709  *  @chan: channel index
2710  *  Description: it is used for configuring the DMA operation mode at
2711  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2712  *  mode.
2713  */
2714 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2715                                           u32 rxmode, u32 chan)
2716 {
2717         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2718         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2719         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2720         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2721         int rxfifosz = priv->plat->rx_fifo_size;
2722         int txfifosz = priv->plat->tx_fifo_size;
2723
2724         if (rxfifosz == 0)
2725                 rxfifosz = priv->dma_cap.rx_fifo_size;
2726         if (txfifosz == 0)
2727                 txfifosz = priv->dma_cap.tx_fifo_size;
2728
2729         /* Adjust for real per queue fifo size */
2730         rxfifosz /= rx_channels_count;
2731         txfifosz /= tx_channels_count;
2732
2733         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2734         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2735 }
2736
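/**
 * stmmac_safety_feat_interrupt - handle the safety feature interrupt
 * @priv: driver private structure
 * Description: read the safety feature IRQ status; if an error is
 * reported, trigger the global error handling and return true.
 */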
2737 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2738 {
2739         int ret;
2740
2741         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2742                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2743         if (ret && (ret != -EINVAL)) {
2744                 stmmac_global_err(priv);
2745                 return true;
2746         }
2747
2748         return false;
2749 }
2750
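/**
 * stmmac_napi_check - check the DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * @dir: DMA direction(s) to check
 * Description: read the per-channel DMA interrupt status and, if RX or TX
 * work is pending, mask the corresponding DMA interrupt and schedule the
 * matching NAPI instance (rx, tx or rxtx for XSK queues).
 */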
2751 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2752 {
2753         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2754                                                  &priv->xstats, chan, dir);
2755         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2756         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2757         struct stmmac_channel *ch = &priv->channel[chan];
2758         struct napi_struct *rx_napi;
2759         struct napi_struct *tx_napi;
2760         unsigned long flags;
2761
2762         rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2763         tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2764
2765         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2766                 if (napi_schedule_prep(rx_napi)) {
2767                         spin_lock_irqsave(&ch->lock, flags);
2768                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2769                         spin_unlock_irqrestore(&ch->lock, flags);
2770                         __napi_schedule(rx_napi);
2771                 }
2772         }
2773
2774         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2775                 if (napi_schedule_prep(tx_napi)) {
2776                         spin_lock_irqsave(&ch->lock, flags);
2777                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2778                         spin_unlock_irqrestore(&ch->lock, flags);
2779                         __napi_schedule(tx_napi);
2780                 }
2781         }
2782
2783         return status;
2784 }
2785
2786 /**
2787  * stmmac_dma_interrupt - DMA ISR
2788  * @priv: driver private structure
2789  * Description: this is the DMA ISR. It is called by the main ISR.
2790  * It calls the dwmac dma routine and schedules the poll method when there
2791  * is work to be done.
2792  */
2793 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2794 {
2795         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2796         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2797         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2798                                 tx_channel_count : rx_channel_count;
2799         u32 chan;
2800         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2801
2802         /* Make sure we never check beyond our status buffer. */
2803         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2804                 channels_to_check = ARRAY_SIZE(status);
2805
2806         for (chan = 0; chan < channels_to_check; chan++)
2807                 status[chan] = stmmac_napi_check(priv, chan,
2808                                                  DMA_DIR_RXTX);
2809
2810         for (chan = 0; chan < tx_channel_count; chan++) {
2811                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2812                         /* Try to bump up the dma threshold on this failure */
2813                         stmmac_bump_dma_threshold(priv, chan);
2814                 } else if (unlikely(status[chan] == tx_hard_error)) {
2815                         stmmac_tx_err(priv, chan);
2816                 }
2817         }
2818 }
2819
2820 /**
2821  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2822  * @priv: driver private structure
2823  * Description: mask the MMC irq, since the counters are managed in SW.
2824  */
2825 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2826 {
2827         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2828                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2829
2830         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2831
2832         if (priv->dma_cap.rmon) {
2833                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2834                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2835         } else
2836                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2837 }
2838
2839 /**
2840  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2841  * @priv: driver private structure
2842  * Description:
2843  *  newer GMAC chip generations have a dedicated register to indicate the
2844  *  presence of optional features/functions.
2845  *  It can also be used to override the values passed through the platform
2846  *  data, which is necessary for the old MAC10/100 and GMAC chips.
2847  */
2848 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2849 {
2850         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2851 }
2852
2853 /**
2854  * stmmac_check_ether_addr - check if the MAC addr is valid
2855  * @priv: driver private structure
2856  * Description:
2857  * it verifies that the MAC address is valid: if it is not, the address is
2858  * read from the HW or, as a last resort, a random one is generated.
2859  */
2860 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2861 {
2862         u8 addr[ETH_ALEN];
2863
2864         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2865                 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2866                 if (is_valid_ether_addr(addr))
2867                         eth_hw_addr_set(priv->dev, addr);
2868                 else
2869                         eth_hw_addr_random(priv->dev);
2870                 dev_info(priv->device, "device MAC address %pM\n",
2871                          priv->dev->dev_addr);
2872         }
2873 }
2874
2875 /**
2876  * stmmac_init_dma_engine - DMA init.
2877  * @priv: driver private structure
2878  * Description:
2879  * It inits the DMA by invoking the specific MAC/GMAC callback.
2880  * Some DMA parameters can be passed from the platform;
2881  * if they are not passed, a default is kept for the MAC or GMAC.
2882  */
2883 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2884 {
2885         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2886         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2887         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2888         struct stmmac_rx_queue *rx_q;
2889         struct stmmac_tx_queue *tx_q;
2890         u32 chan = 0;
2891         int atds = 0;
2892         int ret = 0;
2893
2894         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2895                 dev_err(priv->device, "Invalid DMA configuration\n");
2896                 return -EINVAL;
2897         }
2898
2899         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2900                 atds = 1;
2901
2902         ret = stmmac_reset(priv, priv->ioaddr);
2903         if (ret) {
2904                 dev_err(priv->device, "Failed to reset the dma\n");
2905                 return ret;
2906         }
2907
2908         /* DMA Configuration */
2909         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2910
2911         if (priv->plat->axi)
2912                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2913
2914         /* DMA CSR Channel configuration */
2915         for (chan = 0; chan < dma_csr_ch; chan++) {
2916                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2917                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2918         }
2919
2920         /* DMA RX Channel Configuration */
2921         for (chan = 0; chan < rx_channels_count; chan++) {
2922                 rx_q = &priv->dma_conf.rx_queue[chan];
2923
2924                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2925                                     rx_q->dma_rx_phy, chan);
2926
2927                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2928                                      (rx_q->buf_alloc_num *
2929                                       sizeof(struct dma_desc));
2930                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2931                                        rx_q->rx_tail_addr, chan);
2932         }
2933
2934         /* DMA TX Channel Configuration */
2935         for (chan = 0; chan < tx_channels_count; chan++) {
2936                 tx_q = &priv->dma_conf.tx_queue[chan];
2937
2938                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2939                                     tx_q->dma_tx_phy, chan);
2940
2941                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2942                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2943                                        tx_q->tx_tail_addr, chan);
2944         }
2945
2946         return ret;
2947 }
2948
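     /**
      * stmmac_tx_timer_arm - (re)arm the TX coalesce timer
      * @priv: driver private structure
      * @queue: TX queue index
      * Description: starts the per-queue TX hrtimer with the configured
      * coalesce timeout, so that completed descriptors are eventually
      * cleaned even if no further TX interrupt fires.
      */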
2949 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2950 {
2951         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2952
2953         hrtimer_start(&tx_q->txtimer,
2954                       STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2955                       HRTIMER_MODE_REL);
2956 }
2957
2958 /**
2959  * stmmac_tx_timer - mitigation SW timer for TX.
2960  * @t: pointer to the expired hrtimer
2961  * Description:
2962  * This is the timer handler that schedules the TX NAPI poll (stmmac_tx_clean).
2963  */
2964 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2965 {
2966         struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2967         struct stmmac_priv *priv = tx_q->priv_data;
2968         struct stmmac_channel *ch;
2969         struct napi_struct *napi;
2970
2971         ch = &priv->channel[tx_q->queue_index];
2972         napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2973
2974         if (likely(napi_schedule_prep(napi))) {
2975                 unsigned long flags;
2976
2977                 spin_lock_irqsave(&ch->lock, flags);
2978                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2979                 spin_unlock_irqrestore(&ch->lock, flags);
2980                 __napi_schedule(napi);
2981         }
2982
2983         return HRTIMER_NORESTART;
2984 }
2985
2986 /**
2987  * stmmac_init_coalesce - init mitigation options.
2988  * @priv: driver private structure
2989  * Description:
2990  * This inits the per-queue coalesce parameters: i.e. the TX mitigation
2991  * timer (rate and handler) and the default frame thresholds used for
2992  * setting the interrupt-on-completion bit.
2993  */
2994 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2995 {
2996         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2997         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2998         u32 chan;
2999
3000         for (chan = 0; chan < tx_channel_count; chan++) {
3001                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3002
3003                 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3004                 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3005
3006                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3007                 tx_q->txtimer.function = stmmac_tx_timer;
3008         }
3009
3010         for (chan = 0; chan < rx_channel_count; chan++)
3011                 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3012 }
3013
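     /**
      * stmmac_set_rings_length - program the descriptor ring lengths
      * @priv: driver private structure
      * Description: writes the configured TX and RX ring sizes (minus one)
      * into the DMA ring length registers of every channel in use.
      */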
3014 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3015 {
3016         u32 rx_channels_count = priv->plat->rx_queues_to_use;
3017         u32 tx_channels_count = priv->plat->tx_queues_to_use;
3018         u32 chan;
3019
3020         /* set TX ring length */
3021         for (chan = 0; chan < tx_channels_count; chan++)
3022                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3023                                        (priv->dma_conf.dma_tx_size - 1), chan);
3024
3025         /* set RX ring length */
3026         for (chan = 0; chan < rx_channels_count; chan++)
3027                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3028                                        (priv->dma_conf.dma_rx_size - 1), chan);
3029 }
3030
3031 /**
3032  *  stmmac_set_tx_queue_weight - Set TX queue weight
3033  *  @priv: driver private structure
3034  *  Description: It is used for setting the TX queue weights
3035  */
3036 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3037 {
3038         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3039         u32 weight;
3040         u32 queue;
3041
3042         for (queue = 0; queue < tx_queues_count; queue++) {
3043                 weight = priv->plat->tx_queues_cfg[queue].weight;
3044                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3045         }
3046 }
3047
3048 /**
3049  *  stmmac_configure_cbs - Configure CBS in TX queue
3050  *  @priv: driver private structure
3051  *  Description: It is used for configuring CBS in AVB TX queues
3052  */
3053 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3054 {
3055         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3056         u32 mode_to_use;
3057         u32 queue;
3058
3059         /* queue 0 is reserved for legacy traffic */
3060         for (queue = 1; queue < tx_queues_count; queue++) {
3061                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3062                 if (mode_to_use == MTL_QUEUE_DCB)
3063                         continue;
3064
3065                 stmmac_config_cbs(priv, priv->hw,
3066                                 priv->plat->tx_queues_cfg[queue].send_slope,
3067                                 priv->plat->tx_queues_cfg[queue].idle_slope,
3068                                 priv->plat->tx_queues_cfg[queue].high_credit,
3069                                 priv->plat->tx_queues_cfg[queue].low_credit,
3070                                 queue);
3071         }
3072 }
3073
3074 /**
3075  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3076  *  @priv: driver private structure
3077  *  Description: It is used for mapping RX queues to RX dma channels
3078  */
3079 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3080 {
3081         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3082         u32 queue;
3083         u32 chan;
3084
3085         for (queue = 0; queue < rx_queues_count; queue++) {
3086                 chan = priv->plat->rx_queues_cfg[queue].chan;
3087                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3088         }
3089 }
3090
3091 /**
3092  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3093  *  @priv: driver private structure
3094  *  Description: It is used for configuring the RX Queue Priority
3095  */
3096 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3097 {
3098         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3099         u32 queue;
3100         u32 prio;
3101
3102         for (queue = 0; queue < rx_queues_count; queue++) {
3103                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3104                         continue;
3105
3106                 prio = priv->plat->rx_queues_cfg[queue].prio;
3107                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3108         }
3109 }
3110
3111 /**
3112  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3113  *  @priv: driver private structure
3114  *  Description: It is used for configuring the TX Queue Priority
3115  */
3116 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3117 {
3118         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3119         u32 queue;
3120         u32 prio;
3121
3122         for (queue = 0; queue < tx_queues_count; queue++) {
3123                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3124                         continue;
3125
3126                 prio = priv->plat->tx_queues_cfg[queue].prio;
3127                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3128         }
3129 }
3130
3131 /**
3132  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3133  *  @priv: driver private structure
3134  *  Description: It is used for configuring the RX queue routing
3135  */
3136 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3137 {
3138         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3139         u32 queue;
3140         u8 packet;
3141
3142         for (queue = 0; queue < rx_queues_count; queue++) {
3143                 /* no specific packet type routing specified for the queue */
3144                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3145                         continue;
3146
3147                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3148                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3149         }
3150 }
3151
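     /**
      * stmmac_mac_config_rss - configure Receive Side Scaling
      * @priv: driver private structure
      * Description: when both the HW capability and the platform allow it,
      * RSS is enabled according to the NETIF_F_RXHASH feature and the
      * resulting state is programmed into the HW for the RX queues in use;
      * otherwise RSS is simply marked as disabled.
      */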
3152 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3153 {
3154         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3155                 priv->rss.enable = false;
3156                 return;
3157         }
3158
3159         if (priv->dev->features & NETIF_F_RXHASH)
3160                 priv->rss.enable = true;
3161         else
3162                 priv->rss.enable = false;
3163
3164         stmmac_rss_configure(priv, priv->hw, &priv->rss,
3165                              priv->plat->rx_queues_to_use);
3166 }
3167
3168 /**
3169  *  stmmac_mtl_configuration - Configure MTL
3170  *  @priv: driver private structure
3171  *  Description: It is used for configuring the MTL
3172  */
3173 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3174 {
3175         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3176         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3177
3178         if (tx_queues_count > 1)
3179                 stmmac_set_tx_queue_weight(priv);
3180
3181         /* Configure MTL RX algorithms */
3182         if (rx_queues_count > 1)
3183                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3184                                 priv->plat->rx_sched_algorithm);
3185
3186         /* Configure MTL TX algorithms */
3187         if (tx_queues_count > 1)
3188                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3189                                 priv->plat->tx_sched_algorithm);
3190
3191         /* Configure CBS in AVB TX queues */
3192         if (tx_queues_count > 1)
3193                 stmmac_configure_cbs(priv);
3194
3195         /* Map RX MTL to DMA channels */
3196         stmmac_rx_queue_dma_chan_map(priv);
3197
3198         /* Enable MAC RX Queues */
3199         stmmac_mac_enable_rx_queues(priv);
3200
3201         /* Set RX priorities */
3202         if (rx_queues_count > 1)
3203                 stmmac_mac_config_rx_queues_prio(priv);
3204
3205         /* Set TX priorities */
3206         if (tx_queues_count > 1)
3207                 stmmac_mac_config_tx_queues_prio(priv);
3208
3209         /* Set RX routing */
3210         if (rx_queues_count > 1)
3211                 stmmac_mac_config_rx_queues_routing(priv);
3212
3213         /* Receive Side Scaling */
3214         if (rx_queues_count > 1)
3215                 stmmac_mac_config_rss(priv);
3216 }
3217
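     /**
      * stmmac_safety_feat_configuration - configure the HW Safety Features
      * @priv: driver private structure
      * Description: when the HW advertises safety feature support
      * (dma_cap.asp), the Safety Features are enabled using the platform
      * provided configuration; otherwise the lack of support is reported.
      */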
3218 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3219 {
3220         if (priv->dma_cap.asp) {
3221                 netdev_info(priv->dev, "Enabling Safety Features\n");
3222                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3223                                           priv->plat->safety_feat_cfg);
3224         } else {
3225                 netdev_info(priv->dev, "No Safety Features support found\n");
3226         }
3227 }
3228
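     /**
      * stmmac_fpe_start_wq - create the Frame Preemption (FPE) workqueue
      * @priv: driver private structure
      * Description: clears the FPE task state bits and creates the single
      * threaded workqueue used to run the FPE handshake work.
      * Return value: 0 on success, -ENOMEM if the workqueue cannot be created.
      */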
3229 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3230 {
3231         char *name;
3232
3233         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3234         clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3235
3236         name = priv->wq_name;
3237         sprintf(name, "%s-fpe", priv->dev->name);
3238
3239         priv->fpe_wq = create_singlethread_workqueue(name);
3240         if (!priv->fpe_wq) {
3241                 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3242
3243                 return -ENOMEM;
3244         }
3245         netdev_info(priv->dev, "FPE workqueue started\n");
3246
3247         return 0;
3248 }
3249
3250 /**
3251  * stmmac_hw_setup - setup mac in a usable state.
3252  *  @dev : pointer to the device structure.
3253  *  @ptp_register: register PTP if set
3254  *  Description:
3255  *  this is the main function used to bring the HW into a usable state: the
3256  *  DMA engine is reset and the core registers are configured (e.g. AXI,
3257  *  checksum features, timers). On return the DMA is ready to start
3258  *  receiving and transmitting.
3259  *  Return value:
3260  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3261  *  file on failure.
3262  */
3263 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3264 {
3265         struct stmmac_priv *priv = netdev_priv(dev);
3266         u32 rx_cnt = priv->plat->rx_queues_to_use;
3267         u32 tx_cnt = priv->plat->tx_queues_to_use;
3268         bool sph_en;
3269         u32 chan;
3270         int ret;
3271
3272         /* DMA initialization and SW reset */
3273         ret = stmmac_init_dma_engine(priv);
3274         if (ret < 0) {
3275                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3276                            __func__);
3277                 return ret;
3278         }
3279
3280         /* Copy the MAC addr into the HW  */
3281         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3282
3283         /* PS and related bits will be programmed according to the speed */
3284         if (priv->hw->pcs) {
3285                 int speed = priv->plat->mac_port_sel_speed;
3286
3287                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3288                     (speed == SPEED_1000)) {
3289                         priv->hw->ps = speed;
3290                 } else {
3291                         dev_warn(priv->device, "invalid port speed\n");
3292                         priv->hw->ps = 0;
3293                 }
3294         }
3295
3296         /* Initialize the MAC Core */
3297         stmmac_core_init(priv, priv->hw, dev);
3298
3299         /* Initialize MTL */
3300         stmmac_mtl_configuration(priv);
3301
3302         /* Initialize Safety Features */
3303         stmmac_safety_feat_configuration(priv);
3304
3305         ret = stmmac_rx_ipc(priv, priv->hw);
3306         if (!ret) {
3307                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3308                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3309                 priv->hw->rx_csum = 0;
3310         }
3311
3312         /* Enable the MAC Rx/Tx */
3313         stmmac_mac_set(priv, priv->ioaddr, true);
3314
3315         /* Set the HW DMA mode and the COE */
3316         stmmac_dma_operation_mode(priv);
3317
3318         stmmac_mmc_setup(priv);
3319
3320         if (ptp_register) {
3321                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3322                 if (ret < 0)
3323                         netdev_warn(priv->dev,
3324                                     "failed to enable PTP reference clock: %pe\n",
3325                                     ERR_PTR(ret));
3326         }
3327
3328         ret = stmmac_init_ptp(priv);
3329         if (ret == -EOPNOTSUPP)
3330                 netdev_info(priv->dev, "PTP not supported by HW\n");
3331         else if (ret)
3332                 netdev_warn(priv->dev, "PTP init failed\n");
3333         else if (ptp_register)
3334                 stmmac_ptp_register(priv);
3335
3336         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3337
3338         /* Convert the timer from msec to usec */
3339         if (!priv->tx_lpi_timer)
3340                 priv->tx_lpi_timer = eee_timer * 1000;
3341
3342         if (priv->use_riwt) {
3343                 u32 queue;
3344
3345                 for (queue = 0; queue < rx_cnt; queue++) {
3346                         if (!priv->rx_riwt[queue])
3347                                 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3348
3349                         stmmac_rx_watchdog(priv, priv->ioaddr,
3350                                            priv->rx_riwt[queue], queue);
3351                 }
3352         }
3353
3354         if (priv->hw->pcs)
3355                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3356
3357         /* set TX and RX rings length */
3358         stmmac_set_rings_length(priv);
3359
3360         /* Enable TSO */
3361         if (priv->tso) {
3362                 for (chan = 0; chan < tx_cnt; chan++) {
3363                         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3364
3365                         /* TSO and TBS cannot co-exist */
3366                         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3367                                 continue;
3368
3369                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3370                 }
3371         }
3372
3373         /* Enable Split Header */
3374         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3375         for (chan = 0; chan < rx_cnt; chan++)
3376                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3377
3379         /* VLAN Tag Insertion */
3380         if (priv->dma_cap.vlins)
3381                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3382
3383         /* TBS */
3384         for (chan = 0; chan < tx_cnt; chan++) {
3385                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3386                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3387
3388                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3389         }
3390
3391         /* Configure real RX and TX queues */
3392         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3393         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3394
3395         /* Start the ball rolling... */
3396         stmmac_start_all_dma(priv);
3397
3398         if (priv->dma_cap.fpesel) {
3399                 stmmac_fpe_start_wq(priv);
3400
3401                 if (priv->plat->fpe_cfg->enable)
3402                         stmmac_fpe_handshake(priv, true);
3403         }
3404
3405         return 0;
3406 }
3407
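     /**
      * stmmac_hw_teardown - undo the HW setup
      * @dev: device pointer
      * Description: currently it only disables the PTP reference clock
      * enabled by stmmac_hw_setup().
      */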
3408 static void stmmac_hw_teardown(struct net_device *dev)
3409 {
3410         struct stmmac_priv *priv = netdev_priv(dev);
3411
3412         clk_disable_unprepare(priv->plat->clk_ptp_ref);
3413 }
3414
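     /**
      * stmmac_free_irq - free the IRQ lines requested by the driver
      * @dev: device pointer
      * @irq_err: point in the request sequence at which the error occurred,
      *           or REQ_IRQ_ERR_ALL to free every line
      * @irq_idx: index of the TX/RX queue IRQ that failed; only the already
      *           requested, lower-indexed vectors are freed
      * Description: releases, in reverse request order, the per-queue TX/RX
      * vectors, the safety feature, LPI and WoL lines and finally the MAC
      * IRQ, starting from the point indicated by @irq_err. Used both on
      * close and on a request_irq() failure.
      */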
3415 static void stmmac_free_irq(struct net_device *dev,
3416                             enum request_irq_err irq_err, int irq_idx)
3417 {
3418         struct stmmac_priv *priv = netdev_priv(dev);
3419         int j;
3420
3421         switch (irq_err) {
3422         case REQ_IRQ_ERR_ALL:
3423                 irq_idx = priv->plat->tx_queues_to_use;
3424                 fallthrough;
3425         case REQ_IRQ_ERR_TX:
3426                 for (j = irq_idx - 1; j >= 0; j--) {
3427                         if (priv->tx_irq[j] > 0) {
3428                                 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3429                                 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3430                         }
3431                 }
3432                 irq_idx = priv->plat->rx_queues_to_use;
3433                 fallthrough;
3434         case REQ_IRQ_ERR_RX:
3435                 for (j = irq_idx - 1; j >= 0; j--) {
3436                         if (priv->rx_irq[j] > 0) {
3437                                 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3438                                 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3439                         }
3440                 }
3441
3442                 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3443                         free_irq(priv->sfty_ue_irq, dev);
3444                 fallthrough;
3445         case REQ_IRQ_ERR_SFTY_UE:
3446                 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3447                         free_irq(priv->sfty_ce_irq, dev);
3448                 fallthrough;
3449         case REQ_IRQ_ERR_SFTY_CE:
3450                 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3451                         free_irq(priv->lpi_irq, dev);
3452                 fallthrough;
3453         case REQ_IRQ_ERR_LPI:
3454                 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3455                         free_irq(priv->wol_irq, dev);
3456                 fallthrough;
3457         case REQ_IRQ_ERR_WOL:
3458                 free_irq(dev->irq, dev);
3459                 fallthrough;
3460         case REQ_IRQ_ERR_MAC:
3461         case REQ_IRQ_ERR_NO:
3462                 /* If MAC IRQ request error, no more IRQ to free */
3463                 break;
3464         }
3465 }
3466
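     /**
      * stmmac_request_irq_multi_msi - request the MSI interrupt vectors
      * @dev: device pointer
      * Description: requests the MAC vector plus, when present on dedicated
      * lines, the WoL, LPI and safety feature vectors, and one vector per RX
      * and TX queue with a CPU affinity hint. On failure, the vectors already
      * requested are released via stmmac_free_irq().
      * Return value: 0 on success, a negative errno otherwise.
      */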
3467 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3468 {
3469         struct stmmac_priv *priv = netdev_priv(dev);
3470         enum request_irq_err irq_err;
3471         cpumask_t cpu_mask;
3472         int irq_idx = 0;
3473         char *int_name;
3474         int ret;
3475         int i;
3476
3477         /* For common interrupt */
3478         int_name = priv->int_name_mac;
3479         sprintf(int_name, "%s:%s", dev->name, "mac");
3480         ret = request_irq(dev->irq, stmmac_mac_interrupt,
3481                           0, int_name, dev);
3482         if (unlikely(ret < 0)) {
3483                 netdev_err(priv->dev,
3484                            "%s: alloc mac MSI %d (error: %d)\n",
3485                            __func__, dev->irq, ret);
3486                 irq_err = REQ_IRQ_ERR_MAC;
3487                 goto irq_error;
3488         }
3489
3490         /* Request the Wake IRQ in case another line
3491          * is used for WoL
3492          */
3493         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3494                 int_name = priv->int_name_wol;
3495                 sprintf(int_name, "%s:%s", dev->name, "wol");
3496                 ret = request_irq(priv->wol_irq,
3497                                   stmmac_mac_interrupt,
3498                                   0, int_name, dev);
3499                 if (unlikely(ret < 0)) {
3500                         netdev_err(priv->dev,
3501                                    "%s: alloc wol MSI %d (error: %d)\n",
3502                                    __func__, priv->wol_irq, ret);
3503                         irq_err = REQ_IRQ_ERR_WOL;
3504                         goto irq_error;
3505                 }
3506         }
3507
3508         /* Request the LPI IRQ in case of another line
3509          * is used for LPI
3510          */
3511         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3512                 int_name = priv->int_name_lpi;
3513                 sprintf(int_name, "%s:%s", dev->name, "lpi");
3514                 ret = request_irq(priv->lpi_irq,
3515                                   stmmac_mac_interrupt,
3516                                   0, int_name, dev);
3517                 if (unlikely(ret < 0)) {
3518                         netdev_err(priv->dev,
3519                                    "%s: alloc lpi MSI %d (error: %d)\n",
3520                                    __func__, priv->lpi_irq, ret);
3521                         irq_err = REQ_IRQ_ERR_LPI;
3522                         goto irq_error;
3523                 }
3524         }
3525
3526         /* Request the Safety Feature Correctible Error line in
3527          * case of another line is used
3528          */
3529         if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3530                 int_name = priv->int_name_sfty_ce;
3531                 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3532                 ret = request_irq(priv->sfty_ce_irq,
3533                                   stmmac_safety_interrupt,
3534                                   0, int_name, dev);
3535                 if (unlikely(ret < 0)) {
3536                         netdev_err(priv->dev,
3537                                    "%s: alloc sfty ce MSI %d (error: %d)\n",
3538                                    __func__, priv->sfty_ce_irq, ret);
3539                         irq_err = REQ_IRQ_ERR_SFTY_CE;
3540                         goto irq_error;
3541                 }
3542         }
3543
3544         /* Request the Safety Feature Uncorrectible Error line in
3545          * case of another line is used
3546          */
3547         if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3548                 int_name = priv->int_name_sfty_ue;
3549                 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3550                 ret = request_irq(priv->sfty_ue_irq,
3551                                   stmmac_safety_interrupt,
3552                                   0, int_name, dev);
3553                 if (unlikely(ret < 0)) {
3554                         netdev_err(priv->dev,
3555                                    "%s: alloc sfty ue MSI %d (error: %d)\n",
3556                                    __func__, priv->sfty_ue_irq, ret);
3557                         irq_err = REQ_IRQ_ERR_SFTY_UE;
3558                         goto irq_error;
3559                 }
3560         }
3561
3562         /* Request Rx MSI irq */
3563         for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3564                 if (i >= MTL_MAX_RX_QUEUES)
3565                         break;
3566                 if (priv->rx_irq[i] == 0)
3567                         continue;
3568
3569                 int_name = priv->int_name_rx_irq[i];
3570                 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3571                 ret = request_irq(priv->rx_irq[i],
3572                                   stmmac_msi_intr_rx,
3573                                   0, int_name, &priv->dma_conf.rx_queue[i]);
3574                 if (unlikely(ret < 0)) {
3575                         netdev_err(priv->dev,
3576                                    "%s: alloc rx-%d  MSI %d (error: %d)\n",
3577                                    __func__, i, priv->rx_irq[i], ret);
3578                         irq_err = REQ_IRQ_ERR_RX;
3579                         irq_idx = i;
3580                         goto irq_error;
3581                 }
3582                 cpumask_clear(&cpu_mask);
3583                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3584                 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3585         }
3586
3587         /* Request Tx MSI irq */
3588         for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3589                 if (i >= MTL_MAX_TX_QUEUES)
3590                         break;
3591                 if (priv->tx_irq[i] == 0)
3592                         continue;
3593
3594                 int_name = priv->int_name_tx_irq[i];
3595                 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3596                 ret = request_irq(priv->tx_irq[i],
3597                                   stmmac_msi_intr_tx,
3598                                   0, int_name, &priv->dma_conf.tx_queue[i]);
3599                 if (unlikely(ret < 0)) {
3600                         netdev_err(priv->dev,
3601                                    "%s: alloc tx-%d  MSI %d (error: %d)\n",
3602                                    __func__, i, priv->tx_irq[i], ret);
3603                         irq_err = REQ_IRQ_ERR_TX;
3604                         irq_idx = i;
3605                         goto irq_error;
3606                 }
3607                 cpumask_clear(&cpu_mask);
3608                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3609                 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3610         }
3611
3612         return 0;
3613
3614 irq_error:
3615         stmmac_free_irq(dev, irq_err, irq_idx);
3616         return ret;
3617 }
3618
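     /**
      * stmmac_request_irq_single - request the shared interrupt line(s)
      * @dev: device pointer
      * Description: requests the main (shared) IRQ and, when they use a
      * dedicated line, the WoL and LPI IRQs. On failure, the lines already
      * requested are released via stmmac_free_irq().
      * Return value: 0 on success, a negative errno otherwise.
      */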
3619 static int stmmac_request_irq_single(struct net_device *dev)
3620 {
3621         struct stmmac_priv *priv = netdev_priv(dev);
3622         enum request_irq_err irq_err;
3623         int ret;
3624
3625         ret = request_irq(dev->irq, stmmac_interrupt,
3626                           IRQF_SHARED, dev->name, dev);
3627         if (unlikely(ret < 0)) {
3628                 netdev_err(priv->dev,
3629                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3630                            __func__, dev->irq, ret);
3631                 irq_err = REQ_IRQ_ERR_MAC;
3632                 goto irq_error;
3633         }
3634
3635         /* Request the Wake IRQ in case of another line
3636          * is used for WoL
3637          */
3638         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3639                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3640                                   IRQF_SHARED, dev->name, dev);
3641                 if (unlikely(ret < 0)) {
3642                         netdev_err(priv->dev,
3643                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3644                                    __func__, priv->wol_irq, ret);
3645                         irq_err = REQ_IRQ_ERR_WOL;
3646                         goto irq_error;
3647                 }
3648         }
3649
3650         /* Request the LPI IRQ in case another line is used for LPI */
3651         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3652                 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3653                                   IRQF_SHARED, dev->name, dev);
3654                 if (unlikely(ret < 0)) {
3655                         netdev_err(priv->dev,
3656                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3657                                    __func__, priv->lpi_irq, ret);
3658                         irq_err = REQ_IRQ_ERR_LPI;
3659                         goto irq_error;
3660                 }
3661         }
3662
3663         return 0;
3664
3665 irq_error:
3666         stmmac_free_irq(dev, irq_err, 0);
3667         return ret;
3668 }
3669
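     /**
      * stmmac_request_irq - request the device interrupt lines
      * @dev: device pointer
      * Description: dispatches to the multi MSI or the single/shared IRQ
      * request helper depending on the platform multi_msi_en setting.
      * Return value: 0 on success, a negative errno otherwise.
      */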
3670 static int stmmac_request_irq(struct net_device *dev)
3671 {
3672         struct stmmac_priv *priv = netdev_priv(dev);
3673         int ret;
3674
3675         /* Request the IRQ lines */
3676         if (priv->plat->multi_msi_en)
3677                 ret = stmmac_request_irq_multi_msi(dev);
3678         else
3679                 ret = stmmac_request_irq_single(dev);
3680
3681         return ret;
3682 }
3683
3684 /**
3685  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3686  *  @priv: driver private structure
3687  *  @mtu: MTU used to size the DMA queues and buffers
3688  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3689  *  Allocate the Tx/Rx DMA queue and init them.
3690  *  Return value:
3691  *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3692  */
3693 static struct stmmac_dma_conf *
3694 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3695 {
3696         struct stmmac_dma_conf *dma_conf;
3697         int chan, bfsize, ret;
3698
3699         dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3700         if (!dma_conf) {
3701                 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3702                            __func__);
3703                 return ERR_PTR(-ENOMEM);
3704         }
3705
3706         bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3707         if (bfsize < 0)
3708                 bfsize = 0;
3709
3710         if (bfsize < BUF_SIZE_16KiB)
3711                 bfsize = stmmac_set_bfsize(mtu, 0);
3712
3713         dma_conf->dma_buf_sz = bfsize;
3714         /* Choose the tx/rx size from the one already defined in the
3715          * priv struct, if any.
3716          */
3717         dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3718         dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3719
3720         if (!dma_conf->dma_tx_size)
3721                 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3722         if (!dma_conf->dma_rx_size)
3723                 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3724
3725         /* Earlier check for TBS */
3726         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3727                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3728                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3729
3730                 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3731                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3732         }
3733
3734         ret = alloc_dma_desc_resources(priv, dma_conf);
3735         if (ret < 0) {
3736                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3737                            __func__);
3738                 goto alloc_error;
3739         }
3740
3741         ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3742         if (ret < 0) {
3743                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3744                            __func__);
3745                 goto init_error;
3746         }
3747
3748         return dma_conf;
3749
3750 init_error:
3751         free_dma_desc_resources(priv, dma_conf);
3752 alloc_error:
3753         kfree(dma_conf);
3754         return ERR_PTR(ret);
3755 }
3756
3757 /**
3758  *  __stmmac_open - open entry point of the driver
3759  *  @dev : pointer to the device structure.
3760  *  @dma_conf: structure holding the DMA configuration to use
3761  *  Description:
3762  *  This function is the open entry point of the driver.
3763  *  Return value:
3764  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3765  *  file on failure.
3766  */
3767 static int __stmmac_open(struct net_device *dev,
3768                          struct stmmac_dma_conf *dma_conf)
3769 {
3770         struct stmmac_priv *priv = netdev_priv(dev);
3771         int mode = priv->plat->phy_interface;
3772         u32 chan;
3773         int ret;
3774
3775         ret = pm_runtime_resume_and_get(priv->device);
3776         if (ret < 0)
3777                 return ret;
3778
3779         if (priv->hw->pcs != STMMAC_PCS_TBI &&
3780             priv->hw->pcs != STMMAC_PCS_RTBI &&
3781             (!priv->hw->xpcs ||
3782              xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3783                 ret = stmmac_init_phy(dev);
3784                 if (ret) {
3785                         netdev_err(priv->dev,
3786                                    "%s: Cannot attach to PHY (error: %d)\n",
3787                                    __func__, ret);
3788                         goto init_phy_error;
3789                 }
3790         }
3791
3792         /* Extra statistics */
3793         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3794         priv->xstats.threshold = tc;
3795
3796         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3797
3798         buf_sz = dma_conf->dma_buf_sz;
3799         memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3800
3801         stmmac_reset_queues_param(priv);
3802
3803         ret = stmmac_hw_setup(dev, true);
3804         if (ret < 0) {
3805                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3806                 goto init_error;
3807         }
3808
3809         stmmac_init_coalesce(priv);
3810
3811         phylink_start(priv->phylink);
3812         /* We may have called phylink_speed_down before */
3813         phylink_speed_up(priv->phylink);
3814
3815         ret = stmmac_request_irq(dev);
3816         if (ret)
3817                 goto irq_error;
3818
3819         stmmac_enable_all_queues(priv);
3820         netif_tx_start_all_queues(priv->dev);
3821         stmmac_enable_all_dma_irq(priv);
3822
3823         return 0;
3824
3825 irq_error:
3826         phylink_stop(priv->phylink);
3827
3828         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3829                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3830
3831         stmmac_hw_teardown(dev);
3832 init_error:
3833         free_dma_desc_resources(priv, &priv->dma_conf);
3834         phylink_disconnect_phy(priv->phylink);
3835 init_phy_error:
3836         pm_runtime_put(priv->device);
3837         return ret;
3838 }
3839
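     /**
      * stmmac_open - open entry point of the driver
      * @dev: device pointer
      * Description: builds a dma_conf sized for the current MTU, opens the
      * device with it through __stmmac_open() and frees the temporary
      * dma_conf afterwards.
      * Return value: 0 on success, a negative errno otherwise.
      */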
3840 static int stmmac_open(struct net_device *dev)
3841 {
3842         struct stmmac_priv *priv = netdev_priv(dev);
3843         struct stmmac_dma_conf *dma_conf;
3844         int ret;
3845
3846         dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3847         if (IS_ERR(dma_conf))
3848                 return PTR_ERR(dma_conf);
3849
3850         ret = __stmmac_open(dev, dma_conf);
3851         kfree(dma_conf);
3852         return ret;
3853 }
3854
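     /**
      * stmmac_fpe_stop_wq - tear down the Frame Preemption (FPE) workqueue
      * @priv: driver private structure
      * Description: flags the FPE task as being removed and destroys the
      * workqueue created by stmmac_fpe_start_wq(), if any.
      */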
3855 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3856 {
3857         set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3858
3859         if (priv->fpe_wq)
3860                 destroy_workqueue(priv->fpe_wq);
3861
3862         netdev_info(priv->dev, "FPE workqueue stopped\n");
3863 }
3864
3865 /**
3866  *  stmmac_release - close entry point of the driver
3867  *  @dev : device pointer.
3868  *  Description:
3869  *  This is the stop entry point of the driver.
3870  */
3871 static int stmmac_release(struct net_device *dev)
3872 {
3873         struct stmmac_priv *priv = netdev_priv(dev);
3874         u32 chan;
3875
3876         if (device_may_wakeup(priv->device))
3877                 phylink_speed_down(priv->phylink, false);
3878         /* Stop and disconnect the PHY */
3879         phylink_stop(priv->phylink);
3880         phylink_disconnect_phy(priv->phylink);
3881
3882         stmmac_disable_all_queues(priv);
3883
3884         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3885                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3886
3887         netif_tx_disable(dev);
3888
3889         /* Free the IRQ lines */
3890         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3891
3892         if (priv->eee_enabled) {
3893                 priv->tx_path_in_lpi_mode = false;
3894                 del_timer_sync(&priv->eee_ctrl_timer);
3895         }
3896
3897         /* Stop TX/RX DMA and clear the descriptors */
3898         stmmac_stop_all_dma(priv);
3899
3900         /* Release and free the Rx/Tx resources */
3901         free_dma_desc_resources(priv, &priv->dma_conf);
3902
3903         /* Disable the MAC Rx/Tx */
3904         stmmac_mac_set(priv, priv->ioaddr, false);
3905
3906         netif_carrier_off(dev);
3907
3908         stmmac_release_ptp(priv);
3909
3910         pm_runtime_put(priv->device);
3911
3912         if (priv->dma_cap.fpesel)
3913                 stmmac_fpe_stop_wq(priv);
3914
3915         return 0;
3916 }
3917
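     /**
      * stmmac_vlan_insert - prepare a descriptor for HW VLAN tag insertion
      * @priv: driver private structure
      * @skb: socket buffer carrying the VLAN tag
      * @tx_q: TX queue the frame will be sent on
      * Description: when the HW supports VLAN insertion and the skb carries
      * a tag, a descriptor holding the (possibly double) tag is set up and
      * its ownership is handed to the DMA.
      * Return value: true if a descriptor was consumed, false otherwise.
      */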
3918 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3919                                struct stmmac_tx_queue *tx_q)
3920 {
3921         u16 tag = 0x0, inner_tag = 0x0;
3922         u32 inner_type = 0x0;
3923         struct dma_desc *p;
3924
3925         if (!priv->dma_cap.vlins)
3926                 return false;
3927         if (!skb_vlan_tag_present(skb))
3928                 return false;
3929         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3930                 inner_tag = skb_vlan_tag_get(skb);
3931                 inner_type = STMMAC_VLAN_INSERT;
3932         }
3933
3934         tag = skb_vlan_tag_get(skb);
3935
3936         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3937                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3938         else
3939                 p = &tx_q->dma_tx[tx_q->cur_tx];
3940
3941         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3942                 return false;
3943
3944         stmmac_set_tx_owner(priv, p);
3945         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
3946         return true;
3947 }
3948
3949 /**
3950  *  stmmac_tso_allocator - Allocate TSO descriptors for a payload buffer
3951  *  @priv: driver private structure
3952  *  @des: buffer start address
3953  *  @total_len: total length to fill in descriptors
3954  *  @last_segment: condition for the last descriptor
3955  *  @queue: TX queue index
3956  *  Description:
3957  *  This function fills the descriptors with the buffer address, taking new
3958  *  descriptors as needed and splitting the payload into TSO_MAX_BUFF_SIZE chunks.
3959  */
3960 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3961                                  int total_len, bool last_segment, u32 queue)
3962 {
3963         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3964         struct dma_desc *desc;
3965         u32 buff_size;
3966         int tmp_len;
3967
3968         tmp_len = total_len;
3969
3970         while (tmp_len > 0) {
3971                 dma_addr_t curr_addr;
3972
3973                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3974                                                 priv->dma_conf.dma_tx_size);
3975                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3976
3977                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3978                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3979                 else
3980                         desc = &tx_q->dma_tx[tx_q->cur_tx];
3981
3982                 curr_addr = des + (total_len - tmp_len);
3983                 if (priv->dma_cap.addr64 <= 32)
3984                         desc->des0 = cpu_to_le32(curr_addr);
3985                 else
3986                         stmmac_set_desc_addr(priv, desc, curr_addr);
3987
3988                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3989                             TSO_MAX_BUFF_SIZE : tmp_len;
3990
3991                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3992                                 0, 1,
3993                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3994                                 0, 0);
3995
3996                 tmp_len -= TSO_MAX_BUFF_SIZE;
3997         }
3998 }
3999
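     /**
      * stmmac_flush_tx_descriptors - hand the prepared descriptors to the DMA
      * @priv: driver private structure
      * @queue: TX queue index
      * Description: issues a write barrier and updates the TX tail pointer
      * register so that the DMA engine starts fetching the newly prepared
      * descriptors.
      */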
4000 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4001 {
4002         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4003         int desc_size;
4004
4005         if (likely(priv->extend_desc))
4006                 desc_size = sizeof(struct dma_extended_desc);
4007         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4008                 desc_size = sizeof(struct dma_edesc);
4009         else
4010                 desc_size = sizeof(struct dma_desc);
4011
4012         /* The own bit must be the last thing written when preparing the
4013          * descriptor; a barrier is then needed to make sure everything
4014          * is coherent before handing the descriptor over to the DMA engine.
4015          */
4016         wmb();
4017
4018         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4019         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4020 }
4021
4022 /**
4023  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4024  *  @skb : the socket buffer
4025  *  @dev : device pointer
4026  *  Description: this is the transmit function that is called on TSO frames
4027  *  (support available on GMAC4 and newer chips).
4028  *  Diagram below show the ring programming in case of TSO frames:
4029  *
4030  *  First Descriptor
4031  *   --------
4032  *   | DES0 |---> buffer1 = L2/L3/L4 header
4033  *   | DES1 |---> TCP Payload (can continue on next descr...)
4034  *   | DES2 |---> buffer 1 and 2 len
4035  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4036  *   --------
4037  *      |
4038  *     ...
4039  *      |
4040  *   --------
4041  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4042  *   | DES1 | --|
4043  *   | DES2 | --> buffer 1 and 2 len
4044  *   | DES3 |
4045  *   --------
4046  *
4047  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs programming when it changes.
4048  */
4049 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4050 {
4051         struct dma_desc *desc, *first, *mss_desc = NULL;
4052         struct stmmac_priv *priv = netdev_priv(dev);
4053         int nfrags = skb_shinfo(skb)->nr_frags;
4054         u32 queue = skb_get_queue_mapping(skb);
4055         unsigned int first_entry, tx_packets;
4056         int tmp_pay_len = 0, first_tx;
4057         struct stmmac_tx_queue *tx_q;
4058         bool has_vlan, set_ic;
4059         u8 proto_hdr_len, hdr;
4060         u32 pay_len, mss;
4061         dma_addr_t des;
4062         int i;
4063
4064         tx_q = &priv->dma_conf.tx_queue[queue];
4065         first_tx = tx_q->cur_tx;
4066
4067         /* Compute header lengths */
4068         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4069                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4070                 hdr = sizeof(struct udphdr);
4071         } else {
4072                 proto_hdr_len = skb_tcp_all_headers(skb);
4073                 hdr = tcp_hdrlen(skb);
4074         }
4075
4076         /* Desc availability based on threshold should be safe enough */
4077         if (unlikely(stmmac_tx_avail(priv, queue) <
4078                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4079                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4080                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4081                                                                 queue));
4082                         /* This is a hard error, log it. */
4083                         netdev_err(priv->dev,
4084                                    "%s: Tx Ring full when queue awake\n",
4085                                    __func__);
4086                 }
4087                 return NETDEV_TX_BUSY;
4088         }
4089
4090         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4091
4092         mss = skb_shinfo(skb)->gso_size;
4093
4094         /* set new MSS value if needed */
4095         if (mss != tx_q->mss) {
4096                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4097                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4098                 else
4099                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4100
4101                 stmmac_set_mss(priv, mss_desc, mss);
4102                 tx_q->mss = mss;
4103                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4104                                                 priv->dma_conf.dma_tx_size);
4105                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4106         }
4107
4108         if (netif_msg_tx_queued(priv)) {
4109                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4110                         __func__, hdr, proto_hdr_len, pay_len, mss);
4111                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4112                         skb->data_len);
4113         }
4114
4115         /* Check if VLAN can be inserted by HW */
4116         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4117
4118         first_entry = tx_q->cur_tx;
4119         WARN_ON(tx_q->tx_skbuff[first_entry]);
4120
4121         if (tx_q->tbs & STMMAC_TBS_AVAIL)
4122                 desc = &tx_q->dma_entx[first_entry].basic;
4123         else
4124                 desc = &tx_q->dma_tx[first_entry];
4125         first = desc;
4126
4127         if (has_vlan)
4128                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4129
4130         /* first descriptor: fill Headers on Buf1 */
4131         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4132                              DMA_TO_DEVICE);
4133         if (dma_mapping_error(priv->device, des))
4134                 goto dma_map_err;
4135
4136         tx_q->tx_skbuff_dma[first_entry].buf = des;
4137         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4138         tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4139         tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4140
4141         if (priv->dma_cap.addr64 <= 32) {
4142                 first->des0 = cpu_to_le32(des);
4143
4144                 /* Fill start of payload in buff2 of first descriptor */
4145                 if (pay_len)
4146                         first->des1 = cpu_to_le32(des + proto_hdr_len);
4147
4148                 /* If needed take extra descriptors to fill the remaining payload */
4149                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4150         } else {
4151                 stmmac_set_desc_addr(priv, first, des);
4152                 tmp_pay_len = pay_len;
4153                 des += proto_hdr_len;
4154                 pay_len = 0;
4155         }
4156
4157         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4158
4159         /* Prepare fragments */
4160         for (i = 0; i < nfrags; i++) {
4161                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4162
4163                 des = skb_frag_dma_map(priv->device, frag, 0,
4164                                        skb_frag_size(frag),
4165                                        DMA_TO_DEVICE);
4166                 if (dma_mapping_error(priv->device, des))
4167                         goto dma_map_err;
4168
4169                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4170                                      (i == nfrags - 1), queue);
4171
4172                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4173                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4174                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4175                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4176         }
4177
4178         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4179
4180         /* Only the last descriptor gets to point to the skb. */
4181         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4182         tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4183
4184         /* Manage tx mitigation */
4185         tx_packets = (tx_q->cur_tx + 1) - first_tx;
4186         tx_q->tx_count_frames += tx_packets;
4187
4188         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4189                 set_ic = true;
4190         else if (!priv->tx_coal_frames[queue])
4191                 set_ic = false;
4192         else if (tx_packets > priv->tx_coal_frames[queue])
4193                 set_ic = true;
4194         else if ((tx_q->tx_count_frames %
4195                   priv->tx_coal_frames[queue]) < tx_packets)
4196                 set_ic = true;
4197         else
4198                 set_ic = false;
4199
4200         if (set_ic) {
4201                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4202                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4203                 else
4204                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4205
4206                 tx_q->tx_count_frames = 0;
4207                 stmmac_set_tx_ic(priv, desc);
4208                 priv->xstats.tx_set_ic_bit++;
4209         }
4210
4211         /* We've used all descriptors we need for this skb, however,
4212          * advance cur_tx so that it references a fresh descriptor.
4213          * ndo_start_xmit will fill this descriptor the next time it's
4214          * called and stmmac_tx_clean may clean up to this descriptor.
4215          */
4216         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4217
4218         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4219                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4220                           __func__);
4221                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4222         }
4223
4224         dev->stats.tx_bytes += skb->len;
4225         priv->xstats.tx_tso_frames++;
4226         priv->xstats.tx_tso_nfrags += nfrags;
4227
4228         if (priv->sarc_type)
4229                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4230
4231         skb_tx_timestamp(skb);
4232
4233         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4234                      priv->hwts_tx_en)) {
4235                 /* declare that device is doing timestamping */
4236                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4237                 stmmac_enable_tx_timestamp(priv, first);
4238         }
4239
4240         /* Complete the first descriptor before granting the DMA */
4241         stmmac_prepare_tso_tx_desc(priv, first, 1,
4242                         proto_hdr_len,
4243                         pay_len,
4244                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4245                         hdr / 4, (skb->len - proto_hdr_len));
4246
4247         /* If context desc is used to change MSS */
4248         if (mss_desc) {
4249                 /* Make sure that the first descriptor has been completely
4250                  * written, including its OWN bit: the MSS context descriptor
4251                  * sits before the first descriptor in the ring, so its OWN
4252                  * bit must be the last thing written.
4253                  */
4254                 dma_wmb();
4255                 stmmac_set_tx_owner(priv, mss_desc);
4256         }
4257
4258         if (netif_msg_pktdata(priv)) {
4259                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4260                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4261                         tx_q->cur_tx, first, nfrags);
4262                 pr_info(">>> frame to be transmitted: ");
4263                 print_pkt(skb->data, skb_headlen(skb));
4264         }
4265
4266         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4267
4268         stmmac_flush_tx_descriptors(priv, queue);
4269         stmmac_tx_timer_arm(priv, queue);
4270
4271         return NETDEV_TX_OK;
4272
4273 dma_map_err:
4274         dev_err(priv->device, "Tx DMA map failed\n");
4275         dev_kfree_skb(skb);
4276         priv->dev->stats.tx_dropped++;
4277         return NETDEV_TX_OK;
4278 }
4279
4280 /**
4281  *  stmmac_xmit - Tx entry point of the driver
4282  *  @skb : the socket buffer
4283  *  @dev : device pointer
4284  *  Description : this is the tx entry point of the driver.
4285  *  It programs the chain or the ring and supports oversized frames
4286  *  and the SG feature.
4287  */
4288 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4289 {
4290         unsigned int first_entry, tx_packets, enh_desc;
4291         struct stmmac_priv *priv = netdev_priv(dev);
4292         unsigned int nopaged_len = skb_headlen(skb);
4293         int i, csum_insertion = 0, is_jumbo = 0;
4294         u32 queue = skb_get_queue_mapping(skb);
4295         int nfrags = skb_shinfo(skb)->nr_frags;
4296         int gso = skb_shinfo(skb)->gso_type;
4297         struct dma_edesc *tbs_desc = NULL;
4298         struct dma_desc *desc, *first;
4299         struct stmmac_tx_queue *tx_q;
4300         bool has_vlan, set_ic;
4301         int entry, first_tx;
4302         dma_addr_t des;
4303
4304         tx_q = &priv->dma_conf.tx_queue[queue];
4305         first_tx = tx_q->cur_tx;
4306
4307         if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4308                 stmmac_disable_eee_mode(priv);
4309
4310         /* Manage oversized TCP frames for GMAC4 device */
4311         if (skb_is_gso(skb) && priv->tso) {
4312                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4313                         return stmmac_tso_xmit(skb, dev);
4314                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4315                         return stmmac_tso_xmit(skb, dev);
4316         }
4317
4318         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4319                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4320                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4321                                                                 queue));
4322                         /* This is a hard error, log it. */
4323                         netdev_err(priv->dev,
4324                                    "%s: Tx Ring full when queue awake\n",
4325                                    __func__);
4326                 }
4327                 return NETDEV_TX_BUSY;
4328         }
4329
4330         /* Check if VLAN can be inserted by HW */
4331         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4332
4333         entry = tx_q->cur_tx;
4334         first_entry = entry;
4335         WARN_ON(tx_q->tx_skbuff[first_entry]);
4336
4337         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4338
4339         if (likely(priv->extend_desc))
4340                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4341         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4342                 desc = &tx_q->dma_entx[entry].basic;
4343         else
4344                 desc = tx_q->dma_tx + entry;
4345
4346         first = desc;
4347
4348         if (has_vlan)
4349                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4350
4351         enh_desc = priv->plat->enh_desc;
4352         /* To program the descriptors according to the size of the frame */
4353         if (enh_desc)
4354                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4355
4356         if (unlikely(is_jumbo)) {
4357                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4358                 if (unlikely(entry < 0) && (entry != -EINVAL))
4359                         goto dma_map_err;
4360         }
4361
4362         for (i = 0; i < nfrags; i++) {
4363                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4364                 int len = skb_frag_size(frag);
4365                 bool last_segment = (i == (nfrags - 1));
4366
4367                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4368                 WARN_ON(tx_q->tx_skbuff[entry]);
4369
4370                 if (likely(priv->extend_desc))
4371                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4372                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4373                         desc = &tx_q->dma_entx[entry].basic;
4374                 else
4375                         desc = tx_q->dma_tx + entry;
4376
4377                 des = skb_frag_dma_map(priv->device, frag, 0, len,
4378                                        DMA_TO_DEVICE);
4379                 if (dma_mapping_error(priv->device, des))
4380                         goto dma_map_err; /* should reuse desc w/o issues */
4381
4382                 tx_q->tx_skbuff_dma[entry].buf = des;
4383
4384                 stmmac_set_desc_addr(priv, desc, des);
4385
4386                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4387                 tx_q->tx_skbuff_dma[entry].len = len;
4388                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4389                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4390
4391                 /* Prepare the descriptor and set the own bit too */
4392                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4393                                 priv->mode, 1, last_segment, skb->len);
4394         }
4395
4396         /* Only the last descriptor gets to point to the skb. */
4397         tx_q->tx_skbuff[entry] = skb;
4398         tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4399
4400         /* According to the coalesce parameter the IC bit for the latest
4401          * segment is reset and the timer re-started to clean the tx status.
4402          * This approach takes care of the fragments: desc is the first
4403          * element in case of no SG.
4404          */
4405         tx_packets = (entry + 1) - first_tx;
4406         tx_q->tx_count_frames += tx_packets;
4407
4408         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4409                 set_ic = true;
4410         else if (!priv->tx_coal_frames[queue])
4411                 set_ic = false;
4412         else if (tx_packets > priv->tx_coal_frames[queue])
4413                 set_ic = true;
4414         else if ((tx_q->tx_count_frames %
4415                   priv->tx_coal_frames[queue]) < tx_packets)
4416                 set_ic = true;
4417         else
4418                 set_ic = false;
4419
4420         if (set_ic) {
4421                 if (likely(priv->extend_desc))
4422                         desc = &tx_q->dma_etx[entry].basic;
4423                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4424                         desc = &tx_q->dma_entx[entry].basic;
4425                 else
4426                         desc = &tx_q->dma_tx[entry];
4427
4428                 tx_q->tx_count_frames = 0;
4429                 stmmac_set_tx_ic(priv, desc);
4430                 priv->xstats.tx_set_ic_bit++;
4431         }
4432
4433         /* We've used all descriptors we need for this skb, however,
4434          * advance cur_tx so that it references a fresh descriptor.
4435          * ndo_start_xmit will fill this descriptor the next time it's
4436          * called and stmmac_tx_clean may clean up to this descriptor.
4437          */
4438         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4439         tx_q->cur_tx = entry;
4440
4441         if (netif_msg_pktdata(priv)) {
4442                 netdev_dbg(priv->dev,
4443                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4444                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4445                            entry, first, nfrags);
4446
4447                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4448                 print_pkt(skb->data, skb->len);
4449         }
4450
4451         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4452                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4453                           __func__);
4454                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4455         }
4456
4457         dev->stats.tx_bytes += skb->len;
4458
4459         if (priv->sarc_type)
4460                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4461
4462         skb_tx_timestamp(skb);
4463
4464         /* Ready to fill the first descriptor and set the OWN bit w/o any
4465          * problems because all the descriptors are actually ready to be
4466          * passed to the DMA engine.
4467          */
4468         if (likely(!is_jumbo)) {
4469                 bool last_segment = (nfrags == 0);
4470
4471                 des = dma_map_single(priv->device, skb->data,
4472                                      nopaged_len, DMA_TO_DEVICE);
4473                 if (dma_mapping_error(priv->device, des))
4474                         goto dma_map_err;
4475
4476                 tx_q->tx_skbuff_dma[first_entry].buf = des;
4477                 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4478                 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4479
4480                 stmmac_set_desc_addr(priv, first, des);
4481
4482                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4483                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4484
4485                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4486                              priv->hwts_tx_en)) {
4487                         /* declare that device is doing timestamping */
4488                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4489                         stmmac_enable_tx_timestamp(priv, first);
4490                 }
4491
4492                 /* Prepare the first descriptor setting the OWN bit too */
4493                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4494                                 csum_insertion, priv->mode, 0, last_segment,
4495                                 skb->len);
4496         }
4497
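        /* With time-based scheduling enabled, program the launch time taken
         * from skb->tstamp into the enhanced descriptor.
         */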
4498         if (tx_q->tbs & STMMAC_TBS_EN) {
4499                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4500
4501                 tbs_desc = &tx_q->dma_entx[first_entry];
4502                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4503         }
4504
4505         stmmac_set_tx_owner(priv, first);
4506
4507         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4508
4509         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4510
4511         stmmac_flush_tx_descriptors(priv, queue);
4512         stmmac_tx_timer_arm(priv, queue);
4513
4514         return NETDEV_TX_OK;
4515
4516 dma_map_err:
4517         netdev_err(priv->dev, "Tx DMA map failed\n");
4518         dev_kfree_skb(skb);
4519         priv->dev->stats.tx_dropped++;
4520         return NETDEV_TX_OK;
4521 }
4522
4523 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4524 {
4525         struct vlan_ethhdr *veth;
4526         __be16 vlan_proto;
4527         u16 vlanid;
4528
4529         veth = (struct vlan_ethhdr *)skb->data;
4530         vlan_proto = veth->h_vlan_proto;
4531
4532         if ((vlan_proto == htons(ETH_P_8021Q) &&
4533              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4534             (vlan_proto == htons(ETH_P_8021AD) &&
4535              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4536                 /* pop the vlan tag */
4537                 vlanid = ntohs(veth->h_vlan_TCI);
4538                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4539                 skb_pull(skb, VLAN_HLEN);
4540                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4541         }
4542 }
4543
4544 /**
4545  * stmmac_rx_refill - refill the used RX buffers
4546  * @priv: driver private structure
4547  * @queue: RX queue index
4548  * Description : reallocate the RX buffers from the page pool and hand the
4549  * descriptors back to the DMA for the zero-copy reception process.
4550  */
4551 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4552 {
4553         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4554         int dirty = stmmac_rx_dirty(priv, queue);
4555         unsigned int entry = rx_q->dirty_rx;
4556         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4557
4558         if (priv->dma_cap.addr64 <= 32)
4559                 gfp |= GFP_DMA32;
4560
4561         while (dirty-- > 0) {
4562                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4563                 struct dma_desc *p;
4564                 bool use_rx_wd;
4565
4566                 if (priv->extend_desc)
4567                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4568                 else
4569                         p = rx_q->dma_rx + entry;
4570
4571                 if (!buf->page) {
4572                         buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4573                         if (!buf->page)
4574                                 break;
4575                 }
4576
4577                 if (priv->sph && !buf->sec_page) {
4578                         buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4579                         if (!buf->sec_page)
4580                                 break;
4581
4582                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4583                 }
4584
4585                 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4586
4587                 stmmac_set_desc_addr(priv, p, buf->addr);
4588                 if (priv->sph)
4589                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4590                 else
4591                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4592                 stmmac_refill_desc3(priv, rx_q, p);
4593
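                /* use_rx_wd asks the descriptor setup to rely on the RX
                 * interrupt watchdog (RIWT) instead of a per-descriptor
                 * completion interrupt; it is only honoured when RIWT is in use.
                 */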
4594                 rx_q->rx_count_frames++;
4595                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4596                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4597                         rx_q->rx_count_frames = 0;
4598
4599                 use_rx_wd = !priv->rx_coal_frames[queue];
4600                 use_rx_wd |= rx_q->rx_count_frames > 0;
4601                 if (!priv->use_riwt)
4602                         use_rx_wd = false;
4603
4604                 dma_wmb();
4605                 stmmac_set_rx_owner(priv, p, use_rx_wd);
4606
4607                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4608         }
4609         rx_q->dirty_rx = entry;
4610         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4611                             (rx_q->dirty_rx * sizeof(struct dma_desc));
4612         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4613 }
4614
4615 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4616                                        struct dma_desc *p,
4617                                        int status, unsigned int len)
4618 {
4619         unsigned int plen = 0, hlen = 0;
4620         int coe = priv->hw->rx_csum;
4621
4622         /* Not first descriptor, buffer is always zero */
4623         if (priv->sph && len)
4624                 return 0;
4625
4626         /* First descriptor, get split header length */
4627         stmmac_get_rx_header_len(priv, p, &hlen);
4628         if (priv->sph && hlen) {
4629                 priv->xstats.rx_split_hdr_pkt_n++;
4630                 return hlen;
4631         }
4632
4633         /* First descriptor, not last descriptor and not split header */
4634         if (status & rx_not_ls)
4635                 return priv->dma_conf.dma_buf_sz;
4636
4637         plen = stmmac_get_rx_frame_len(priv, p, coe);
4638
4639         /* First descriptor and last descriptor and not split header */
4640         return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4641 }
4642
4643 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4644                                        struct dma_desc *p,
4645                                        int status, unsigned int len)
4646 {
4647         int coe = priv->hw->rx_csum;
4648         unsigned int plen = 0;
4649
4650         /* Not split header, buffer is not available */
4651         if (!priv->sph)
4652                 return 0;
4653
4654         /* Not last descriptor */
4655         if (status & rx_not_ls)
4656                 return priv->dma_conf.dma_buf_sz;
4657
4658         plen = stmmac_get_rx_frame_len(priv, p, coe);
4659
4660         /* Last descriptor */
4661         return plen - len;
4662 }
4663
4664 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4665                                 struct xdp_frame *xdpf, bool dma_map)
4666 {
4667         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4668         unsigned int entry = tx_q->cur_tx;
4669         struct dma_desc *tx_desc;
4670         dma_addr_t dma_addr;
4671         bool set_ic;
4672
4673         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4674                 return STMMAC_XDP_CONSUMED;
4675
4676         if (likely(priv->extend_desc))
4677                 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4678         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4679                 tx_desc = &tx_q->dma_entx[entry].basic;
4680         else
4681                 tx_desc = tx_q->dma_tx + entry;
4682
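        /* dma_map is true on the ndo_xdp_xmit() path, where the frame memory
         * is not ours and must be freshly mapped; for XDP_TX the buffer comes
         * from the RX page_pool and is already DMA-mapped, so a sync is enough.
         */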
4683         if (dma_map) {
4684                 dma_addr = dma_map_single(priv->device, xdpf->data,
4685                                           xdpf->len, DMA_TO_DEVICE);
4686                 if (dma_mapping_error(priv->device, dma_addr))
4687                         return STMMAC_XDP_CONSUMED;
4688
4689                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4690         } else {
4691                 struct page *page = virt_to_page(xdpf->data);
4692
4693                 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4694                            xdpf->headroom;
4695                 dma_sync_single_for_device(priv->device, dma_addr,
4696                                            xdpf->len, DMA_BIDIRECTIONAL);
4697
4698                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4699         }
4700
4701         tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4702         tx_q->tx_skbuff_dma[entry].map_as_page = false;
4703         tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4704         tx_q->tx_skbuff_dma[entry].last_segment = true;
4705         tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4706
4707         tx_q->xdpf[entry] = xdpf;
4708
4709         stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4710
4711         stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4712                                true, priv->mode, true, true,
4713                                xdpf->len);
4714
4715         tx_q->tx_count_frames++;
4716
4717         if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4718                 set_ic = true;
4719         else
4720                 set_ic = false;
4721
4722         if (set_ic) {
4723                 tx_q->tx_count_frames = 0;
4724                 stmmac_set_tx_ic(priv, tx_desc);
4725                 priv->xstats.tx_set_ic_bit++;
4726         }
4727
4728         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4729
4730         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4731         tx_q->cur_tx = entry;
4732
4733         return STMMAC_XDP_TX;
4734 }
4735
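/* Pick the TX queue used for XDP transmission by folding the CPU id into the
 * range of TX queues in use.
 */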
4736 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4737                                    int cpu)
4738 {
4739         int index = cpu;
4740
4741         if (unlikely(index < 0))
4742                 index = 0;
4743
4744         while (index >= priv->plat->tx_queues_to_use)
4745                 index -= priv->plat->tx_queues_to_use;
4746
4747         return index;
4748 }
4749
4750 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4751                                 struct xdp_buff *xdp)
4752 {
4753         struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4754         int cpu = smp_processor_id();
4755         struct netdev_queue *nq;
4756         int queue;
4757         int res;
4758
4759         if (unlikely(!xdpf))
4760                 return STMMAC_XDP_CONSUMED;
4761
4762         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4763         nq = netdev_get_tx_queue(priv->dev, queue);
4764
4765         __netif_tx_lock(nq, cpu);
4766         /* Avoids TX time-out as we are sharing with slow path */
4767         txq_trans_cond_update(nq);
4768
4769         res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4770         if (res == STMMAC_XDP_TX)
4771                 stmmac_flush_tx_descriptors(priv, queue);
4772
4773         __netif_tx_unlock(nq);
4774
4775         return res;
4776 }
4777
4778 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4779                                  struct bpf_prog *prog,
4780                                  struct xdp_buff *xdp)
4781 {
4782         u32 act;
4783         int res;
4784
4785         act = bpf_prog_run_xdp(prog, xdp);
4786         switch (act) {
4787         case XDP_PASS:
4788                 res = STMMAC_XDP_PASS;
4789                 break;
4790         case XDP_TX:
4791                 res = stmmac_xdp_xmit_back(priv, xdp);
4792                 break;
4793         case XDP_REDIRECT:
4794                 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4795                         res = STMMAC_XDP_CONSUMED;
4796                 else
4797                         res = STMMAC_XDP_REDIRECT;
4798                 break;
4799         default:
4800                 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4801                 fallthrough;
4802         case XDP_ABORTED:
4803                 trace_xdp_exception(priv->dev, prog, act);
4804                 fallthrough;
4805         case XDP_DROP:
4806                 res = STMMAC_XDP_CONSUMED;
4807                 break;
4808         }
4809
4810         return res;
4811 }
4812
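/* Returns NULL for XDP_PASS and the verdict encoded as an ERR_PTR otherwise,
 * so the RX path can tell it apart from a real skb and decode it with
 * -PTR_ERR().
 */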
4813 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4814                                            struct xdp_buff *xdp)
4815 {
4816         struct bpf_prog *prog;
4817         int res;
4818
4819         prog = READ_ONCE(priv->xdp_prog);
4820         if (!prog) {
4821                 res = STMMAC_XDP_PASS;
4822                 goto out;
4823         }
4824
4825         res = __stmmac_xdp_run_prog(priv, prog, xdp);
4826 out:
4827         return ERR_PTR(-res);
4828 }
4829
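/* Called at the end of an RX poll: arm the TX timer for any XDP_TX frames
 * queued during the poll and flush pending XDP redirects.
 */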
4830 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4831                                    int xdp_status)
4832 {
4833         int cpu = smp_processor_id();
4834         int queue;
4835
4836         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4837
4838         if (xdp_status & STMMAC_XDP_TX)
4839                 stmmac_tx_timer_arm(priv, queue);
4840
4841         if (xdp_status & STMMAC_XDP_REDIRECT)
4842                 xdp_do_flush();
4843 }
4844
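/* Build an skb from an XSK zero-copy buffer by copying data and metadata out,
 * so the XSK buffer itself can be returned to the pool by the caller.
 */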
4845 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4846                                                struct xdp_buff *xdp)
4847 {
4848         unsigned int metasize = xdp->data - xdp->data_meta;
4849         unsigned int datasize = xdp->data_end - xdp->data;
4850         struct sk_buff *skb;
4851
4852         skb = __napi_alloc_skb(&ch->rxtx_napi,
4853                                xdp->data_end - xdp->data_hard_start,
4854                                GFP_ATOMIC | __GFP_NOWARN);
4855         if (unlikely(!skb))
4856                 return NULL;
4857
4858         skb_reserve(skb, xdp->data - xdp->data_hard_start);
4859         memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4860         if (metasize)
4861                 skb_metadata_set(skb, metasize);
4862
4863         return skb;
4864 }
4865
4866 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4867                                    struct dma_desc *p, struct dma_desc *np,
4868                                    struct xdp_buff *xdp)
4869 {
4870         struct stmmac_channel *ch = &priv->channel[queue];
4871         unsigned int len = xdp->data_end - xdp->data;
4872         enum pkt_hash_types hash_type;
4873         int coe = priv->hw->rx_csum;
4874         struct sk_buff *skb;
4875         u32 hash;
4876
4877         skb = stmmac_construct_skb_zc(ch, xdp);
4878         if (!skb) {
4879                 priv->dev->stats.rx_dropped++;
4880                 return;
4881         }
4882
4883         stmmac_get_rx_hwtstamp(priv, p, np, skb);
4884         stmmac_rx_vlan(priv->dev, skb);
4885         skb->protocol = eth_type_trans(skb, priv->dev);
4886
4887         if (unlikely(!coe))
4888                 skb_checksum_none_assert(skb);
4889         else
4890                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4891
4892         if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4893                 skb_set_hash(skb, hash, hash_type);
4894
4895         skb_record_rx_queue(skb, queue);
4896         napi_gro_receive(&ch->rxtx_napi, skb);
4897
4898         priv->dev->stats.rx_packets++;
4899         priv->dev->stats.rx_bytes += len;
4900 }
4901
4902 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4903 {
4904         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4905         unsigned int entry = rx_q->dirty_rx;
4906         struct dma_desc *rx_desc = NULL;
4907         bool ret = true;
4908
4909         budget = min(budget, stmmac_rx_dirty(priv, queue));
4910
4911         while (budget-- > 0 && entry != rx_q->cur_rx) {
4912                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4913                 dma_addr_t dma_addr;
4914                 bool use_rx_wd;
4915
4916                 if (!buf->xdp) {
4917                         buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4918                         if (!buf->xdp) {
4919                                 ret = false;
4920                                 break;
4921                         }
4922                 }
4923
4924                 if (priv->extend_desc)
4925                         rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4926                 else
4927                         rx_desc = rx_q->dma_rx + entry;
4928
4929                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4930                 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4931                 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4932                 stmmac_refill_desc3(priv, rx_q, rx_desc);
4933
4934                 rx_q->rx_count_frames++;
4935                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4936                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4937                         rx_q->rx_count_frames = 0;
4938
4939                 use_rx_wd = !priv->rx_coal_frames[queue];
4940                 use_rx_wd |= rx_q->rx_count_frames > 0;
4941                 if (!priv->use_riwt)
4942                         use_rx_wd = false;
4943
4944                 dma_wmb();
4945                 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4946
4947                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4948         }
4949
4950         if (rx_desc) {
4951                 rx_q->dirty_rx = entry;
4952                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4953                                      (rx_q->dirty_rx * sizeof(struct dma_desc));
4954                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4955         }
4956
4957         return ret;
4958 }
4959
4960 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4961 {
4962         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4963         unsigned int count = 0, error = 0, len = 0;
4964         int dirty = stmmac_rx_dirty(priv, queue);
4965         unsigned int next_entry = rx_q->cur_rx;
4966         unsigned int desc_size;
4967         struct bpf_prog *prog;
4968         bool failure = false;
4969         int xdp_status = 0;
4970         int status = 0;
4971
4972         if (netif_msg_rx_status(priv)) {
4973                 void *rx_head;
4974
4975                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4976                 if (priv->extend_desc) {
4977                         rx_head = (void *)rx_q->dma_erx;
4978                         desc_size = sizeof(struct dma_extended_desc);
4979                 } else {
4980                         rx_head = (void *)rx_q->dma_rx;
4981                         desc_size = sizeof(struct dma_desc);
4982                 }
4983
4984                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
4985                                     rx_q->dma_rx_phy, desc_size);
4986         }
4987         while (count < limit) {
4988                 struct stmmac_rx_buffer *buf;
4989                 unsigned int buf1_len = 0;
4990                 struct dma_desc *np, *p;
4991                 int entry;
4992                 int res;
4993
4994                 if (!count && rx_q->state_saved) {
4995                         error = rx_q->state.error;
4996                         len = rx_q->state.len;
4997                 } else {
4998                         rx_q->state_saved = false;
4999                         error = 0;
5000                         len = 0;
5001                 }
5002
5003                 if (count >= limit)
5004                         break;
5005
5006 read_again:
5007                 buf1_len = 0;
5008                 entry = next_entry;
5009                 buf = &rx_q->buf_pool[entry];
5010
5011                 if (dirty >= STMMAC_RX_FILL_BATCH) {
5012                         failure = failure ||
5013                                   !stmmac_rx_refill_zc(priv, queue, dirty);
5014                         dirty = 0;
5015                 }
5016
5017                 if (priv->extend_desc)
5018                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5019                 else
5020                         p = rx_q->dma_rx + entry;
5021
5022                 /* read the status of the incoming frame */
5023                 status = stmmac_rx_status(priv, &priv->dev->stats,
5024                                           &priv->xstats, p);
5025                 /* check if the descriptor is still owned by the DMA; otherwise go ahead */
5026                 if (unlikely(status & dma_own))
5027                         break;
5028
5029                 /* Prefetch the next RX descriptor */
5030                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5031                                                 priv->dma_conf.dma_rx_size);
5032                 next_entry = rx_q->cur_rx;
5033
5034                 if (priv->extend_desc)
5035                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5036                 else
5037                         np = rx_q->dma_rx + next_entry;
5038
5039                 prefetch(np);
5040
5041                 /* Ensure a valid XSK buffer before proceeding */
5042                 if (!buf->xdp)
5043                         break;
5044
5045                 if (priv->extend_desc)
5046                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5047                                                   &priv->xstats,
5048                                                   rx_q->dma_erx + entry);
5049                 if (unlikely(status == discard_frame)) {
5050                         xsk_buff_free(buf->xdp);
5051                         buf->xdp = NULL;
5052                         dirty++;
5053                         error = 1;
5054                         if (!priv->hwts_rx_en)
5055                                 priv->dev->stats.rx_errors++;
5056                 }
5057
5058                 if (unlikely(error && (status & rx_not_ls)))
5059                         goto read_again;
5060                 if (unlikely(error)) {
5061                         count++;
5062                         continue;
5063                 }
5064
5065                 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5066                 if (likely(status & rx_not_ls)) {
5067                         xsk_buff_free(buf->xdp);
5068                         buf->xdp = NULL;
5069                         dirty++;
5070                         count++;
5071                         goto read_again;
5072                 }
5073
5074                 /* XDP ZC frames only support primary buffers for now */
5075                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5076                 len += buf1_len;
5077
5078                 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5079                  * Type frames (LLC/LLC-SNAP)
5080                  *
5081                  * llc_snap is never checked in GMAC >= 4, so this ACS
5082                  * feature is always disabled and packets need to be
5083                  * stripped manually.
5084                  */
5085                 if (likely(!(status & rx_not_ls)) &&
5086                     (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5087                      unlikely(status != llc_snap))) {
5088                         buf1_len -= ETH_FCS_LEN;
5089                         len -= ETH_FCS_LEN;
5090                 }
5091
5092                 /* RX buffer is good and fits into an XSK pool buffer */
5093                 buf->xdp->data_end = buf->xdp->data + buf1_len;
5094                 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5095
5096                 prog = READ_ONCE(priv->xdp_prog);
5097                 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5098
5099                 switch (res) {
5100                 case STMMAC_XDP_PASS:
5101                         stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5102                         xsk_buff_free(buf->xdp);
5103                         break;
5104                 case STMMAC_XDP_CONSUMED:
5105                         xsk_buff_free(buf->xdp);
5106                         priv->dev->stats.rx_dropped++;
5107                         break;
5108                 case STMMAC_XDP_TX:
5109                 case STMMAC_XDP_REDIRECT:
5110                         xdp_status |= res;
5111                         break;
5112                 }
5113
5114                 buf->xdp = NULL;
5115                 dirty++;
5116                 count++;
5117         }
5118
5119         if (status & rx_not_ls) {
5120                 rx_q->state_saved = true;
5121                 rx_q->state.error = error;
5122                 rx_q->state.len = len;
5123         }
5124
5125         stmmac_finalize_xdp_rx(priv, xdp_status);
5126
5127         priv->xstats.rx_pkt_n += count;
5128         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5129
5130         if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5131                 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5132                         xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5133                 else
5134                         xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5135
5136                 return (int)count;
5137         }
5138
5139         return failure ? limit : (int)count;
5140 }
5141
5142 /**
5143  * stmmac_rx - manage the receive process
5144  * @priv: driver private structure
5145  * @limit: napi budget
5146  * @queue: RX queue index.
5147  * Description : this is the function called by the napi poll method.
5148  * It gets all the frames inside the ring.
5149  */
5150 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5151 {
5152         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5153         struct stmmac_channel *ch = &priv->channel[queue];
5154         unsigned int count = 0, error = 0, len = 0;
5155         int status = 0, coe = priv->hw->rx_csum;
5156         unsigned int next_entry = rx_q->cur_rx;
5157         enum dma_data_direction dma_dir;
5158         unsigned int desc_size;
5159         struct sk_buff *skb = NULL;
5160         struct xdp_buff xdp;
5161         int xdp_status = 0;
5162         int buf_sz;
5163
5164         dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5165         buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5166
5167         if (netif_msg_rx_status(priv)) {
5168                 void *rx_head;
5169
5170                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5171                 if (priv->extend_desc) {
5172                         rx_head = (void *)rx_q->dma_erx;
5173                         desc_size = sizeof(struct dma_extended_desc);
5174                 } else {
5175                         rx_head = (void *)rx_q->dma_rx;
5176                         desc_size = sizeof(struct dma_desc);
5177                 }
5178
5179                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5180                                     rx_q->dma_rx_phy, desc_size);
5181         }
5182         while (count < limit) {
5183                 unsigned int buf1_len = 0, buf2_len = 0;
5184                 enum pkt_hash_types hash_type;
5185                 struct stmmac_rx_buffer *buf;
5186                 struct dma_desc *np, *p;
5187                 int entry;
5188                 u32 hash;
5189
5190                 if (!count && rx_q->state_saved) {
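                /* If the previous poll stopped in the middle of a frame that
                 * spans several descriptors, resume from the saved skb, error
                 * and length state.
                 */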
5191                         skb = rx_q->state.skb;
5192                         error = rx_q->state.error;
5193                         len = rx_q->state.len;
5194                 } else {
5195                         rx_q->state_saved = false;
5196                         skb = NULL;
5197                         error = 0;
5198                         len = 0;
5199                 }
5200
5201                 if (count >= limit)
5202                         break;
5203
5204 read_again:
5205                 buf1_len = 0;
5206                 buf2_len = 0;
5207                 entry = next_entry;
5208                 buf = &rx_q->buf_pool[entry];
5209
5210                 if (priv->extend_desc)
5211                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5212                 else
5213                         p = rx_q->dma_rx + entry;
5214
5215                 /* read the status of the incoming frame */
5216                 status = stmmac_rx_status(priv, &priv->dev->stats,
5217                                 &priv->xstats, p);
5218                 /* check if the descriptor is still owned by the DMA; otherwise go ahead */
5219                 if (unlikely(status & dma_own))
5220                         break;
5221
5222                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5223                                                 priv->dma_conf.dma_rx_size);
5224                 next_entry = rx_q->cur_rx;
5225
5226                 if (priv->extend_desc)
5227                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5228                 else
5229                         np = rx_q->dma_rx + next_entry;
5230
5231                 prefetch(np);
5232
5233                 if (priv->extend_desc)
5234                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5235                                         &priv->xstats, rx_q->dma_erx + entry);
5236                 if (unlikely(status == discard_frame)) {
5237                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5238                         buf->page = NULL;
5239                         error = 1;
5240                         if (!priv->hwts_rx_en)
5241                                 priv->dev->stats.rx_errors++;
5242                 }
5243
5244                 if (unlikely(error && (status & rx_not_ls)))
5245                         goto read_again;
5246                 if (unlikely(error)) {
5247                         dev_kfree_skb(skb);
5248                         skb = NULL;
5249                         count++;
5250                         continue;
5251                 }
5252
5253                 /* Buffer is good. Go on. */
5254
5255                 prefetch(page_address(buf->page) + buf->page_offset);
5256                 if (buf->sec_page)
5257                         prefetch(page_address(buf->sec_page));
5258
5259                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5260                 len += buf1_len;
5261                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5262                 len += buf2_len;
5263
5264                 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5265                  * Type frames (LLC/LLC-SNAP)
5266                  *
5267                  * llc_snap is never checked in GMAC >= 4, so this ACS
5268                  * feature is always disabled and packets need to be
5269                  * stripped manually.
5270                  */
5271                 if (likely(!(status & rx_not_ls)) &&
5272                     (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5273                      unlikely(status != llc_snap))) {
5274                         if (buf2_len) {
5275                                 buf2_len -= ETH_FCS_LEN;
5276                                 len -= ETH_FCS_LEN;
5277                         } else if (buf1_len) {
5278                                 buf1_len -= ETH_FCS_LEN;
5279                                 len -= ETH_FCS_LEN;
5280                         }
5281                 }
5282
5283                 if (!skb) {
5284                         unsigned int pre_len, sync_len;
5285
5286                         dma_sync_single_for_cpu(priv->device, buf->addr,
5287                                                 buf1_len, dma_dir);
5288
5289                         xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5290                         xdp_prepare_buff(&xdp, page_address(buf->page),
5291                                          buf->page_offset, buf1_len, false);
5292
5293                         pre_len = xdp.data_end - xdp.data_hard_start -
5294                                   buf->page_offset;
5295                         skb = stmmac_xdp_run_prog(priv, &xdp);
5296                         /* Because of xdp_adjust_tail, the DMA sync for_device
5297                          * must cover the max length the CPU has touched
5298                          */
5299                         sync_len = xdp.data_end - xdp.data_hard_start -
5300                                    buf->page_offset;
5301                         sync_len = max(sync_len, pre_len);
5302
5303                         /* For verdicts other than XDP_PASS */
5304                         if (IS_ERR(skb)) {
5305                                 unsigned int xdp_res = -PTR_ERR(skb);
5306
5307                                 if (xdp_res & STMMAC_XDP_CONSUMED) {
5308                                         page_pool_put_page(rx_q->page_pool,
5309                                                            virt_to_head_page(xdp.data),
5310                                                            sync_len, true);
5311                                         buf->page = NULL;
5312                                         priv->dev->stats.rx_dropped++;
5313
5314                                         /* Clear skb, as it was used to
5315                                          * carry the XDP verdict status.
5316                                          */
5317                                         skb = NULL;
5318
5319                                         if (unlikely((status & rx_not_ls)))
5320                                                 goto read_again;
5321
5322                                         count++;
5323                                         continue;
5324                                 } else if (xdp_res & (STMMAC_XDP_TX |
5325                                                       STMMAC_XDP_REDIRECT)) {
5326                                         xdp_status |= xdp_res;
5327                                         buf->page = NULL;
5328                                         skb = NULL;
5329                                         count++;
5330                                         continue;
5331                                 }
5332                         }
5333                 }
5334
5335                 if (!skb) {
5336                         /* XDP program may expand or reduce tail */
5337                         buf1_len = xdp.data_end - xdp.data;
5338
5339                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5340                         if (!skb) {
5341                                 priv->dev->stats.rx_dropped++;
5342                                 count++;
5343                                 goto drain_data;
5344                         }
5345
5346                         /* XDP program may adjust header */
5347                         skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5348                         skb_put(skb, buf1_len);
5349
5350                         /* Data payload copied into SKB, page ready for recycle */
5351                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5352                         buf->page = NULL;
5353                 } else if (buf1_len) {
5354                         dma_sync_single_for_cpu(priv->device, buf->addr,
5355                                                 buf1_len, dma_dir);
5356                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5357                                         buf->page, buf->page_offset, buf1_len,
5358                                         priv->dma_conf.dma_buf_sz);
5359
5360                         /* Data payload appended into SKB */
5361                         page_pool_release_page(rx_q->page_pool, buf->page);
5362                         buf->page = NULL;
5363                 }
5364
5365                 if (buf2_len) {
5366                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5367                                                 buf2_len, dma_dir);
5368                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5369                                         buf->sec_page, 0, buf2_len,
5370                                         priv->dma_conf.dma_buf_sz);
5371
5372                         /* Data payload appended into SKB */
5373                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
5374                         buf->sec_page = NULL;
5375                 }
5376
5377 drain_data:
5378                 if (likely(status & rx_not_ls))
5379                         goto read_again;
5380                 if (!skb)
5381                         continue;
5382
5383                 /* Got entire packet into SKB. Finish it. */
5384
5385                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5386                 stmmac_rx_vlan(priv->dev, skb);
5387                 skb->protocol = eth_type_trans(skb, priv->dev);
5388
5389                 if (unlikely(!coe))
5390                         skb_checksum_none_assert(skb);
5391                 else
5392                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5393
5394                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5395                         skb_set_hash(skb, hash, hash_type);
5396
5397                 skb_record_rx_queue(skb, queue);
5398                 napi_gro_receive(&ch->rx_napi, skb);
5399                 skb = NULL;
5400
5401                 priv->dev->stats.rx_packets++;
5402                 priv->dev->stats.rx_bytes += len;
5403                 count++;
5404         }
5405
5406         if (status & rx_not_ls || skb) {
5407                 rx_q->state_saved = true;
5408                 rx_q->state.skb = skb;
5409                 rx_q->state.error = error;
5410                 rx_q->state.len = len;
5411         }
5412
5413         stmmac_finalize_xdp_rx(priv, xdp_status);
5414
5415         stmmac_rx_refill(priv, queue);
5416
5417         priv->xstats.rx_pkt_n += count;
5418         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5419
5420         return count;
5421 }
5422
5423 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5424 {
5425         struct stmmac_channel *ch =
5426                 container_of(napi, struct stmmac_channel, rx_napi);
5427         struct stmmac_priv *priv = ch->priv_data;
5428         u32 chan = ch->index;
5429         int work_done;
5430
5431         priv->xstats.napi_poll++;
5432
5433         work_done = stmmac_rx(priv, budget, chan);
5434         if (work_done < budget && napi_complete_done(napi, work_done)) {
5435                 unsigned long flags;
5436
5437                 spin_lock_irqsave(&ch->lock, flags);
5438                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5439                 spin_unlock_irqrestore(&ch->lock, flags);
5440         }
5441
5442         return work_done;
5443 }
5444
5445 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5446 {
5447         struct stmmac_channel *ch =
5448                 container_of(napi, struct stmmac_channel, tx_napi);
5449         struct stmmac_priv *priv = ch->priv_data;
5450         u32 chan = ch->index;
5451         int work_done;
5452
5453         priv->xstats.napi_poll++;
5454
5455         work_done = stmmac_tx_clean(priv, budget, chan);
5456         work_done = min(work_done, budget);
5457
5458         if (work_done < budget && napi_complete_done(napi, work_done)) {
5459                 unsigned long flags;
5460
5461                 spin_lock_irqsave(&ch->lock, flags);
5462                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5463                 spin_unlock_irqrestore(&ch->lock, flags);
5464         }
5465
5466         return work_done;
5467 }
5468
5469 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5470 {
5471         struct stmmac_channel *ch =
5472                 container_of(napi, struct stmmac_channel, rxtx_napi);
5473         struct stmmac_priv *priv = ch->priv_data;
5474         int rx_done, tx_done, rxtx_done;
5475         u32 chan = ch->index;
5476
5477         priv->xstats.napi_poll++;
5478
5479         tx_done = stmmac_tx_clean(priv, budget, chan);
5480         tx_done = min(tx_done, budget);
5481
5482         rx_done = stmmac_rx_zc(priv, budget, chan);
5483
5484         rxtx_done = max(tx_done, rx_done);
5485
5486         /* If either TX or RX work is not complete, return budget
5487          * and keep polling
5488          */
5489         if (rxtx_done >= budget)
5490                 return budget;
5491
5492         /* all work done, exit the polling mode */
5493         if (napi_complete_done(napi, rxtx_done)) {
5494                 unsigned long flags;
5495
5496                 spin_lock_irqsave(&ch->lock, flags);
5497                 /* Both RX and TX work are complete,
5498                  * so enable both RX & TX IRQs.
5499                  */
5500                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5501                 spin_unlock_irqrestore(&ch->lock, flags);
5502         }
5503
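        /* A poll routine that has called napi_complete_done() must return
         * strictly less than the full budget.
         */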
5504         return min(rxtx_done, budget - 1);
5505 }
5506
5507 /**
5508  *  stmmac_tx_timeout
5509  *  @dev : Pointer to net device structure
5510  *  @txqueue: the index of the hanging transmit queue
5511  *  Description: this function is called when a packet transmission fails to
5512  *   complete within a reasonable time. The driver will mark the error in the
5513  *   netdev structure and arrange for the device to be reset to a sane state
5514  *   in order to transmit a new packet.
5515  */
5516 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5517 {
5518         struct stmmac_priv *priv = netdev_priv(dev);
5519
5520         stmmac_global_err(priv);
5521 }
5522
5523 /**
5524  *  stmmac_set_rx_mode - entry point for multicast addressing
5525  *  @dev : pointer to the device structure
5526  *  Description:
5527  *  This function is a driver entry point which gets called by the kernel
5528  *  whenever multicast addresses must be enabled/disabled.
5529  *  Return value:
5530  *  void.
5531  */
5532 static void stmmac_set_rx_mode(struct net_device *dev)
5533 {
5534         struct stmmac_priv *priv = netdev_priv(dev);
5535
5536         stmmac_set_filter(priv, priv->hw, dev);
5537 }
5538
5539 /**
5540  *  stmmac_change_mtu - entry point to change MTU size for the device.
5541  *  @dev : device pointer.
5542  *  @new_mtu : the new MTU size for the device.
5543  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5544  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5545  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5546  *  Return value:
5547  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5548  *  file on failure.
5549  */
5550 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5551 {
5552         struct stmmac_priv *priv = netdev_priv(dev);
5553         int txfifosz = priv->plat->tx_fifo_size;
5554         struct stmmac_dma_conf *dma_conf;
5555         const int mtu = new_mtu;
5556         int ret;
5557
5558         if (txfifosz == 0)
5559                 txfifosz = priv->dma_cap.tx_fifo_size;
5560
5561         txfifosz /= priv->plat->tx_queues_to_use;
5562
5563         if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5564                 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5565                 return -EINVAL;
5566         }
5567
5568         new_mtu = STMMAC_ALIGN(new_mtu);
5569
5570         /* Reject if the TX FIFO is too small for the MTU or the MTU is too large */
5571         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5572                 return -EINVAL;
5573
5574         if (netif_running(dev)) {
5575                 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5576                 /* Try to allocate the new DMA conf with the new mtu */
5577                 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5578                 if (IS_ERR(dma_conf)) {
5579                         netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5580                                    mtu);
5581                         return PTR_ERR(dma_conf);
5582                 }
5583
5584                 stmmac_release(dev);
5585
5586                 ret = __stmmac_open(dev, dma_conf);
5587                 kfree(dma_conf);
5588                 if (ret) {
5589                         netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5590                         return ret;
5591                 }
5592
5593                 stmmac_set_rx_mode(dev);
5594         }
5595
5596         dev->mtu = mtu;
5597         netdev_update_features(dev);
5598
5599         return 0;
5600 }
5601
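/**
 *  stmmac_fix_features - adjust the requested feature set to the HW capabilities
 *  @dev: device pointer
 *  @features: features requested by the stack
 *  Description: drops RX/TX checksum offload when the hardware (or the
 *  bugged-jumbo quirk with an oversized MTU) cannot support it, and caches
 *  whether TSO stays enabled.
 */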
5602 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5603                                              netdev_features_t features)
5604 {
5605         struct stmmac_priv *priv = netdev_priv(dev);
5606
5607         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5608                 features &= ~NETIF_F_RXCSUM;
5609
5610         if (!priv->plat->tx_coe)
5611                 features &= ~NETIF_F_CSUM_MASK;
5612
5613         /* Some GMAC devices have buggy Jumbo frame support that
5614          * requires the Tx COE to be disabled for oversized frames
5615          * (due to limited buffer sizes). In this case we disable
5616          * the TX csum insertion in the TDES and do not use SF.
5617          */
5618         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5619                 features &= ~NETIF_F_CSUM_MASK;
5620
5621         /* Disable tso if asked by ethtool */
5622         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5623                 if (features & NETIF_F_TSO)
5624                         priv->tso = true;
5625                 else
5626                         priv->tso = false;
5627         }
5628
5629         return features;
5630 }
5631
5632 static int stmmac_set_features(struct net_device *netdev,
5633                                netdev_features_t features)
5634 {
5635         struct stmmac_priv *priv = netdev_priv(netdev);
5636
5637         /* Keep the COE type if RX checksum offload is supported */
5638         if (features & NETIF_F_RXCSUM)
5639                 priv->hw->rx_csum = priv->plat->rx_coe;
5640         else
5641                 priv->hw->rx_csum = 0;
5642         /* No check needed because rx_coe has been set before and it will be
5643          * fixed up if there is an issue.
5644          */
5645         stmmac_rx_ipc(priv, priv->hw);
5646
5647         if (priv->sph_cap) {
5648                 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5649                 u32 chan;
5650
5651                 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5652                         stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5653         }
5654
5655         return 0;
5656 }
5657
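/**
 *  stmmac_fpe_event_status - track the Frame Preemption (FPE) handshake
 *  @priv: driver private structure
 *  @status: FPE event bits reported by the hardware
 *  Description: updates the local and link-partner FPE states based on the
 *  verify/response mPackets that were sent or received and, while the
 *  handshake is enabled, schedules the FPE workqueue task to complete it.
 */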
5658 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5659 {
5660         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5661         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5662         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5663         bool *hs_enable = &fpe_cfg->hs_enable;
5664
5665         if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5666                 return;
5667
5668         /* If LP has sent verify mPacket, LP is FPE capable */
5669         if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5670                 if (*lp_state < FPE_STATE_CAPABLE)
5671                         *lp_state = FPE_STATE_CAPABLE;
5672
5673                 /* If the user has requested FPE enable, respond quickly */
5674                 if (*hs_enable)
5675                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5676                                                 MPACKET_RESPONSE);
5677         }
5678
5679         /* If Local has sent verify mPacket, Local is FPE capable */
5680         if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5681                 if (*lo_state < FPE_STATE_CAPABLE)
5682                         *lo_state = FPE_STATE_CAPABLE;
5683         }
5684
5685         /* If LP has sent response mPacket, LP is entering FPE ON */
5686         if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5687                 *lp_state = FPE_STATE_ENTERING_ON;
5688
5689         /* If Local has sent response mPacket, Local is entering FPE ON */
5690         if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5691                 *lo_state = FPE_STATE_ENTERING_ON;
5692
5693         if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5694             !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5695             priv->fpe_wq) {
5696                 queue_work(priv->fpe_wq, &priv->fpe_task);
5697         }
5698 }
5699
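/**
 *  stmmac_common_interrupt - handle the non-DMA interrupt sources
 *  @priv: driver private structure
 *  Description: services wake-up, EST, FPE, LPI and MTL queue events,
 *  updates the carrier state when a PCS reports the link status, and
 *  checks the timestamp interrupt.
 */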
5700 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5701 {
5702         u32 rx_cnt = priv->plat->rx_queues_to_use;
5703         u32 tx_cnt = priv->plat->tx_queues_to_use;
5704         u32 queues_count;
5705         u32 queue;
5706         bool xmac;
5707
5708         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5709         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5710
5711         if (priv->irq_wake)
5712                 pm_wakeup_event(priv->device, 0);
5713
5714         if (priv->dma_cap.estsel)
5715                 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5716                                       &priv->xstats, tx_cnt);
5717
5718         if (priv->dma_cap.fpesel) {
5719                 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5720                                                    priv->dev);
5721
5722                 stmmac_fpe_event_status(priv, status);
5723         }
5724
5725         /* To handle the GMAC's own interrupts */
5726         if ((priv->plat->has_gmac) || xmac) {
5727                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5728
5729                 if (unlikely(status)) {
5730                         /* For LPI we need to save the tx status */
5731                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5732                                 priv->tx_path_in_lpi_mode = true;
5733                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5734                                 priv->tx_path_in_lpi_mode = false;
5735                 }
5736
5737                 for (queue = 0; queue < queues_count; queue++) {
5738                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
5739                                                             queue);
5740                 }
5741
5742                 /* PCS link status */
5743                 if (priv->hw->pcs) {
5744                         if (priv->xstats.pcs_link)
5745                                 netif_carrier_on(priv->dev);
5746                         else
5747                                 netif_carrier_off(priv->dev);
5748                 }
5749
5750                 stmmac_timestamp_interrupt(priv, priv);
5751         }
5752 }
5753
5754 /**
5755  *  stmmac_interrupt - main ISR
5756  *  @irq: interrupt number.
5757  *  @dev_id: to pass the net device pointer.
5758  *  Description: this is the main driver interrupt service routine.
5759  *  It can call:
5760  *  o DMA service routine (to manage incoming frame reception and transmission
5761  *    status)
5762  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5763  *    interrupts.
5764  */
5765 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5766 {
5767         struct net_device *dev = (struct net_device *)dev_id;
5768         struct stmmac_priv *priv = netdev_priv(dev);
5769
5770         /* Check if adapter is up */
5771         if (test_bit(STMMAC_DOWN, &priv->state))
5772                 return IRQ_HANDLED;
5773
5774         /* Check if a fatal error happened */
5775         if (stmmac_safety_feat_interrupt(priv))
5776                 return IRQ_HANDLED;
5777
5778         /* To handle Common interrupts */
5779         stmmac_common_interrupt(priv);
5780
5781         /* To handle DMA interrupts */
5782         stmmac_dma_interrupt(priv);
5783
5784         return IRQ_HANDLED;
5785 }
5786
5787 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5788 {
5789         struct net_device *dev = (struct net_device *)dev_id;
5790         struct stmmac_priv *priv = netdev_priv(dev);
5791
5792         if (unlikely(!dev)) {
5793                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5794                 return IRQ_NONE;
5795         }
5796
5797         /* Check if adapter is up */
5798         if (test_bit(STMMAC_DOWN, &priv->state))
5799                 return IRQ_HANDLED;
5800
5801         /* To handle Common interrupts */
5802         stmmac_common_interrupt(priv);
5803
5804         return IRQ_HANDLED;
5805 }
5806
5807 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5808 {
5809         struct net_device *dev = (struct net_device *)dev_id;
5810         struct stmmac_priv *priv = netdev_priv(dev);
5811
5812         if (unlikely(!dev)) {
5813                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5814                 return IRQ_NONE;
5815         }
5816
5817         /* Check if adapter is up */
5818         if (test_bit(STMMAC_DOWN, &priv->state))
5819                 return IRQ_HANDLED;
5820
5821         /* Check if a fatal error happened */
5822         stmmac_safety_feat_interrupt(priv);
5823
5824         return IRQ_HANDLED;
5825 }
5826
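/**
 *  stmmac_msi_intr_tx - per-queue TX MSI interrupt handler
 *  @irq: interrupt number
 *  @data: pointer to the TX queue that raised the interrupt
 *  Description: checks the TX DMA status and schedules NAPI for the channel;
 *  on TX hard errors it bumps the DMA threshold or starts TX error recovery.
 */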
5827 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5828 {
5829         struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5830         struct stmmac_dma_conf *dma_conf;
5831         int chan = tx_q->queue_index;
5832         struct stmmac_priv *priv;
5833         int status;
5834
5835         dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5836         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5837
5838         if (unlikely(!data)) {
5839                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5840                 return IRQ_NONE;
5841         }
5842
5843         /* Check if adapter is up */
5844         if (test_bit(STMMAC_DOWN, &priv->state))
5845                 return IRQ_HANDLED;
5846
5847         status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5848
5849         if (unlikely(status & tx_hard_error_bump_tc)) {
5850                 /* Try to bump up the dma threshold on this failure */
5851                 stmmac_bump_dma_threshold(priv, chan);
5852         } else if (unlikely(status == tx_hard_error)) {
5853                 stmmac_tx_err(priv, chan);
5854         }
5855
5856         return IRQ_HANDLED;
5857 }
5858
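/**
 *  stmmac_msi_intr_rx - per-queue RX MSI interrupt handler
 *  @irq: interrupt number
 *  @data: pointer to the RX queue that raised the interrupt
 *  Description: checks the RX DMA status and schedules NAPI for the channel.
 */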
5859 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5860 {
5861         struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5862         struct stmmac_dma_conf *dma_conf;
5863         int chan = rx_q->queue_index;
5864         struct stmmac_priv *priv;
5865
5866         dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5867         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5868
5869         if (unlikely(!data)) {
5870                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5871                 return IRQ_NONE;
5872         }
5873
5874         /* Check if adapter is up */
5875         if (test_bit(STMMAC_DOWN, &priv->state))
5876                 return IRQ_HANDLED;
5877
5878         stmmac_napi_check(priv, chan, DMA_DIR_RX);
5879
5880         return IRQ_HANDLED;
5881 }
5882
5883 #ifdef CONFIG_NET_POLL_CONTROLLER
5884 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5885  * to allow network I/O with interrupts disabled.
5886  */
5887 static void stmmac_poll_controller(struct net_device *dev)
5888 {
5889         struct stmmac_priv *priv = netdev_priv(dev);
5890         int i;
5891
5892         /* If adapter is down, do nothing */
5893         if (test_bit(STMMAC_DOWN, &priv->state))
5894                 return;
5895
5896         if (priv->plat->multi_msi_en) {
5897                 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5898                         stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
5899
5900                 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5901                         stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
5902         } else {
5903                 disable_irq(dev->irq);
5904                 stmmac_interrupt(dev->irq, dev);
5905                 enable_irq(dev->irq);
5906         }
5907 }
5908 #endif
5909
5910 /**
5911  *  stmmac_ioctl - Entry point for the Ioctl
5912  *  @dev: Device pointer.
5913  *  @rq: An IOCTL-specific structure that can contain a pointer to
5914  *  a proprietary structure used to pass information to the driver.
5915  *  @cmd: IOCTL command
5916  *  Description:
5917  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5918  */
5919 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5920 {
5921         struct stmmac_priv *priv = netdev_priv(dev);
5922         int ret = -EOPNOTSUPP;
5923
5924         if (!netif_running(dev))
5925                 return -EINVAL;
5926
5927         switch (cmd) {
5928         case SIOCGMIIPHY:
5929         case SIOCGMIIREG:
5930         case SIOCSMIIREG:
5931                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5932                 break;
5933         case SIOCSHWTSTAMP:
5934                 ret = stmmac_hwtstamp_set(dev, rq);
5935                 break;
5936         case SIOCGHWTSTAMP:
5937                 ret = stmmac_hwtstamp_get(dev, rq);
5938                 break;
5939         default:
5940                 break;
5941         }
5942
5943         return ret;
5944 }
5945
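/* tc block callback: offload CLSU32 and FLOWER classifiers to the hardware.
 * All queues are disabled while the filters are programmed and re-enabled
 * afterwards.
 */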
5946 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5947                                     void *cb_priv)
5948 {
5949         struct stmmac_priv *priv = cb_priv;
5950         int ret = -EOPNOTSUPP;
5951
5952         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5953                 return ret;
5954
5955         __stmmac_disable_all_queues(priv);
5956
5957         switch (type) {
5958         case TC_SETUP_CLSU32:
5959                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5960                 break;
5961         case TC_SETUP_CLSFLOWER:
5962                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
5963                 break;
5964         default:
5965                 break;
5966         }
5967
5968         stmmac_enable_all_queues(priv);
5969         return ret;
5970 }
5971
5972 static LIST_HEAD(stmmac_block_cb_list);
5973
5974 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5975                            void *type_data)
5976 {
5977         struct stmmac_priv *priv = netdev_priv(ndev);
5978
5979         switch (type) {
5980         case TC_SETUP_BLOCK:
5981                 return flow_block_cb_setup_simple(type_data,
5982                                                   &stmmac_block_cb_list,
5983                                                   stmmac_setup_tc_block_cb,
5984                                                   priv, priv, true);
5985         case TC_SETUP_QDISC_CBS:
5986                 return stmmac_tc_setup_cbs(priv, priv, type_data);
5987         case TC_SETUP_QDISC_TAPRIO:
5988                 return stmmac_tc_setup_taprio(priv, priv, type_data);
5989         case TC_SETUP_QDISC_ETF:
5990                 return stmmac_tc_setup_etf(priv, priv, type_data);
5991         default:
5992                 return -EOPNOTSUPP;
5993         }
5994 }
5995
5996 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5997                                struct net_device *sb_dev)
5998 {
5999         int gso = skb_shinfo(skb)->gso_type;
6000
6001         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6002                 /*
6003                  * There is no way to determine the number of TSO/USO
6004                  * capable Queues. Let's always use Queue 0
6005                  * because if TSO/USO is supported then at least this
6006                  * one will be capable.
6007                  */
6008                 return 0;
6009         }
6010
6011         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6012 }
6013
6014 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6015 {
6016         struct stmmac_priv *priv = netdev_priv(ndev);
6017         int ret = 0;
6018
6019         ret = pm_runtime_resume_and_get(priv->device);
6020         if (ret < 0)
6021                 return ret;
6022
6023         ret = eth_mac_addr(ndev, addr);
6024         if (ret)
6025                 goto set_mac_error;
6026
6027         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6028
6029 set_mac_error:
6030         pm_runtime_put(priv->device);
6031
6032         return ret;
6033 }
6034
6035 #ifdef CONFIG_DEBUG_FS
6036 static struct dentry *stmmac_fs_dir;
6037
6038 static void sysfs_display_ring(void *head, int size, int extend_desc,
6039                                struct seq_file *seq, dma_addr_t dma_phy_addr)
6040 {
6041         int i;
6042         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6043         struct dma_desc *p = (struct dma_desc *)head;
6044         dma_addr_t dma_addr;
6045
6046         for (i = 0; i < size; i++) {
6047                 if (extend_desc) {
6048                         dma_addr = dma_phy_addr + i * sizeof(*ep);
6049                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6050                                    i, &dma_addr,
6051                                    le32_to_cpu(ep->basic.des0),
6052                                    le32_to_cpu(ep->basic.des1),
6053                                    le32_to_cpu(ep->basic.des2),
6054                                    le32_to_cpu(ep->basic.des3));
6055                         ep++;
6056                 } else {
6057                         dma_addr = dma_phy_addr + i * sizeof(*p);
6058                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6059                                    i, &dma_addr,
6060                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6061                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6062                         p++;
6063                 }
6064                 seq_printf(seq, "\n");
6065         }
6066 }
6067
6068 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6069 {
6070         struct net_device *dev = seq->private;
6071         struct stmmac_priv *priv = netdev_priv(dev);
6072         u32 rx_count = priv->plat->rx_queues_to_use;
6073         u32 tx_count = priv->plat->tx_queues_to_use;
6074         u32 queue;
6075
6076         if ((dev->flags & IFF_UP) == 0)
6077                 return 0;
6078
6079         for (queue = 0; queue < rx_count; queue++) {
6080                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6081
6082                 seq_printf(seq, "RX Queue %d:\n", queue);
6083
6084                 if (priv->extend_desc) {
6085                         seq_printf(seq, "Extended descriptor ring:\n");
6086                         sysfs_display_ring((void *)rx_q->dma_erx,
6087                                            priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6088                 } else {
6089                         seq_printf(seq, "Descriptor ring:\n");
6090                         sysfs_display_ring((void *)rx_q->dma_rx,
6091                                            priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6092                 }
6093         }
6094
6095         for (queue = 0; queue < tx_count; queue++) {
6096                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6097
6098                 seq_printf(seq, "TX Queue %d:\n", queue);
6099
6100                 if (priv->extend_desc) {
6101                         seq_printf(seq, "Extended descriptor ring:\n");
6102                         sysfs_display_ring((void *)tx_q->dma_etx,
6103                                            priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6104                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6105                         seq_printf(seq, "Descriptor ring:\n");
6106                         sysfs_display_ring((void *)tx_q->dma_tx,
6107                                            priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6108                 }
6109         }
6110
6111         return 0;
6112 }
6113 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6114
6115 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6116 {
6117         struct net_device *dev = seq->private;
6118         struct stmmac_priv *priv = netdev_priv(dev);
6119
6120         if (!priv->hw_cap_support) {
6121                 seq_printf(seq, "DMA HW features not supported\n");
6122                 return 0;
6123         }
6124
6125         seq_printf(seq, "==============================\n");
6126         seq_printf(seq, "\tDMA HW features\n");
6127         seq_printf(seq, "==============================\n");
6128
6129         seq_printf(seq, "\t10/100 Mbps: %s\n",
6130                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6131         seq_printf(seq, "\t1000 Mbps: %s\n",
6132                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
6133         seq_printf(seq, "\tHalf duplex: %s\n",
6134                    (priv->dma_cap.half_duplex) ? "Y" : "N");
6135         seq_printf(seq, "\tHash Filter: %s\n",
6136                    (priv->dma_cap.hash_filter) ? "Y" : "N");
6137         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6138                    (priv->dma_cap.multi_addr) ? "Y" : "N");
6139         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6140                    (priv->dma_cap.pcs) ? "Y" : "N");
6141         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6142                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
6143         seq_printf(seq, "\tPMT Remote wake up: %s\n",
6144                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6145         seq_printf(seq, "\tPMT Magic Frame: %s\n",
6146                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6147         seq_printf(seq, "\tRMON module: %s\n",
6148                    (priv->dma_cap.rmon) ? "Y" : "N");
6149         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6150                    (priv->dma_cap.time_stamp) ? "Y" : "N");
6151         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6152                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
6153         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6154                    (priv->dma_cap.eee) ? "Y" : "N");
6155         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6156         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6157                    (priv->dma_cap.tx_coe) ? "Y" : "N");
6158         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6159                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6160                            (priv->dma_cap.rx_coe) ? "Y" : "N");
6161         } else {
6162                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6163                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6164                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6165                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6166         }
6167         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6168                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6169         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6170                    priv->dma_cap.number_rx_channel);
6171         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6172                    priv->dma_cap.number_tx_channel);
6173         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6174                    priv->dma_cap.number_rx_queues);
6175         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6176                    priv->dma_cap.number_tx_queues);
6177         seq_printf(seq, "\tEnhanced descriptors: %s\n",
6178                    (priv->dma_cap.enh_desc) ? "Y" : "N");
6179         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6180         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6181         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6182         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6183         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6184                    priv->dma_cap.pps_out_num);
6185         seq_printf(seq, "\tSafety Features: %s\n",
6186                    priv->dma_cap.asp ? "Y" : "N");
6187         seq_printf(seq, "\tFlexible RX Parser: %s\n",
6188                    priv->dma_cap.frpsel ? "Y" : "N");
6189         seq_printf(seq, "\tEnhanced Addressing: %d\n",
6190                    priv->dma_cap.addr64);
6191         seq_printf(seq, "\tReceive Side Scaling: %s\n",
6192                    priv->dma_cap.rssen ? "Y" : "N");
6193         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6194                    priv->dma_cap.vlhash ? "Y" : "N");
6195         seq_printf(seq, "\tSplit Header: %s\n",
6196                    priv->dma_cap.sphen ? "Y" : "N");
6197         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6198                    priv->dma_cap.vlins ? "Y" : "N");
6199         seq_printf(seq, "\tDouble VLAN: %s\n",
6200                    priv->dma_cap.dvlan ? "Y" : "N");
6201         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6202                    priv->dma_cap.l3l4fnum);
6203         seq_printf(seq, "\tARP Offloading: %s\n",
6204                    priv->dma_cap.arpoffsel ? "Y" : "N");
6205         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6206                    priv->dma_cap.estsel ? "Y" : "N");
6207         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6208                    priv->dma_cap.fpesel ? "Y" : "N");
6209         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6210                    priv->dma_cap.tbssel ? "Y" : "N");
6211         return 0;
6212 }
6213 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6214
6215 /* Use network device events to rename debugfs file entries.
6216  */
6217 static int stmmac_device_event(struct notifier_block *unused,
6218                                unsigned long event, void *ptr)
6219 {
6220         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6221         struct stmmac_priv *priv = netdev_priv(dev);
6222
6223         if (dev->netdev_ops != &stmmac_netdev_ops)
6224                 goto done;
6225
6226         switch (event) {
6227         case NETDEV_CHANGENAME:
6228                 if (priv->dbgfs_dir)
6229                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6230                                                          priv->dbgfs_dir,
6231                                                          stmmac_fs_dir,
6232                                                          dev->name);
6233                 break;
6234         }
6235 done:
6236         return NOTIFY_DONE;
6237 }
6238
6239 static struct notifier_block stmmac_notifier = {
6240         .notifier_call = stmmac_device_event,
6241 };
6242
6243 static void stmmac_init_fs(struct net_device *dev)
6244 {
6245         struct stmmac_priv *priv = netdev_priv(dev);
6246
6247         rtnl_lock();
6248
6249         /* Create per netdev entries */
6250         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6251
6252         /* Entry to report DMA RX/TX rings */
6253         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6254                             &stmmac_rings_status_fops);
6255
6256         /* Entry to report the DMA HW features */
6257         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6258                             &stmmac_dma_cap_fops);
6259
6260         rtnl_unlock();
6261 }
6262
6263 static void stmmac_exit_fs(struct net_device *dev)
6264 {
6265         struct stmmac_priv *priv = netdev_priv(dev);
6266
6267         debugfs_remove_recursive(priv->dbgfs_dir);
6268 }
6269 #endif /* CONFIG_DEBUG_FS */
6270
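/* Compute a bit-reflected CRC-32 (polynomial 0xEDB88320) over the 12 VID bits
 * of a little-endian VLAN tag; used below to build the VLAN hash filter.
 */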
6271 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6272 {
6273         unsigned char *data = (unsigned char *)&vid_le;
6274         unsigned char data_byte = 0;
6275         u32 crc = ~0x0;
6276         u32 temp = 0;
6277         int i, bits;
6278
6279         bits = get_bitmask_order(VLAN_VID_MASK);
6280         for (i = 0; i < bits; i++) {
6281                 if ((i % 8) == 0)
6282                         data_byte = data[i / 8];
6283
6284                 temp = ((crc & 1) ^ data_byte) & 1;
6285                 crc >>= 1;
6286                 data_byte >>= 1;
6287
6288                 if (temp)
6289                         crc ^= 0xedb88320;
6290         }
6291
6292         return crc;
6293 }
6294
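/* Rebuild the 16-bin VLAN hash filter from the set of active VLANs. If the
 * hardware has no VLAN hash support, fall back to perfect matching of a
 * single VID (VID 0 always passes the filter) and reject larger sets.
 */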
6295 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6296 {
6297         u32 crc, hash = 0;
6298         __le16 pmatch = 0;
6299         int count = 0;
6300         u16 vid = 0;
6301
6302         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6303                 __le16 vid_le = cpu_to_le16(vid);
6304                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6305                 hash |= (1 << crc);
6306                 count++;
6307         }
6308
6309         if (!priv->dma_cap.vlhash) {
6310                 if (count > 2) /* VID = 0 always passes filter */
6311                         return -EOPNOTSUPP;
6312
6313                 pmatch = cpu_to_le16(vid);
6314                 hash = 0;
6315         }
6316
6317         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6318 }
6319
6320 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6321 {
6322         struct stmmac_priv *priv = netdev_priv(ndev);
6323         bool is_double = false;
6324         int ret;
6325
6326         if (be16_to_cpu(proto) == ETH_P_8021AD)
6327                 is_double = true;
6328
6329         set_bit(vid, priv->active_vlans);
6330         ret = stmmac_vlan_update(priv, is_double);
6331         if (ret) {
6332                 clear_bit(vid, priv->active_vlans);
6333                 return ret;
6334         }
6335
6336         if (priv->hw->num_vlan) {
6337                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6338                 if (ret)
6339                         return ret;
6340         }
6341
6342         return 0;
6343 }
6344
6345 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6346 {
6347         struct stmmac_priv *priv = netdev_priv(ndev);
6348         bool is_double = false;
6349         int ret;
6350
6351         ret = pm_runtime_resume_and_get(priv->device);
6352         if (ret < 0)
6353                 return ret;
6354
6355         if (be16_to_cpu(proto) == ETH_P_8021AD)
6356                 is_double = true;
6357
6358         clear_bit(vid, priv->active_vlans);
6359
6360         if (priv->hw->num_vlan) {
6361                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6362                 if (ret)
6363                         goto del_vlan_error;
6364         }
6365
6366         ret = stmmac_vlan_update(priv, is_double);
6367
6368 del_vlan_error:
6369         pm_runtime_put(priv->device);
6370
6371         return ret;
6372 }
6373
6374 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6375 {
6376         struct stmmac_priv *priv = netdev_priv(dev);
6377
6378         switch (bpf->command) {
6379         case XDP_SETUP_PROG:
6380                 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6381         case XDP_SETUP_XSK_POOL:
6382                 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6383                                              bpf->xsk.queue_id);
6384         default:
6385                 return -EOPNOTSUPP;
6386         }
6387 }
6388
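/**
 *  stmmac_xdp_xmit - transmit frames redirected to this device by XDP
 *  @dev: device pointer
 *  @num_frames: number of frames to transmit
 *  @frames: array of XDP frames
 *  @flags: XDP transmit flags (e.g. XDP_XMIT_FLUSH)
 *  Description: queues each frame on the XDP TX queue selected for the
 *  current CPU, under the netdev TX lock shared with the slow path, and
 *  returns the number of frames actually queued.
 */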
6389 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6390                            struct xdp_frame **frames, u32 flags)
6391 {
6392         struct stmmac_priv *priv = netdev_priv(dev);
6393         int cpu = smp_processor_id();
6394         struct netdev_queue *nq;
6395         int i, nxmit = 0;
6396         int queue;
6397
6398         if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6399                 return -ENETDOWN;
6400
6401         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6402                 return -EINVAL;
6403
6404         queue = stmmac_xdp_get_tx_queue(priv, cpu);
6405         nq = netdev_get_tx_queue(priv->dev, queue);
6406
6407         __netif_tx_lock(nq, cpu);
6408         /* Avoids TX time-out as we are sharing with slow path */
6409         txq_trans_cond_update(nq);
6410
6411         for (i = 0; i < num_frames; i++) {
6412                 int res;
6413
6414                 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6415                 if (res == STMMAC_XDP_CONSUMED)
6416                         break;
6417
6418                 nxmit++;
6419         }
6420
6421         if (flags & XDP_XMIT_FLUSH) {
6422                 stmmac_flush_tx_descriptors(priv, queue);
6423                 stmmac_tx_timer_arm(priv, queue);
6424         }
6425
6426         __netif_tx_unlock(nq);
6427
6428         return nxmit;
6429 }
6430
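/* Quiesce one RX queue for reconfiguration (e.g. when an XSK buffer pool is
 * attached or detached): mask its DMA IRQ, stop RX DMA and free its
 * descriptor resources.
 */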
6431 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6432 {
6433         struct stmmac_channel *ch = &priv->channel[queue];
6434         unsigned long flags;
6435
6436         spin_lock_irqsave(&ch->lock, flags);
6437         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6438         spin_unlock_irqrestore(&ch->lock, flags);
6439
6440         stmmac_stop_rx_dma(priv, queue);
6441         __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6442 }
6443
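/* Bring one RX queue back up after reconfiguration: reallocate and
 * re-initialize its descriptor ring, program the DMA channel and buffer size
 * (the XSK frame size if a pool is attached), then restart RX DMA and unmask
 * the queue's DMA IRQ.
 */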
6444 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6445 {
6446         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6447         struct stmmac_channel *ch = &priv->channel[queue];
6448         unsigned long flags;
6449         u32 buf_size;
6450         int ret;
6451
6452         ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6453         if (ret) {
6454                 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6455                 return;
6456         }
6457
6458         ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6459         if (ret) {
6460                 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6461                 netdev_err(priv->dev, "Failed to init RX desc.\n");
6462                 return;
6463         }
6464
6465         stmmac_reset_rx_queue(priv, queue);
6466         stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6467
6468         stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6469                             rx_q->dma_rx_phy, rx_q->queue_index);
6470
6471         rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6472                              sizeof(struct dma_desc));
6473         stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6474                                rx_q->rx_tail_addr, rx_q->queue_index);
6475
6476         if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6477                 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6478                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6479                                       buf_size,
6480                                       rx_q->queue_index);
6481         } else {
6482                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6483                                       priv->dma_conf.dma_buf_sz,
6484                                       rx_q->queue_index);
6485         }
6486
6487         stmmac_start_rx_dma(priv, queue);
6488
6489         spin_lock_irqsave(&ch->lock, flags);
6490         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6491         spin_unlock_irqrestore(&ch->lock, flags);
6492 }
6493
6494 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6495 {
6496         struct stmmac_channel *ch = &priv->channel[queue];
6497         unsigned long flags;
6498
6499         spin_lock_irqsave(&ch->lock, flags);
6500         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6501         spin_unlock_irqrestore(&ch->lock, flags);
6502
6503         stmmac_stop_tx_dma(priv, queue);
6504         __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6505 }
6506
6507 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6508 {
6509         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6510         struct stmmac_channel *ch = &priv->channel[queue];
6511         unsigned long flags;
6512         int ret;
6513
6514         ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6515         if (ret) {
6516                 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6517                 return;
6518         }
6519
6520                 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6521         if (ret) {
6522                 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6523                 netdev_err(priv->dev, "Failed to init TX desc.\n");
6524                 return;
6525         }
6526
6527         stmmac_reset_tx_queue(priv, queue);
6528         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6529
6530         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6531                             tx_q->dma_tx_phy, tx_q->queue_index);
6532
6533         if (tx_q->tbs & STMMAC_TBS_AVAIL)
6534                 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6535
6536         tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6537         stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6538                                tx_q->tx_tail_addr, tx_q->queue_index);
6539
6540         stmmac_start_tx_dma(priv, queue);
6541
6542         spin_lock_irqsave(&ch->lock, flags);
6543         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6544         spin_unlock_irqrestore(&ch->lock, flags);
6545 }
6546
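/* Tear down the data path before switching the XDP setup: disable NAPI,
 * cancel the TX timers, free the IRQs, stop DMA, release the descriptor
 * rings and disable the MAC.
 */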
6547 void stmmac_xdp_release(struct net_device *dev)
6548 {
6549         struct stmmac_priv *priv = netdev_priv(dev);
6550         u32 chan;
6551
6552         /* Disable NAPI process */
6553         stmmac_disable_all_queues(priv);
6554
6555         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6556                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6557
6558         /* Free the IRQ lines */
6559         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6560
6561         /* Stop TX/RX DMA channels */
6562         stmmac_stop_all_dma(priv);
6563
6564         /* Release and free the Rx/Tx resources */
6565         free_dma_desc_resources(priv, &priv->dma_conf);
6566
6567         /* Disable the MAC Rx/Tx */
6568         stmmac_mac_set(priv, priv->ioaddr, false);
6569
6570         /* set trans_start so we don't get spurious
6571          * watchdogs during reset
6572          */
6573         netif_trans_update(dev);
6574         netif_carrier_off(dev);
6575 }
6576
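/**
 *  stmmac_xdp_open - reopen the data path for XDP use
 *  @dev: device pointer
 *  Description: counterpart of stmmac_xdp_release(): reallocates and
 *  programs the DMA rings (honouring XSK buffer pools), requests the IRQs,
 *  re-enables the MAC and NAPI and restarts the DMA channels.
 */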
6577 int stmmac_xdp_open(struct net_device *dev)
6578 {
6579         struct stmmac_priv *priv = netdev_priv(dev);
6580         u32 rx_cnt = priv->plat->rx_queues_to_use;
6581         u32 tx_cnt = priv->plat->tx_queues_to_use;
6582         u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6583         struct stmmac_rx_queue *rx_q;
6584         struct stmmac_tx_queue *tx_q;
6585         u32 buf_size;
6586         bool sph_en;
6587         u32 chan;
6588         int ret;
6589
6590         ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6591         if (ret < 0) {
6592                 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6593                            __func__);
6594                 goto dma_desc_error;
6595         }
6596
6597         ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6598         if (ret < 0) {
6599                 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6600                            __func__);
6601                 goto init_error;
6602         }
6603
6604         /* DMA CSR Channel configuration */
6605         for (chan = 0; chan < dma_csr_ch; chan++) {
6606                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6607                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6608         }
6609
6610         /* Adjust Split header */
6611         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6612
6613         /* DMA RX Channel Configuration */
6614         for (chan = 0; chan < rx_cnt; chan++) {
6615                 rx_q = &priv->dma_conf.rx_queue[chan];
6616
6617                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6618                                     rx_q->dma_rx_phy, chan);
6619
6620                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6621                                      (rx_q->buf_alloc_num *
6622                                       sizeof(struct dma_desc));
6623                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6624                                        rx_q->rx_tail_addr, chan);
6625
6626                 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6627                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6628                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6629                                               buf_size,
6630                                               rx_q->queue_index);
6631                 } else {
6632                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6633                                               priv->dma_conf.dma_buf_sz,
6634                                               rx_q->queue_index);
6635                 }
6636
6637                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6638         }
6639
6640         /* DMA TX Channel Configuration */
6641         for (chan = 0; chan < tx_cnt; chan++) {
6642                 tx_q = &priv->dma_conf.tx_queue[chan];
6643
6644                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6645                                     tx_q->dma_tx_phy, chan);
6646
6647                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6648                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6649                                        tx_q->tx_tail_addr, chan);
6650
6651                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6652                 tx_q->txtimer.function = stmmac_tx_timer;
6653         }
6654
6655         /* Enable the MAC Rx/Tx */
6656         stmmac_mac_set(priv, priv->ioaddr, true);
6657
6658         /* Start Rx & Tx DMA Channels */
6659         stmmac_start_all_dma(priv);
6660
6661         ret = stmmac_request_irq(dev);
6662         if (ret)
6663                 goto irq_error;
6664
6665         /* Enable NAPI process */
6666         stmmac_enable_all_queues(priv);
6667         netif_carrier_on(dev);
6668         netif_tx_start_all_queues(dev);
6669         stmmac_enable_all_dma_irq(priv);
6670
6671         return 0;
6672
6673 irq_error:
6674         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6675                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6676
6677         stmmac_hw_teardown(dev);
6678 init_error:
6679         free_dma_desc_resources(priv, &priv->dma_conf);
6680 dma_desc_error:
6681         return ret;
6682 }
6683
6684 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6685 {
6686         struct stmmac_priv *priv = netdev_priv(dev);
6687         struct stmmac_rx_queue *rx_q;
6688         struct stmmac_tx_queue *tx_q;
6689         struct stmmac_channel *ch;
6690
6691         if (test_bit(STMMAC_DOWN, &priv->state) ||
6692             !netif_carrier_ok(priv->dev))
6693                 return -ENETDOWN;
6694
6695         if (!stmmac_xdp_is_enabled(priv))
6696                 return -EINVAL;
6697
6698         if (queue >= priv->plat->rx_queues_to_use ||
6699             queue >= priv->plat->tx_queues_to_use)
6700                 return -EINVAL;
6701
6702         rx_q = &priv->dma_conf.rx_queue[queue];
6703         tx_q = &priv->dma_conf.tx_queue[queue];
6704         ch = &priv->channel[queue];
6705
6706         if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6707                 return -EINVAL;
6708
6709         if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6710                 /* EQoS does not have per-DMA channel SW interrupt,
6711                  * so we schedule the RX/TX NAPI straight away.
6712                  */
6713                 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6714                         __napi_schedule(&ch->rxtx_napi);
6715         }
6716
6717         return 0;
6718 }
6719
6720 static const struct net_device_ops stmmac_netdev_ops = {
6721         .ndo_open = stmmac_open,
6722         .ndo_start_xmit = stmmac_xmit,
6723         .ndo_stop = stmmac_release,
6724         .ndo_change_mtu = stmmac_change_mtu,
6725         .ndo_fix_features = stmmac_fix_features,
6726         .ndo_set_features = stmmac_set_features,
6727         .ndo_set_rx_mode = stmmac_set_rx_mode,
6728         .ndo_tx_timeout = stmmac_tx_timeout,
6729         .ndo_eth_ioctl = stmmac_ioctl,
6730         .ndo_setup_tc = stmmac_setup_tc,
6731         .ndo_select_queue = stmmac_select_queue,
6732 #ifdef CONFIG_NET_POLL_CONTROLLER
6733         .ndo_poll_controller = stmmac_poll_controller,
6734 #endif
6735         .ndo_set_mac_address = stmmac_set_mac_address,
6736         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6737         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6738         .ndo_bpf = stmmac_bpf,
6739         .ndo_xdp_xmit = stmmac_xdp_xmit,
6740         .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6741 };
6742
6743 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6744 {
6745         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6746                 return;
6747         if (test_bit(STMMAC_DOWN, &priv->state))
6748                 return;
6749
6750         netdev_err(priv->dev, "Reset adapter.\n");
6751
6752         rtnl_lock();
6753         netif_trans_update(priv->dev);
6754         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6755                 usleep_range(1000, 2000);
6756
6757         set_bit(STMMAC_DOWN, &priv->state);
6758         dev_close(priv->dev);
6759         dev_open(priv->dev, NULL);
6760         clear_bit(STMMAC_DOWN, &priv->state);
6761         clear_bit(STMMAC_RESETING, &priv->state);
6762         rtnl_unlock();
6763 }
6764
6765 static void stmmac_service_task(struct work_struct *work)
6766 {
6767         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6768                         service_task);
6769
6770         stmmac_reset_subtask(priv);
6771         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6772 }
6773
6774 /**
6775  *  stmmac_hw_init - Init the MAC device
6776  *  @priv: driver private structure
6777  *  Description: this function is to configure the MAC device according to
6778  *  some platform parameters or the HW capability register. It prepares the
6779  *  driver to use either ring or chain modes and to setup either enhanced or
6780  *  normal descriptors.
6781  */
6782 static int stmmac_hw_init(struct stmmac_priv *priv)
6783 {
6784         int ret;
6785
6786         /* dwmac-sun8i only works in chain mode */
6787         if (priv->plat->has_sun8i)
6788                 chain_mode = 1;
6789         priv->chain_mode = chain_mode;
6790
6791         /* Initialize HW Interface */
6792         ret = stmmac_hwif_init(priv);
6793         if (ret)
6794                 return ret;
6795
6796         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
6797         priv->hw_cap_support = stmmac_get_hw_features(priv);
6798         if (priv->hw_cap_support) {
6799                 dev_info(priv->device, "DMA HW capability register supported\n");
6800
6801                 /* We can override some gmac/dma configuration fields
6802                  * (e.g. enh_desc, tx_coe) that are passed through the
6803                  * platform with the values from the HW capability
6804                  * register (if supported).
6805                  */
6806                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
6807                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6808                                 !priv->plat->use_phy_wol;
6809                 priv->hw->pmt = priv->plat->pmt;
6810                 if (priv->dma_cap.hash_tb_sz) {
6811                         priv->hw->multicast_filter_bins =
6812                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
6813                         priv->hw->mcast_bits_log2 =
6814                                         ilog2(priv->hw->multicast_filter_bins);
6815                 }
6816
6817                 /* TX COE doesn't work in threshold DMA mode */
6818                 if (priv->plat->force_thresh_dma_mode)
6819                         priv->plat->tx_coe = 0;
6820                 else
6821                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
6822
6823                 /* In the case of GMAC4, rx_coe comes from the HW capability register. */
6824                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
6825
6826                 if (priv->dma_cap.rx_coe_type2)
6827                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6828                 else if (priv->dma_cap.rx_coe_type1)
6829                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6830
6831         } else {
6832                 dev_info(priv->device, "No HW DMA feature register supported\n");
6833         }
6834
6835         if (priv->plat->rx_coe) {
6836                 priv->hw->rx_csum = priv->plat->rx_coe;
6837                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6838                 if (priv->synopsys_id < DWMAC_CORE_4_00)
6839                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6840         }
6841         if (priv->plat->tx_coe)
6842                 dev_info(priv->device, "TX Checksum insertion supported\n");
6843
6844         if (priv->plat->pmt) {
6845                 dev_info(priv->device, "Wake-Up On Lan supported\n");
6846                 device_set_wakeup_capable(priv->device, 1);
6847         }
6848
6849         if (priv->dma_cap.tsoen)
6850                 dev_info(priv->device, "TSO supported\n");
6851
6852         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6853         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6854
6855         /* Run HW quirks, if any */
6856         if (priv->hwif_quirks) {
6857                 ret = priv->hwif_quirks(priv);
6858                 if (ret)
6859                         return ret;
6860         }
6861
6862         /* Rx Watchdog is available in cores newer than 3.40.
6863          * In some cases, for example on buggy HW, this feature
6864          * has to be disabled and this can be done by passing the
6865          * riwt_off field from the platform.
6866          */
6867         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6868             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6869                 priv->use_riwt = 1;
6870                 dev_info(priv->device,
6871                          "Enable RX Mitigation via HW Watchdog Timer\n");
6872         }
6873
6874         return 0;
6875 }
6876
6877 static void stmmac_napi_add(struct net_device *dev)
6878 {
6879         struct stmmac_priv *priv = netdev_priv(dev);
6880         u32 queue, maxq;
6881
6882         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6883
6884         for (queue = 0; queue < maxq; queue++) {
6885                 struct stmmac_channel *ch = &priv->channel[queue];
6886
6887                 ch->priv_data = priv;
6888                 ch->index = queue;
6889                 spin_lock_init(&ch->lock);
6890
6891                 if (queue < priv->plat->rx_queues_to_use) {
6892                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6893                                        NAPI_POLL_WEIGHT);
6894                 }
6895                 if (queue < priv->plat->tx_queues_to_use) {
6896                         netif_napi_add_tx(dev, &ch->tx_napi,
6897                                           stmmac_napi_poll_tx);
6898                 }
6899                 if (queue < priv->plat->rx_queues_to_use &&
6900                     queue < priv->plat->tx_queues_to_use) {
6901                         netif_napi_add(dev, &ch->rxtx_napi,
6902                                        stmmac_napi_poll_rxtx,
6903                                        NAPI_POLL_WEIGHT);
6904                 }
6905         }
6906 }
6907
6908 static void stmmac_napi_del(struct net_device *dev)
6909 {
6910         struct stmmac_priv *priv = netdev_priv(dev);
6911         u32 queue, maxq;
6912
6913         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6914
6915         for (queue = 0; queue < maxq; queue++) {
6916                 struct stmmac_channel *ch = &priv->channel[queue];
6917
6918                 if (queue < priv->plat->rx_queues_to_use)
6919                         netif_napi_del(&ch->rx_napi);
6920                 if (queue < priv->plat->tx_queues_to_use)
6921                         netif_napi_del(&ch->tx_napi);
6922                 if (queue < priv->plat->rx_queues_to_use &&
6923                     queue < priv->plat->tx_queues_to_use) {
6924                         netif_napi_del(&ch->rxtx_napi);
6925                 }
6926         }
6927 }
6928
6929 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6930 {
6931         struct stmmac_priv *priv = netdev_priv(dev);
6932         int ret = 0;
6933
6934         if (netif_running(dev))
6935                 stmmac_release(dev);
6936
6937         stmmac_napi_del(dev);
6938
6939         priv->plat->rx_queues_to_use = rx_cnt;
6940         priv->plat->tx_queues_to_use = tx_cnt;
6941
6942         stmmac_napi_add(dev);
6943
6944         if (netif_running(dev))
6945                 ret = stmmac_open(dev);
6946
6947         return ret;
6948 }
6949
6950 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6951 {
6952         struct stmmac_priv *priv = netdev_priv(dev);
6953         int ret = 0;
6954
6955         if (netif_running(dev))
6956                 stmmac_release(dev);
6957
6958         priv->dma_conf.dma_rx_size = rx_size;
6959         priv->dma_conf.dma_tx_size = tx_size;
6960
6961         if (netif_running(dev))
6962                 ret = stmmac_open(dev);
6963
6964         return ret;
6965 }
6966
6967 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
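/**
 *  stmmac_fpe_lp_task - FPE handshake worker
 *  @work: work_struct embedded in the driver private data
 *  Description: polls the local and link-partner FPE states, resending
 *  verify mPackets (up to 20 times, 500 ms apart) until both ends reach
 *  ENTERING_ON, then configures frame preemption and marks both states ON.
 *  Bails out early if the handshake is switched off.
 */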
6968 static void stmmac_fpe_lp_task(struct work_struct *work)
6969 {
6970         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6971                                                 fpe_task);
6972         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6973         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6974         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6975         bool *hs_enable = &fpe_cfg->hs_enable;
6976         bool *enable = &fpe_cfg->enable;
6977         int retries = 20;
6978
6979         while (retries-- > 0) {
6980                 /* Bail out immediately if FPE handshake is OFF */
6981                 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6982                         break;
6983
6984                 if (*lo_state == FPE_STATE_ENTERING_ON &&
6985                     *lp_state == FPE_STATE_ENTERING_ON) {
6986                         stmmac_fpe_configure(priv, priv->ioaddr,
6987                                              priv->plat->tx_queues_to_use,
6988                                              priv->plat->rx_queues_to_use,
6989                                              *enable);
6990
6991                         netdev_info(priv->dev, "configured FPE\n");
6992
6993                         *lo_state = FPE_STATE_ON;
6994                         *lp_state = FPE_STATE_ON;
6995                         netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
6996                         break;
6997                 }
6998
6999                 if ((*lo_state == FPE_STATE_CAPABLE ||
7000                      *lo_state == FPE_STATE_ENTERING_ON) &&
7001                      *lp_state != FPE_STATE_ON) {
7002                         netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7003                                     *lo_state, *lp_state);
7004                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7005                                                 MPACKET_VERIFY);
7006                 }
7007                 /* Sleep then retry */
7008                 msleep(500);
7009         }
7010
7011         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7012 }
7013
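/**
 * stmmac_fpe_handshake - start or stop the FPE verify handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to stop it
 * Description: on enable, send a verify mPacket to kick off the handshake;
 * on disable, reset the local and link partner FPE states to OFF.
 */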
7014 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7015 {
7016         if (priv->plat->fpe_cfg->hs_enable != enable) {
7017                 if (enable) {
7018                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7019                                                 MPACKET_VERIFY);
7020                 } else {
7021                         priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7022                         priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7023                 }
7024
7025                 priv->plat->fpe_cfg->hs_enable = enable;
7026         }
7027 }
7028
7029 /**
7030  * stmmac_dvr_probe
7031  * @device: device pointer
7032  * @plat_dat: platform data pointer
7033  * @res: stmmac resource pointer
7034  * Description: this is the main probe function; it allocates the
7035  * net_device via alloc_etherdev and initializes the private structure.
7036  * Return:
7037  * returns 0 on success, otherwise errno.
7038  */
7039 int stmmac_dvr_probe(struct device *device,
7040                      struct plat_stmmacenet_data *plat_dat,
7041                      struct stmmac_resources *res)
7042 {
7043         struct net_device *ndev = NULL;
7044         struct stmmac_priv *priv;
7045         u32 rxq;
7046         int i, ret = 0;
7047
7048         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7049                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7050         if (!ndev)
7051                 return -ENOMEM;
7052
7053         SET_NETDEV_DEV(ndev, device);
7054
7055         priv = netdev_priv(ndev);
7056         priv->device = device;
7057         priv->dev = ndev;
7058
7059         stmmac_set_ethtool_ops(ndev);
7060         priv->pause = pause;
7061         priv->plat = plat_dat;
7062         priv->ioaddr = res->addr;
7063         priv->dev->base_addr = (unsigned long)res->addr;
7064         priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
7065
7066         priv->dev->irq = res->irq;
7067         priv->wol_irq = res->wol_irq;
7068         priv->lpi_irq = res->lpi_irq;
7069         priv->sfty_ce_irq = res->sfty_ce_irq;
7070         priv->sfty_ue_irq = res->sfty_ue_irq;
7071         for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7072                 priv->rx_irq[i] = res->rx_irq[i];
7073         for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7074                 priv->tx_irq[i] = res->tx_irq[i];
7075
7076         if (!is_zero_ether_addr(res->mac))
7077                 eth_hw_addr_set(priv->dev, res->mac);
7078
7079         dev_set_drvdata(device, priv->dev);
7080
7081         /* Verify driver arguments */
7082         stmmac_verify_args();
7083
7084         priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7085         if (!priv->af_xdp_zc_qps)
7086                 return -ENOMEM;
7087
7088         /* Allocate workqueue */
7089         priv->wq = create_singlethread_workqueue("stmmac_wq");
7090         if (!priv->wq) {
7091                 dev_err(priv->device, "failed to create workqueue\n");
7092                 return -ENOMEM;
7093         }
7094
7095         INIT_WORK(&priv->service_task, stmmac_service_task);
7096
7097         /* Initialize Link Partner FPE workqueue */
7098         INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7099
7100         /* Override with kernel parameters if supplied XXX CRS XXX
7101          * this needs to have multiple instances
7102          */
7103         if ((phyaddr >= 0) && (phyaddr <= 31))
7104                 priv->plat->phy_addr = phyaddr;
7105
7106         if (priv->plat->stmmac_rst) {
7107                 ret = reset_control_assert(priv->plat->stmmac_rst);
7108                 reset_control_deassert(priv->plat->stmmac_rst);
7109                 /* Some reset controllers provide only a reset callback
7110                  * instead of an assert + deassert callback pair.
7111                  */
7112                 if (ret == -ENOTSUPP)
7113                         reset_control_reset(priv->plat->stmmac_rst);
7114         }
7115
7116         ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7117         if (ret == -ENOTSUPP)
7118                 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7119                         ERR_PTR(ret));
7120
7121         /* Init MAC and get the capabilities */
7122         ret = stmmac_hw_init(priv);
7123         if (ret)
7124                 goto error_hw_init;
7125
7126         /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7127          */
7128         if (priv->synopsys_id < DWMAC_CORE_5_20)
7129                 priv->plat->dma_cfg->dche = false;
7130
7131         stmmac_check_ether_addr(priv);
7132
7133         ndev->netdev_ops = &stmmac_netdev_ops;
7134
7135         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7136                             NETIF_F_RXCSUM;
7137
7138         ret = stmmac_tc_init(priv, priv);
7139         if (!ret) {
7140                 ndev->hw_features |= NETIF_F_HW_TC;
7141         }
7142
7143         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
7144                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7145                 if (priv->plat->has_gmac4)
7146                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7147                 priv->tso = true;
7148                 dev_info(priv->device, "TSO feature enabled\n");
7149         }
7150
7151         if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
7152                 ndev->hw_features |= NETIF_F_GRO;
7153                 priv->sph_cap = true;
7154                 priv->sph = priv->sph_cap;
7155                 dev_info(priv->device, "SPH feature enabled\n");
7156         }
7157
7158         /* The current IP register MAC_HW_Feature1[ADDR64] only defines
7159          * 32/40/64-bit widths, but some SoCs support other widths: e.g. the
7160          * i.MX8MP supports 34 bits, reported as 40 bits in MAC_HW_Feature1[ADDR64].
7161          * So overwrite dma_cap.addr64 according to the real HW design.
7162          */
7163         if (priv->plat->addr64)
7164                 priv->dma_cap.addr64 = priv->plat->addr64;
7165
7166         if (priv->dma_cap.addr64) {
7167                 ret = dma_set_mask_and_coherent(device,
7168                                 DMA_BIT_MASK(priv->dma_cap.addr64));
7169                 if (!ret) {
7170                         dev_info(priv->device, "Using %d bits DMA width\n",
7171                                  priv->dma_cap.addr64);
7172
7173                         /*
7174                          * If more than 32 bits can be addressed, make sure to
7175                          * enable enhanced addressing mode.
7176                          */
7177                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7178                                 priv->plat->dma_cfg->eame = true;
7179                 } else {
7180                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7181                         if (ret) {
7182                                 dev_err(priv->device, "Failed to set DMA Mask\n");
7183                                 goto error_hw_init;
7184                         }
7185
7186                         priv->dma_cap.addr64 = 32;
7187                 }
7188         }
7189
7190         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7191         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7192 #ifdef STMMAC_VLAN_TAG_USED
7193         /* Both mac100 and gmac support receive VLAN tag detection */
7194         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7195         if (priv->dma_cap.vlhash) {
7196                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7197                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7198         }
7199         if (priv->dma_cap.vlins) {
7200                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7201                 if (priv->dma_cap.dvlan)
7202                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7203         }
7204 #endif
7205         priv->msg_enable = netif_msg_init(debug, default_msg_level);
7206
7207         /* Initialize RSS */
7208         rxq = priv->plat->rx_queues_to_use;
7209         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7210         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7211                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7212
7213         if (priv->dma_cap.rssen && priv->plat->rss_en)
7214                 ndev->features |= NETIF_F_RXHASH;
7215
7216         /* MTU range: 46 - hw-specific max */
7217         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7218         if (priv->plat->has_xgmac)
7219                 ndev->max_mtu = XGMAC_JUMBO_LEN;
7220         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7221                 ndev->max_mtu = JUMBO_LEN;
7222         else
7223                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7224         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7225          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7226          */
7227         if ((priv->plat->maxmtu < ndev->max_mtu) &&
7228             (priv->plat->maxmtu >= ndev->min_mtu))
7229                 ndev->max_mtu = priv->plat->maxmtu;
7230         else if (priv->plat->maxmtu < ndev->min_mtu)
7231                 dev_warn(priv->device,
7232                          "%s: warning: maxmtu having invalid value (%d)\n",
7233                          __func__, priv->plat->maxmtu);
7234
7235         if (flow_ctrl)
7236                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
7237
7238         /* Setup channels NAPI */
7239         stmmac_napi_add(ndev);
7240
7241         mutex_init(&priv->lock);
7242
7243         /* If a specific clk_csr value is passed from the platform,
7244          * this means that the CSR Clock Range selection cannot be
7245          * changed at run-time and is fixed. Otherwise, the driver
7246          * will try to set the MDC clock dynamically according to the
7247          * actual CSR clock input.
7248          */
7249         if (priv->plat->clk_csr >= 0)
7250                 priv->clk_csr = priv->plat->clk_csr;
7251         else
7252                 stmmac_clk_csr_set(priv);
7253
7254         stmmac_check_pcs_mode(priv);
7255
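        /* Keep the device runtime-resumed for the rest of probe; the
         * pm_runtime_put() at the end of probe lets the core gate the
         * clocks again once probing has completed.
         */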
7256         pm_runtime_get_noresume(device);
7257         pm_runtime_set_active(device);
7258         if (!pm_runtime_enabled(device))
7259                 pm_runtime_enable(device);
7260
7261         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7262             priv->hw->pcs != STMMAC_PCS_RTBI) {
7263                 /* MDIO bus Registration */
7264                 ret = stmmac_mdio_register(ndev);
7265                 if (ret < 0) {
7266                         dev_err_probe(priv->device, ret,
7267                                       "%s: MDIO bus (id: %d) registration failed\n",
7268                                       __func__, priv->plat->bus_id);
7269                         goto error_mdio_register;
7270                 }
7271         }
7272
7273         if (priv->plat->speed_mode_2500)
7274                 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7275
7276         if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7277                 ret = stmmac_xpcs_setup(priv->mii);
7278                 if (ret)
7279                         goto error_xpcs_setup;
7280         }
7281
7282         ret = stmmac_phy_setup(priv);
7283         if (ret) {
7284                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7285                 goto error_phy_setup;
7286         }
7287
7288         ret = register_netdev(ndev);
7289         if (ret) {
7290                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7291                         __func__, ret);
7292                 goto error_netdev_register;
7293         }
7294
7295         if (priv->plat->serdes_powerup) {
7296                 ret = priv->plat->serdes_powerup(ndev,
7297                                                  priv->plat->bsp_priv);
7298
7299                 if (ret < 0)
7300                         goto error_serdes_powerup;
7301         }
7302
7303 #ifdef CONFIG_DEBUG_FS
7304         stmmac_init_fs(ndev);
7305 #endif
7306
7307         if (priv->plat->dump_debug_regs)
7308                 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7309
7310         /* Let pm_runtime_put() disable the clocks.
7311          * If CONFIG_PM is not enabled, the clocks will stay powered.
7312          */
7313         pm_runtime_put(device);
7314
7315         return ret;
7316
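        /* Error unwind: undo the successful probe steps above in reverse order */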
7317 error_serdes_powerup:
7318         unregister_netdev(ndev);
7319 error_netdev_register:
7320         phylink_destroy(priv->phylink);
7321 error_xpcs_setup:
7322 error_phy_setup:
7323         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7324             priv->hw->pcs != STMMAC_PCS_RTBI)
7325                 stmmac_mdio_unregister(ndev);
7326 error_mdio_register:
7327         stmmac_napi_del(ndev);
7328 error_hw_init:
7329         destroy_workqueue(priv->wq);
7330         bitmap_free(priv->af_xdp_zc_qps);
7331
7332         return ret;
7333 }
7334 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7335
7336 /**
7337  * stmmac_dvr_remove
7338  * @dev: device pointer
7339  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7340  * changes the link status and releases the DMA descriptor rings.
7341  */
7342 int stmmac_dvr_remove(struct device *dev)
7343 {
7344         struct net_device *ndev = dev_get_drvdata(dev);
7345         struct stmmac_priv *priv = netdev_priv(ndev);
7346
7347         netdev_info(priv->dev, "%s: removing driver\n", __func__);
7348
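        /* Make sure the clocks are enabled while tearing the device down */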
7349         pm_runtime_get_sync(dev);
7350
7351         stmmac_stop_all_dma(priv);
7352         stmmac_mac_set(priv, priv->ioaddr, false);
7353         netif_carrier_off(ndev);
7354         unregister_netdev(ndev);
7355
7356         /* Serdes power down needs to happen after the VLAN filter
7357          * is deleted, which is triggered by unregister_netdev().
7358          */
7359         if (priv->plat->serdes_powerdown)
7360                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7361
7362 #ifdef CONFIG_DEBUG_FS
7363         stmmac_exit_fs(ndev);
7364 #endif
7365         phylink_destroy(priv->phylink);
7366         if (priv->plat->stmmac_rst)
7367                 reset_control_assert(priv->plat->stmmac_rst);
7368         reset_control_assert(priv->plat->stmmac_ahb_rst);
7369         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7370             priv->hw->pcs != STMMAC_PCS_RTBI)
7371                 stmmac_mdio_unregister(ndev);
7372         destroy_workqueue(priv->wq);
7373         mutex_destroy(&priv->lock);
7374         bitmap_free(priv->af_xdp_zc_qps);
7375
7376         pm_runtime_disable(dev);
7377         pm_runtime_put_noidle(dev);
7378
7379         return 0;
7380 }
7381 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7382
7383 /**
7384  * stmmac_suspend - suspend callback
7385  * @dev: device pointer
7386  * Description: this is the function to suspend the device; it is called
7387  * by the platform driver to stop the network queues, program the PMT
7388  * register (for WoL) and clean up and release the driver resources.
7389  */
7390 int stmmac_suspend(struct device *dev)
7391 {
7392         struct net_device *ndev = dev_get_drvdata(dev);
7393         struct stmmac_priv *priv = netdev_priv(ndev);
7394         u32 chan;
7395
7396         if (!ndev || !netif_running(ndev))
7397                 return 0;
7398
7399         mutex_lock(&priv->lock);
7400
7401         netif_device_detach(ndev);
7402
7403         stmmac_disable_all_queues(priv);
7404
7405         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7406                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7407
7408         if (priv->eee_enabled) {
7409                 priv->tx_path_in_lpi_mode = false;
7410                 del_timer_sync(&priv->eee_ctrl_timer);
7411         }
7412
7413         /* Stop TX/RX DMA */
7414         stmmac_stop_all_dma(priv);
7415
7416         if (priv->plat->serdes_powerdown)
7417                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7418
7419         /* Enable Power down mode by programming the PMT regs */
7420         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7421                 stmmac_pmt(priv, priv->hw, priv->wolopts);
7422                 priv->irq_wake = 1;
7423         } else {
7424                 stmmac_mac_set(priv, priv->ioaddr, false);
7425                 pinctrl_pm_select_sleep_state(priv->device);
7426         }
7427
7428         mutex_unlock(&priv->lock);
7429
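        /* When the MAC provides wake-up (PMT), tell phylink the MAC stays
         * operational; otherwise suspend the PHY, lowering the link speed
         * first if the PHY is the wake-up source.
         */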
7430         rtnl_lock();
7431         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7432                 phylink_suspend(priv->phylink, true);
7433         } else {
7434                 if (device_may_wakeup(priv->device))
7435                         phylink_speed_down(priv->phylink, false);
7436                 phylink_suspend(priv->phylink, false);
7437         }
7438         rtnl_unlock();
7439
7440         if (priv->dma_cap.fpesel) {
7441                 /* Disable FPE */
7442                 stmmac_fpe_configure(priv, priv->ioaddr,
7443                                      priv->plat->tx_queues_to_use,
7444                                      priv->plat->rx_queues_to_use, false);
7445
7446                 stmmac_fpe_handshake(priv, false);
7447                 stmmac_fpe_stop_wq(priv);
7448         }
7449
7450         priv->speed = SPEED_UNKNOWN;
7451         return 0;
7452 }
7453 EXPORT_SYMBOL_GPL(stmmac_suspend);
7454
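/* Rewind the RX ring indices of @queue back to the start of the ring */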
7455 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7456 {
7457         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7458
7459         rx_q->cur_rx = 0;
7460         rx_q->dirty_rx = 0;
7461 }
7462
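/* Rewind the TX ring indices of @queue, clear the cached TSO MSS and reset
 * the BQL state of the matching netdev TX queue.
 */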
7463 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7464 {
7465         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7466
7467         tx_q->cur_tx = 0;
7468         tx_q->dirty_tx = 0;
7469         tx_q->mss = 0;
7470
7471         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7472 }
7473
7474 /**
7475  * stmmac_reset_queues_param - reset queue parameters
7476  * @priv: device pointer
7477  */
7478 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7479 {
7480         u32 rx_cnt = priv->plat->rx_queues_to_use;
7481         u32 tx_cnt = priv->plat->tx_queues_to_use;
7482         u32 queue;
7483
7484         for (queue = 0; queue < rx_cnt; queue++)
7485                 stmmac_reset_rx_queue(priv, queue);
7486
7487         for (queue = 0; queue < tx_cnt; queue++)
7488                 stmmac_reset_tx_queue(priv, queue);
7489 }
7490
7491 /**
7492  * stmmac_resume - resume callback
7493  * @dev: device pointer
7494  * Description: on resume, this function is invoked to set up the DMA and
7495  * CORE in a usable state.
7496  */
7497 int stmmac_resume(struct device *dev)
7498 {
7499         struct net_device *ndev = dev_get_drvdata(dev);
7500         struct stmmac_priv *priv = netdev_priv(ndev);
7501         int ret;
7502
7503         if (!netif_running(ndev))
7504                 return 0;
7505
7506         /* The Power Down bit in the PM register is cleared
7507          * automatically as soon as a magic packet or a Wake-up frame
7508          * is received. Anyway, it's better to manually clear
7509          * this bit because it can generate problems while resuming
7510          * from another device (e.g. serial console).
7511          */
7512         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7513                 mutex_lock(&priv->lock);
7514                 stmmac_pmt(priv, priv->hw, 0);
7515                 mutex_unlock(&priv->lock);
7516                 priv->irq_wake = 0;
7517         } else {
7518                 pinctrl_pm_select_default_state(priv->device);
7519                 /* reset the phy so that it's ready */
7520                 if (priv->mii)
7521                         stmmac_mdio_reset(priv->mii);
7522         }
7523
7524         if (priv->plat->serdes_powerup) {
7525                 ret = priv->plat->serdes_powerup(ndev,
7526                                                  priv->plat->bsp_priv);
7527
7528                 if (ret < 0)
7529                         return ret;
7530         }
7531
7532         rtnl_lock();
7533         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7534                 phylink_resume(priv->phylink);
7535         } else {
7536                 phylink_resume(priv->phylink);
7537                 if (device_may_wakeup(priv->device))
7538                         phylink_speed_up(priv->phylink);
7539         }
7540         rtnl_unlock();
7541
7542         rtnl_lock();
7543         mutex_lock(&priv->lock);
7544
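        /* Bring the DMA and the MAC back up from a clean state: reset the
         * ring indices, drop stale TX buffers, reprogram the HW and re-enable
         * the queues and DMA interrupts.
         */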
7545         stmmac_reset_queues_param(priv);
7546
7547         stmmac_free_tx_skbufs(priv);
7548         stmmac_clear_descriptors(priv, &priv->dma_conf);
7549
7550         stmmac_hw_setup(ndev, false);
7551         stmmac_init_coalesce(priv);
7552         stmmac_set_rx_mode(ndev);
7553
7554         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7555
7556         stmmac_enable_all_queues(priv);
7557         stmmac_enable_all_dma_irq(priv);
7558
7559         mutex_unlock(&priv->lock);
7560         rtnl_unlock();
7561
7562         netif_device_attach(ndev);
7563
7564         return 0;
7565 }
7566 EXPORT_SYMBOL_GPL(stmmac_resume);
7567
7568 #ifndef MODULE
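/* Parse the "stmmaceth=" boot command line options when the driver is
 * built-in. Each comma-separated token is a "name:value" pair matching one
 * of the module parameters, e.g. stmmaceth=debug:16,watchdog:4000.
 */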
7569 static int __init stmmac_cmdline_opt(char *str)
7570 {
7571         char *opt;
7572
7573         if (!str || !*str)
7574                 return 1;
7575         while ((opt = strsep(&str, ",")) != NULL) {
7576                 if (!strncmp(opt, "debug:", 6)) {
7577                         if (kstrtoint(opt + 6, 0, &debug))
7578                                 goto err;
7579                 } else if (!strncmp(opt, "phyaddr:", 8)) {
7580                         if (kstrtoint(opt + 8, 0, &phyaddr))
7581                                 goto err;
7582                 } else if (!strncmp(opt, "buf_sz:", 7)) {
7583                         if (kstrtoint(opt + 7, 0, &buf_sz))
7584                                 goto err;
7585                 } else if (!strncmp(opt, "tc:", 3)) {
7586                         if (kstrtoint(opt + 3, 0, &tc))
7587                                 goto err;
7588                 } else if (!strncmp(opt, "watchdog:", 9)) {
7589                         if (kstrtoint(opt + 9, 0, &watchdog))
7590                                 goto err;
7591                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7592                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
7593                                 goto err;
7594                 } else if (!strncmp(opt, "pause:", 6)) {
7595                         if (kstrtoint(opt + 6, 0, &pause))
7596                                 goto err;
7597                 } else if (!strncmp(opt, "eee_timer:", 10)) {
7598                         if (kstrtoint(opt + 10, 0, &eee_timer))
7599                                 goto err;
7600                 } else if (!strncmp(opt, "chain_mode:", 11)) {
7601                         if (kstrtoint(opt + 11, 0, &chain_mode))
7602                                 goto err;
7603                 }
7604         }
7605         return 1;
7606
7607 err:
7608         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7609         return 1;
7610 }
7611
7612 __setup("stmmaceth=", stmmac_cmdline_opt);
7613 #endif /* MODULE */
7614
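/* Module init/exit only set up the debugfs support; the devices themselves
 * are probed via stmmac_dvr_probe() from the platform/PCI glue drivers.
 */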
7615 static int __init stmmac_init(void)
7616 {
7617 #ifdef CONFIG_DEBUG_FS
7618         /* Create debugfs main directory if it doesn't exist yet */
7619         if (!stmmac_fs_dir)
7620                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7621         register_netdevice_notifier(&stmmac_notifier);
7622 #endif
7623
7624         return 0;
7625 }
7626
7627 static void __exit stmmac_exit(void)
7628 {
7629 #ifdef CONFIG_DEBUG_FS
7630         unregister_netdevice_notifier(&stmmac_notifier);
7631         debugfs_remove_recursive(stmmac_fs_dir);
7632 #endif
7633 }
7634
7635 module_init(stmmac_init)
7636 module_exit(stmmac_exit)
7637
7638 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7639 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7640 MODULE_LICENSE("GPL");